OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [arch/] [x86_64/] [kernel/] [setup.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 *  linux/arch/x86-64/kernel/setup.c
3
 *
4
 *  Copyright (C) 1995  Linus Torvalds
5
 *
6
 *  Nov 2001 Dave Jones <davej@suse.de>
7
 *  Forked from i386 setup code.
8
 */
9
 
10
/*
11
 * This file handles the architecture-dependent parts of initialization
12
 */
13
 
14
#include <linux/errno.h>
15
#include <linux/sched.h>
16
#include <linux/kernel.h>
17
#include <linux/mm.h>
18
#include <linux/stddef.h>
19
#include <linux/unistd.h>
20
#include <linux/ptrace.h>
21
#include <linux/slab.h>
22
#include <linux/user.h>
23
#include <linux/a.out.h>
24
#include <linux/tty.h>
25
#include <linux/ioport.h>
26
#include <linux/delay.h>
27
#include <linux/config.h>
28
#include <linux/init.h>
29
#include <linux/acpi.h>
30
#include <linux/blk.h>
31
#include <linux/highmem.h>
32
#include <linux/bootmem.h>
33
#include <linux/module.h>
34
#include <asm/processor.h>
35
#include <linux/console.h>
36
#include <linux/seq_file.h>
37
#include <asm/mtrr.h>
38
#include <asm/uaccess.h>
39
#include <asm/system.h>
40
#include <asm/io.h>
41
#include <asm/smp.h>
42
#include <asm/msr.h>
43
#include <asm/desc.h>
44
#include <asm/e820.h>
45
#include <asm/dma.h>
46
#include <asm/mpspec.h>
47
#include <asm/mmu_context.h>
48
#include <asm/bootsetup.h>
49
#include <asm/proto.h>
50
 
51
int acpi_disabled = 0;
52
#ifdef  CONFIG_ACPI_BOOT
53
int acpi_noirq __initdata = 0;   /* skip ACPI IRQ initialization */
54
#endif
55
 
56
 
57
int swiotlb;
58
 
59
extern  int phys_proc_id[NR_CPUS];
60
 
61
/*
62
 * Machine setup..
63
 */
64
 
65
struct cpuinfo_x86 boot_cpu_data = {
66
        cpuid_level: -1,
67
};
68
 
69
unsigned long mmu_cr4_features;
70
EXPORT_SYMBOL(mmu_cr4_features);
71
 
72
/* For PCI or other memory-mapped resources */
73
unsigned long pci_mem_start = 0x10000000;
74
 
75
/*
76
 * Setup options
77
 */
78
struct drive_info_struct { char dummy[32]; } drive_info;
79
struct screen_info screen_info;
80
struct sys_desc_table_struct {
81
        unsigned short length;
82
        unsigned char table[0];
83
};
84
 
85
struct e820map e820;
86
 
87
unsigned char aux_device_present;
88
 
89
extern int root_mountflags;
90
extern char _text, _etext, _edata, _end;
91
 
92
char command_line[COMMAND_LINE_SIZE];
93
char saved_command_line[COMMAND_LINE_SIZE];
94
 
95
struct resource standard_io_resources[] = {
96
        { "dma1", 0x00, 0x1f, IORESOURCE_BUSY },
97
        { "pic1", 0x20, 0x3f, IORESOURCE_BUSY },
98
        { "timer", 0x40, 0x5f, IORESOURCE_BUSY },
99
        { "keyboard", 0x60, 0x6f, IORESOURCE_BUSY },
100
        { "dma page reg", 0x80, 0x8f, IORESOURCE_BUSY },
101
        { "pic2", 0xa0, 0xbf, IORESOURCE_BUSY },
102
        { "dma2", 0xc0, 0xdf, IORESOURCE_BUSY },
103
        { "fpu", 0xf0, 0xff, IORESOURCE_BUSY }
104
};
105
 
106
#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource))
107
 
108
struct resource code_resource = { "Kernel code", 0x100000, 0 };
109
struct resource data_resource = { "Kernel data", 0, 0 };
110
struct resource vram_resource = { "Video RAM area", 0xa0000, 0xbffff, IORESOURCE_BUSY };
111
 
112
 
113
/* System ROM resources */
114
#define MAXROMS 6
115
static struct resource rom_resources[MAXROMS] = {
116
        { "System ROM", 0xF0000, 0xFFFFF, IORESOURCE_BUSY },
117
        { "Video ROM", 0xc0000, 0xc7fff, IORESOURCE_BUSY }
118
};
119
 
120
#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
121
 
122
/*
 * Scan the legacy BIOS/option-ROM areas and register any ROM images
 * found there as busy iomem resources.
 */
static void __init probe_roms(void)
{
	unsigned long base;
	unsigned char *rom;
	int roms = 1;

	/* rom_resources[0] is the fixed System ROM entry. */
	request_resource(&iomem_resource, rom_resources+0);

	/* Video ROM is standard at C000:0000 - C7FF:0000, check signature */
	for (base = 0xC0000; base < 0xE0000; base += 2048) {
		rom = bus_to_virt(base);
		if (!romsignature(rom))
			continue;
		request_resource(&iomem_resource, rom_resources + roms);
		roms++;
		break;
	}

	/* Extension roms at C800:0000 - DFFF:0000 */
	for (base = 0xC8000; base < 0xE0000; base += 2048) {
		unsigned long length;

		rom = bus_to_virt(base);
		if (!romsignature(rom))
			continue;

		/* Byte 2 of the ROM header is the image size in 512-byte units. */
		length = rom[2] * 512;
		if (!length)
			continue;

		{
			unsigned char chksum = 0;
			unsigned int i;

			for (i = 0; i < length; i++)
				chksum += rom[i];

			/* A valid ROM image sums to zero modulo 256. */
			if (chksum)
				continue;
		}

		rom_resources[roms].start = base;
		rom_resources[roms].end = base + length - 1;
		rom_resources[roms].name = "Extension ROM";
		rom_resources[roms].flags = IORESOURCE_BUSY;

		request_resource(&iomem_resource, rom_resources + roms);
		roms++;
		if (roms >= MAXROMS)
			return;
	}

	/* Final check for motherboard extension rom at E000:0000 */
	base = 0xE0000;
	rom = bus_to_virt(base);

	if (romsignature(rom)) {
		rom_resources[roms].start = base;
		rom_resources[roms].end = base + 65535;
		rom_resources[roms].name = "Extension ROM";
		rom_resources[roms].flags = IORESOURCE_BUSY;

		request_resource(&iomem_resource, rom_resources + roms);
	}
}
184
 
185
unsigned long start_pfn, end_pfn;
186
extern unsigned long table_start, table_end;
187
 
188
#ifndef CONFIG_DISCONTIGMEM
189
/*
 * Set up the boot-time (bootmem) allocator for a flat, contiguous
 * memory layout, using the e820 map to find a home for the bitmap
 * and to register which page frames are free.
 */
static void __init contig_initmem_init(void)
{
	unsigned long bitmap_bytes, bitmap_phys;

	bitmap_bytes = bootmem_bootmap_pages(end_pfn) << PAGE_SHIFT;
	bitmap_phys = find_e820_area(0, end_pfn << PAGE_SHIFT, bitmap_bytes);
	if (bitmap_phys == -1L)
		panic("Cannot find bootmem map of size %ld\n", bitmap_bytes);

	bitmap_bytes = init_bootmem(bitmap_phys >> PAGE_SHIFT, end_pfn);
	/* Mark all e820 RAM free, then reserve the bitmap itself. */
	e820_bootmem_free(&contig_page_data, 0, end_pfn << PAGE_SHIFT);
	reserve_bootmem(bitmap_phys, bitmap_bytes);
}
200
#endif
201
 
202
/*
 * Architecture-specific boot-time initialization: pick up the
 * bootloader-provided parameters, build the e820/bootmem view of
 * memory, reserve the regions the kernel needs, and bring up the
 * early paging/APIC/resource infrastructure.
 *
 * NOTE: the ordering of calls below is significant and is preserved
 * exactly from the original.
 */
void __init setup_arch(char **cmdline_p)
{
	int i;
	unsigned long kernel_end;

	/* Parameters passed by the boot loader / setup code. */
	ROOT_DEV = to_kdev_t(ORIG_ROOT_DEV);
	drive_info = DRIVE_INFO;
	screen_info = SCREEN_INFO;
	aux_device_present = AUX_DEVICE_INFO;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
	setup_memory_region();

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;

	/* Describe the kernel image to the initial mm. */
	init_mm.start_code = (unsigned long) &_text;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	code_resource.start = virt_to_bus(&_text);
	code_resource.end = virt_to_bus(&_etext)-1;
	data_resource.start = virt_to_bus(&_etext);
	data_resource.end = virt_to_bus(&_edata)-1;

	parse_mem_cmdline(cmdline_p);

	e820_end_of_ram();

	check_efer();

	init_memory_mapping();

#ifdef CONFIG_BLK_DEV_INITRD
	/* Adopt a bootloader-supplied initrd if it fits below end of RAM. */
	if (LOADER_TYPE && INITRD_START) {
		if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
			initrd_start =
				INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
			initrd_end = initrd_start+INITRD_SIZE;
		} else {
			printk(KERN_ERR "initrd extends beyond end of memory "
			    "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			    (unsigned long)INITRD_START + INITRD_SIZE,
			    (unsigned long)(end_pfn << PAGE_SHIFT));
			initrd_start = 0;
		}
	}
#endif

#ifdef CONFIG_DISCONTIGMEM
	numa_initmem_init(0, end_pfn);
#else
	contig_initmem_init();
#endif

	/* Reserve direct mapping */
	reserve_bootmem_generic(table_start << PAGE_SHIFT,
				(table_end - table_start) << PAGE_SHIFT);

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start)
		reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
#endif

	/* Reserve BIOS data page. Some things still need it */
	reserve_bootmem_generic(0, PAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * But first pinch a few for the stack/trampoline stuff
	 * FIXME: Don't need the extra page at 4K, but need to fix
	 * trampoline before removing it. (see the GDT stuff)
	 */
	reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

	/* Reserve SMP trampoline */
	reserve_bootmem_generic(0x6000, PAGE_SIZE);
#endif
	/* Reserve Kernel */
	kernel_end = round_up(__pa_symbol(&_end), PAGE_SIZE);
	reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

#ifdef CONFIG_ACPI_SLEEP
	/*
	 * Reserve low memory region for sleep support.
	 */
	acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();
#endif

#ifdef CONFIG_SMP
	/* AP processor realmode stacks in low memory*/
	smp_alloc_memory();
#endif

	paging_init();
#if !defined(CONFIG_SMP) && defined(CONFIG_X86_IO_APIC)
	{
		extern void check_ioapic(void);
		check_ioapic();
	}
#endif

#ifdef CONFIG_ACPI_BOOT
	/*
	 * Parse the ACPI tables for possible boot-time SMP configuration.
	 */
	acpi_boot_init();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();
	init_apic_mappings();
#endif

	/*
	 * Request address space for all standard RAM and ROM resources
	 * and also for regions reported as reserved by the e820.
	 */
	probe_roms();
	e820_reserve_resources();
	request_resource(&iomem_resource, &vram_resource);

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < STANDARD_IO_RESOURCES; i++)
		request_resource(&ioport_resource, standard_io_resources+i);

	/* We put PCI memory up to make sure VALID_PAGE with DISCONTIGMEM
	   never returns true for it */

	/* Tell the PCI layer not to allocate too close to the RAM area.. */
	pci_mem_start = IOMAP_START;

#ifdef CONFIG_GART_IOMMU
	iommu_hole_init();
#endif
#ifdef CONFIG_SWIOTLB
	/* Fall back to software IOTLB when RAM extends past 4GB and there
	   is no hardware aperture to bounce through. */
	if (!iommu_aperture && end_pfn >= 0xffffffff>>PAGE_SHIFT) {
		swiotlb_init();
		swiotlb = 1;
	}
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	num_mappedpages = end_pfn;
}
366
 
367
/*
 * Fill c->x86_model_id with the 48-byte processor brand string from
 * extended CPUID leaves 0x80000002..0x80000004.
 *
 * Returns 1 on success, 0 if the CPU does not implement those leaves.
 */
static int __init get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *words;
	unsigned int leaf;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	/* Each leaf yields 16 bytes of the brand string in EAX..EDX. */
	words = (unsigned int *) c->x86_model_id;
	for (leaf = 0; leaf < 3; leaf++)
		cpuid(0x80000002 + leaf,
		      &words[leaf*4 + 0], &words[leaf*4 + 1],
		      &words[leaf*4 + 2], &words[leaf*4 + 3]);
	c->x86_model_id[48] = 0;
	return 1;
}
381
 
382
 
383
/*
 * Report L1/L2 cache and TLB geometry from the AMD-style extended
 * CPUID leaves (0x80000005/6), and pick up power-management and
 * address-size information from leaves 0x80000007/8.
 *
 * Side effects: printks the cache layout and fills in
 * c->x86_cache_size, c->x86_tlbsize, c->x86_power,
 * c->x86_virt_bits and c->x86_phys_bits.
 */
static void __init display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, eax, ebx, eax_2, ebx_2, ecx_2;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		/* Read the L2 leaf first so ebx_2/ecx_2 are valid below. */
		if (n >= 0x80000006)
			cpuid(0x80000006, &eax_2, &ebx_2, &ecx_2, &dummy);

		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line/%d way), D cache %dK (%d bytes/line/%d way)\n",
		       edx>>24, edx&0xFF, (edx>>16)&0xff,
		       ecx>>24, ecx&0xFF, (ecx>>16)&0xff);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
		if (n >= 0x80000006) {
			printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line/%d way)\n",
			       ecx_2>>16, ecx_2&0xFF,
			       /*  use bits[15:13] as power of 2 for # of ways */
			       /* BUGFIX: the way count must come from the L2
			        * leaf's ECX (ecx_2); the original read the L1D
			        * word (ecx) from leaf 0x80000005 here. */
			       1 << ((ecx_2>>13) & 0x7)
			       /* Direct and Full associative L2 are very unlikely */);
			c->x86_cache_size = ecx_2 >> 16;
			/* L1 (ebx) + L2 (ebx_2) 4K data+instruction TLB entries. */
			c->x86_tlbsize = ((ebx>>16)&0xff) + ((ebx_2>>16)&0xfff) +
				(ebx&0xff) + ((ebx_2)&0xfff);
		}
		if (n >= 0x80000007)
			cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
		if (n >= 0x80000008) {
			cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
			c->x86_virt_bits = (eax >> 8) & 0xff;
			c->x86_phys_bits = eax & 0xff;
		}
	}
}
417
 
418
#define LVL_1_INST      1
419
#define LVL_1_DATA      2
420
#define LVL_2           3
421
#define LVL_3           4
422
#define LVL_TRACE       5
423
 
424
struct _cache_table
425
{
426
        unsigned char descriptor;
427
        char cache_type;
428
        short size;
429
};
430
 
431
/* all the cache descriptor types we care about (no TLB or trace cache entries) */
432
static struct _cache_table cache_table[] __initdata =
433
{
434
        { 0x06, LVL_1_INST, 8 },
435
        { 0x08, LVL_1_INST, 16 },
436
        { 0x0A, LVL_1_DATA, 8 },
437
        { 0x0C, LVL_1_DATA, 16 },
438
        { 0x22, LVL_3,      512 },
439
        { 0x23, LVL_3,      1024 },
440
        { 0x25, LVL_3,      2048 },
441
        { 0x29, LVL_3,      4096 },
442
        { 0x39, LVL_2,      128 },
443
        { 0x3C, LVL_2,      256 },
444
        { 0x41, LVL_2,      128 },
445
        { 0x42, LVL_2,      256 },
446
        { 0x43, LVL_2,      512 },
447
        { 0x44, LVL_2,      1024 },
448
        { 0x45, LVL_2,      2048 },
449
        { 0x66, LVL_1_DATA, 8 },
450
        { 0x67, LVL_1_DATA, 16 },
451
        { 0x68, LVL_1_DATA, 32 },
452
        { 0x70, LVL_TRACE,  12 },
453
        { 0x71, LVL_TRACE,  16 },
454
        { 0x72, LVL_TRACE,  32 },
455
        { 0x79, LVL_2,      128 },
456
        { 0x7A, LVL_2,      256 },
457
        { 0x7B, LVL_2,      512 },
458
        { 0x7C, LVL_2,      1024 },
459
        { 0x82, LVL_2,      256 },
460
        { 0x83, LVL_2,      512 },
461
        { 0x84, LVL_2,      1024 },
462
        { 0x85, LVL_2,      2048 },
463
        { 0x00, 0, 0}
464
};
465
 
466
int select_idle_routine(struct cpuinfo_x86 *c);
467
 
468
/*
 * Intel-specific CPU setup: sum up cache sizes from the CPUID leaf-2
 * descriptors, detect Hyper-Threading siblings (SMP only), and read
 * the virtual/physical address widths from leaf 0x80000008.
 *
 * Cleanups vs. the original: the dead `char *p = NULL` (and the
 * never-taken `if (p) strcpy(...)`) has been removed, and the inner
 * iteration counter no longer shadows the outer `n`.
 */
static void __init init_intel(struct cpuinfo_x86 *c)
{
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
	u32 eax, dummy;
	unsigned int n;

	select_idle_routine(c);
	if (c->cpuid_level > 1) {
		/* supports eax=2  call */
		int i, j, iters;
		int regs[4];
		unsigned char *dp = (unsigned char *)regs;

		/* Number of times to iterate */
		iters = cpuid_eax(2) & 0xFF;

		for ( i = 0 ; i < iters ; i++ ) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			/* NOTE(review): only regs[0..2] are checked here, so
			 * EDX (regs[3]) bit 31 is never validated — kept as in
			 * the original; confirm against the i386 version. */
			for ( j = 0 ; j < 3 ; j++ ) {
				if ( regs[j] < 0 ) regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for ( j = 1 ; j < 16 ; j++ ) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0)
				{
					if (cache_table[k].descriptor == des) {
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}
						break;
					}

					k++;
				}
			}
		}

		if ( trace )
			printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
		else if ( l1i )
			printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
		if ( l1d )
			printk(", L1 D cache: %dK\n", l1d);
		else
			printk("\n");
		if ( l2 )
			printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
		if ( l3 )
			printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

		/*
		 * This assumes the L3 cache is shared; it typically lives in
		 * the northbridge.  The L1 caches are included by the L2
		 * cache, and so should not be included for the purpose of
		 * SMP switching weights.
		 */
		c->x86_cache_size = l2 ? l2 : (l1i+l1d);
	}

#ifdef CONFIG_SMP
	if (test_bit(X86_FEATURE_HT, &c->x86_capability)) {
		int	index_lsb, index_msb, tmp;
		int	initial_apic_id;
		int	cpu = smp_processor_id();
		u32	ebx, ecx, edx;

		cpuid(1, &eax, &ebx, &ecx, &edx);
		smp_num_siblings = (ebx & 0xff0000) >> 16;

		if (smp_num_siblings == 1) {
			printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
		} else if (smp_num_siblings > 1 ) {
			index_lsb = 0;
			index_msb = 31;
			/*
			 * At this point we only support two siblings per
			 * processor package.
			 */
#define NR_SIBLINGS	2
			if (smp_num_siblings != NR_SIBLINGS) {
				printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
				smp_num_siblings = 1;
				/* NB: returning here also skips the
				 * 0x80000008 address-width probe below,
				 * exactly as the original did. */
				return;
			}
			/* index_lsb/index_msb bracket the sibling-count bits
			 * so the physical package id can be extracted from
			 * the initial APIC id. */
			tmp = smp_num_siblings;
			while ((tmp & 1) == 0) {
				tmp >>=1 ;
				index_lsb++;
			}
			tmp = smp_num_siblings;
			while ((tmp & 0x80000000 ) == 0) {
				tmp <<=1 ;
				index_msb--;
			}
			if (index_lsb != index_msb )
				index_msb++;
			initial_apic_id = ebx >> 24 & 0xff;
			phys_proc_id[cpu] = initial_apic_id >> index_msb;

			printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
			       phys_proc_id[cpu]);
		}

	}
#endif

	n = cpuid_eax(0x80000000);
	if (n >= 0x80000008) {
		cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}

}
609
 
610
/*
 * AMD-specific CPU setup: normalize the 3DNow feature bit, name the
 * CPU, and dump the cache layout.  Returns the result of
 * get_model_name() (non-zero when a brand string was found).
 */
static int __init init_amd(struct cpuinfo_x86 *c)
{
	int have_name;

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, &c->x86_capability);

	have_name = get_model_name(c);
	if (!have_name && c->x86 == 15) {
		/* Should distingush Models here, but this is only
		   a fallback anyways. */
		strcpy(c->x86_model_id, "Hammer");
	}

	display_cacheinfo(c);
	return have_name;
}
631
 
632
 
633
/*
 * Map the 12-character CPUID vendor string (already stored in
 * c->x86_vendor_id) onto the X86_VENDOR_* enumeration.
 */
void __init get_cpu_vendor(struct cpuinfo_x86 *c)
{
	const char *id = c->x86_vendor_id;

	if (strcmp(id, "AuthenticAMD") == 0)
		c->x86_vendor = X86_VENDOR_AMD;
	else if (strcmp(id, "GenuineIntel") == 0)
		c->x86_vendor = X86_VENDOR_INTEL;
	else
		c->x86_vendor = X86_VENDOR_UNKNOWN;
}
644
 
645
/* Vendor/family lookup record for per-model name tables.
   NOTE(review): no table of these is visible in this file — appears
   unused here; confirm before removing. */
struct cpu_model_info {
	int vendor;
	int family;
	char *model_names[16];
};
650
 
651
/*
652
 * This does the hard work of actually picking apart the CPU stuff...
653
 */
654
/*
 * This does the hard work of actually picking apart the CPU stuff...
 *
 * Fills in *c from CPUID: vendor, family/model/stepping, the
 * standard/AMD/Transmeta capability words, and the model name, then
 * dispatches to the vendor-specific init routine.  On secondary CPUs
 * the boot CPU's capability words are ANDed down to the common set.
 */
void __init identify_cpu(struct cpuinfo_x86 *c)
{
	int i;
	u32 xlvl, tfms;

	/* Start from a clean "unknown" state. */
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0';	/* Unset */
	c->x86_model_id[0] = '\0';	/* Unset */
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name: leaf 0 returns it split across EBX/EDX/ECX. */
	cpuid(0x00000000, &c->cpuid_level,
	      (int *)&c->x86_vendor_id[0],
	      (int *)&c->x86_vendor_id[8],
	      (int *)&c->x86_vendor_id[4]);

	get_cpu_vendor(c);
	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if ( c->cpuid_level >= 0x00000001 ) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		if (c->x86 == 0xf) { /* extended */
			c->x86 += (tfms >> 20) & 0xff;
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		}
		c->x86_mask = tfms & 15;
		/* CLFLUSH present (bit 19): EBX byte 1 gives its line size
		   in 8-byte units. */
		if (c->x86_capability[0] & (1<<19))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	if ( (xlvl & 0xffff0000) == 0x80000000 ) {
		if ( xlvl >= 0x80000001 )
			c->x86_capability[1] = cpuid_edx(0x80000001);
		if ( xlvl >= 0x80000004 )
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ( (xlvl & 0xffff0000) == 0x80860000 ) {
		if (  xlvl >= 0x80860001 )
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	/*
	 * Vendor-specific initialization.  In this section we
	 * canonicalize the feature flags, meaning if there are
	 * features a certain CPU supports which CPUID doesn't
	 * tell us, CPUID claiming incorrect flags, or other bugs,
	 * we handle them here.
	 *
	 * At the end of this section, c->x86_capability better
	 * indicate the features this CPU genuinely supports!
	 */
	switch ( c->x86_vendor ) {
	case X86_VENDOR_AMD:
		init_amd(c);
		break;
	case X86_VENDOR_INTEL:
		init_intel(c);
		break;
	case X86_VENDOR_UNKNOWN:
	default:
		display_cacheinfo(c);
		break;
	}

	/*
	 * The vendor-specific functions might have changed features.  Now
	 * we do "generic changes."
	 */

	/*
	 * On SMP, boot_cpu_data holds the common feature set between
	 * all CPUs; so make sure that we indicate which features are
	 * common between the CPUs.  The first time this routine gets
	 * executed, c == &boot_cpu_data.
	 */
	if ( c != &boot_cpu_data ) {
		/* AND the already accumulated flags with these */
		for ( i = 0 ; i < NCAPINTS ; i++ )
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

#ifdef CONFIG_MCE
	mcheck_init(c);
#endif
}
759
 
760
/*
 * Print a one-line summary (model name + stepping) for a CPU at boot.
 */
void __init print_cpu_info(struct cpuinfo_x86 *c)
{
	if (c->x86_model_id[0])
		printk("%s", c->x86_model_id);

	if (!c->x86_mask && c->cpuid_level < 0)
		printk("\n");
	else
		printk(" stepping %02x\n", c->x86_mask);
}
770
 
771
/*
772
 *      Get CPU information for use by the procfs.
773
 */
774
 
775
static int show_cpuinfo(struct seq_file *m, void *v)
776
{
777
        struct cpuinfo_x86 *c = v;
778
 
779
        /*
780
         * These flag bits must match the definitions in <asm/cpufeature.h>.
781
         * NULL means this bit is undefined or reserved; either way it doesn't
782
         * have meaning as far as Linux is concerned.  Note that it's important
783
         * to realize there is a difference between this table and CPUID -- if
784
         * applications want to get the raw CPUID data, they should access
785
         * /dev/cpu/<cpu_nr>/cpuid instead.
786
         */
787
        static char *x86_cap_flags[] = {
788
                /* Intel-defined */
789
                "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
790
                "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
791
                "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
792
                "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", "pbe",
793
 
794
                /* AMD-defined */
795
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
796
                NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
797
                NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
798
                NULL, NULL, NULL, NULL, NULL, "lm", "3dnowext", "3dnow",
799
 
800
                /* Transmeta-defined */
801
                "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
802
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
803
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
804
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
805
 
806
                /* Other (Linux-defined) */
807
                "cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr", NULL, NULL, NULL, NULL,
808
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
809
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
810
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
811
 
812
                /* Intel Defined (cpuid 1 and ecx) */
813
                "pni", NULL, NULL, "monitor", "ds-cpl", NULL, NULL, "est",
814
                "tm2", NULL, "cid", NULL, NULL, "cmpxchg16b", NULL, NULL,
815
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
816
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
817
        };
818
        static char *x86_power_flags[] = {
819
                "ts",   /* temperature sensor */
820
                "fid",  /* frequency id control */
821
                "vid",  /* voltage id control */
822
                "ttp",  /* thermal trip */
823
        };
824
 
825
#ifdef CONFIG_SMP
826
        if (!(cpu_online_map & (1<<(c-cpu_data))))
827
                return 0;
828
#endif
829
 
830
        seq_printf(m,"processor\t: %u\n"
831
                     "vendor_id\t: %s\n"
832
                     "cpu family\t: %d\n"
833
                     "model\t\t: %d\n"
834
                     "model name\t: %s\n",
835
                     (unsigned)(c-cpu_data),
836
                     c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
837
                     c->x86,
838
                     (int)c->x86_model,
839
                     c->x86_model_id[0] ? c->x86_model_id : "unknown");
840
 
841
        if (c->x86_mask || c->cpuid_level >= 0)
842
                seq_printf(m, "stepping\t: %d\n", c->x86_mask);
843
        else
844
                seq_printf(m, "stepping\t: unknown\n");
845
 
846
        if ( test_bit(X86_FEATURE_TSC, &c->x86_capability) ) {
847
                seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
848
                             cpu_khz / 1000, (cpu_khz % 1000));
849
        }
850
 
851
        seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
852
 
853
#ifdef CONFIG_SMP
854
        seq_printf(m, "physical id\t: %d\n",phys_proc_id[c - cpu_data]);
855
        seq_printf(m, "siblings\t: %d\n",smp_num_siblings);
856
#endif
857
 
858
        seq_printf(m,
859
                "fpu\t\t: yes\n"
860
                "fpu_exception\t: yes\n"
861
                "cpuid level\t: %d\n"
862
                "wp\t\t: yes\n"
863
                "flags\t\t:",
864
                   c->cpuid_level);
865
 
866
        {
867
                int i;
868
                for ( i = 0 ; i < 32*NCAPINTS ; i++ )
869
                        if ( test_bit(i, &c->x86_capability) &&
870
                             x86_cap_flags[i] != NULL )
871
                                seq_printf(m, " %s", x86_cap_flags[i]);
872
        }
873
 
874
        seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
875
                   c->loops_per_jiffy/(500000/HZ),
876
                   (c->loops_per_jiffy/(5000/HZ)) % 100);
877
 
878
        if (c->x86_tlbsize > 0)
879
                seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
880
        seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
881
 
882
        if (c->x86_phys_bits > 0)
883
        seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
884
                   c->x86_phys_bits, c->x86_virt_bits);
885
 
886
        seq_printf(m, "power management:");
887
        {
888
                int i;
889
                for (i = 0; i < 32; i++)
890
                        if (c->x86_power & (1 << i)) {
891
                                if (i < ARRAY_SIZE(x86_power_flags))
892
                                        seq_printf(m, " %s", x86_power_flags[i]);
893
                                else
894
                                        seq_printf(m, " [%d]", i);
895
                        }
896
        }
897
 
898
        seq_printf(m, "\n\n");
899
        return 0;
900
}
901
 
902
/* seq_file iterator: return the cpu_data entry for *pos, or NULL past the end. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	if (*pos >= NR_CPUS)
		return NULL;
	return cpu_data + *pos;
}
906
 
907
/* seq_file iterator: advance to the next CPU slot. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}
912
 
913
/* seq_file iterator teardown: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
916
 
917
struct seq_operations cpuinfo_op = {
918
        start:  c_start,
919
        next:   c_next,
920
        stop:   c_stop,
921
        show:   show_cpuinfo,
922
};

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.