OpenCores Subversion repository: or1k_soc_on_altera_embedded_dev_kit
URL: https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

File: trunk/linux-2.6/linux-2.6.24/arch/powerpc/kernel/setup-common.c (rev 3, author xianfeng)

/*
 * Common boot and setup code for both 32-bit and 64-bit.
 * Extracted from arch/powerpc/kernel/setup_64.c.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/debugfs.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/lmb.h>
#include <asm/xmon.h>

#include "setup.h"

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
 
/* The main machine-dep calls structure
 */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);

unsigned long klimit = (unsigned long) _end;

char cmd_line[COMMAND_LINE_SIZE];

/*
 * This still seems to be needed... -- paulus
 */
struct screen_info screen_info = {
        .orig_x = 0,
        .orig_y = 25,
        .orig_video_cols = 80,
        .orig_video_lines = 25,
        .orig_video_isVGA = 1,
        .orig_video_points = 16
};

#ifdef __DO_IRQ_CANON
/* XXX should go elsewhere eventually */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif

/* also used by kexec */
void machine_shutdown(void)
{
        if (ppc_md.machine_shutdown)
                ppc_md.machine_shutdown();
}

void machine_restart(char *cmd)
{
        machine_shutdown();
        if (ppc_md.restart)
                ppc_md.restart(cmd);
#ifdef CONFIG_SMP
        smp_send_stop();
#endif
        printk(KERN_EMERG "System Halted, OK to turn off power\n");
        local_irq_disable();
        while (1) ;
}

void machine_power_off(void)
{
        machine_shutdown();
        if (ppc_md.power_off)
                ppc_md.power_off();
#ifdef CONFIG_SMP
        smp_send_stop();
#endif
        printk(KERN_EMERG "System Halted, OK to turn off power\n");
        local_irq_disable();
        while (1) ;
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);

void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void machine_halt(void)
{
        machine_shutdown();
        if (ppc_md.halt)
                ppc_md.halt();
#ifdef CONFIG_SMP
        smp_send_stop();
#endif
        printk(KERN_EMERG "System Halted, OK to turn off power\n");
        local_irq_disable();
        while (1) ;
}


#ifdef CONFIG_TAU
extern u32 cpu_temp(unsigned long cpu);
extern u32 cpu_temp_both(unsigned long cpu);
#endif /* CONFIG_TAU */

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned int, pvr);
#endif
 
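/*
 * /proc/cpuinfo show routine.  Iterator positions are biased by one (see
 * c_start() below); the extra slot at NR_CPUS emits the machine-wide
 * summary (total bogomips, timebase, platform).
 */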
static int show_cpuinfo(struct seq_file *m, void *v)
{
        unsigned long cpu_id = (unsigned long)v - 1;
        unsigned int pvr;
        unsigned short maj;
        unsigned short min;

        if (cpu_id == NR_CPUS) {
#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
                unsigned long bogosum = 0;
                int i;
                for_each_online_cpu(i)
                        bogosum += loops_per_jiffy;
                seq_printf(m, "total bogomips\t: %lu.%02lu\n",
                           bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
#endif /* CONFIG_SMP && CONFIG_PPC32 */
                seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
                if (ppc_md.name)
                        seq_printf(m, "platform\t: %s\n", ppc_md.name);
                if (ppc_md.show_cpuinfo != NULL)
                        ppc_md.show_cpuinfo(m);

                return 0;
        }

        /* We only show online cpus: disable preempt (overzealous, I
         * knew) to prevent cpu going down. */
        preempt_disable();
        if (!cpu_online(cpu_id)) {
                preempt_enable();
                return 0;
        }

#ifdef CONFIG_SMP
        pvr = per_cpu(pvr, cpu_id);
#else
        pvr = mfspr(SPRN_PVR);
#endif
        maj = (pvr >> 8) & 0xFF;
        min = pvr & 0xFF;

        seq_printf(m, "processor\t: %lu\n", cpu_id);
        seq_printf(m, "cpu\t\t: ");

        if (cur_cpu_spec->pvr_mask)
                seq_printf(m, "%s", cur_cpu_spec->cpu_name);
        else
                seq_printf(m, "unknown (%08x)", pvr);

#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                seq_printf(m, ", altivec supported");
#endif /* CONFIG_ALTIVEC */

        seq_printf(m, "\n");

#ifdef CONFIG_TAU
        if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) {
#ifdef CONFIG_TAU_AVERAGE
                /* more straightforward, but potentially misleading */
                seq_printf(m,  "temperature \t: %u C (uncalibrated)\n",
                           cpu_temp(cpu_id));
#else
                /* show the actual temp sensor range */
                u32 temp;
                temp = cpu_temp_both(cpu_id);
                seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n",
                           temp & 0xff, temp >> 16);
#endif
        }
#endif /* CONFIG_TAU */

        /*
         * Assume here that all clock rates are the same in a
         * smp system.  -- Cort
         */
        if (ppc_proc_freq)
                seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
                           ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);

        if (ppc_md.show_percpuinfo != NULL)
                ppc_md.show_percpuinfo(m, cpu_id);
 
        /* If we are a Freescale core, do a simple check so
         * we don't have to keep adding cases in the future */
        if (PVR_VER(pvr) & 0x8000) {
                maj = PVR_MAJ(pvr);
                min = PVR_MIN(pvr);
        } else {
                switch (PVR_VER(pvr)) {
                        case 0x0020:    /* 403 family */
                                maj = PVR_MAJ(pvr) + 1;
                                min = PVR_MIN(pvr);
                                break;
                        case 0x1008:    /* 740P/750P ?? */
                                maj = ((pvr >> 8) & 0xFF) - 1;
                                min = pvr & 0xFF;
                                break;
                        default:
                                maj = (pvr >> 8) & 0xFF;
                                min = pvr & 0xFF;
                                break;
                }
        }

        seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n",
                   maj, min, PVR_VER(pvr), PVR_REV(pvr));

#ifdef CONFIG_PPC32
        seq_printf(m, "bogomips\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

#ifdef CONFIG_SMP
        seq_printf(m, "\n");
#endif

        preempt_enable();
        return 0;
}
 
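/*
 * seq_file iterator callbacks for /proc/cpuinfo.  c_start() returns the
 * position plus one so that position 0 is not reported as a NULL
 * end-of-file token; show_cpuinfo() subtracts the bias again.
 */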
static void *c_start(struct seq_file *m, loff_t *pos)
{
        unsigned long i = *pos;

        return i <= NR_CPUS ? (void *)(i + 1) : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};

void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        DBG(" -> check_for_initrd()  initrd_start=0x%lx  initrd_end=0x%lx\n",
            initrd_start, initrd_end);

        /* If we were passed an initrd, set the ROOT_DEV properly if the values
         * look sensible. If not, clear initrd reference.
         */
        if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
            initrd_end > initrd_start)
                ROOT_DEV = Root_RAM0;
        else
                initrd_start = initrd_end = 0;

        if (initrd_start)
                printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

        DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}

#ifdef CONFIG_SMP
 
/**
 * smp_setup_cpu_maps - initialize the following cpu maps:
 *                  cpu_possible_map
 *                  cpu_present_map
 *                  cpu_sibling_map
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_map as they come up.
 *
 * This function is valid only for Open Firmware systems.  finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 *
 * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
 */
void __init smp_setup_cpu_maps(void)
{
        struct device_node *dn = NULL;
        int cpu = 0;

        while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
                const int *intserv;
                int j, len = sizeof(u32), nthreads = 1;

                intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
                                &len);
                if (intserv)
                        nthreads = len / sizeof(int);
                else {
                        intserv = of_get_property(dn, "reg", NULL);
                        if (!intserv)
                                intserv = &cpu; /* assume logical == phys */
                }

                for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
                        cpu_set(cpu, cpu_present_map);
                        set_hard_smp_processor_id(cpu, intserv[j]);
                        cpu_set(cpu, cpu_possible_map);
                        cpu++;
                }
        }

#ifdef CONFIG_PPC64
        /*
         * On pSeries LPAR, we need to know how many cpus
         * could possibly be added to this partition.
         */
        if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
            (dn = of_find_node_by_path("/rtas"))) {
                int num_addr_cell, num_size_cell, maxcpus;
                const unsigned int *ireg;

                num_addr_cell = of_n_addr_cells(dn);
                num_size_cell = of_n_size_cells(dn);

                ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL);

                if (!ireg)
                        goto out;

                maxcpus = ireg[num_addr_cell + num_size_cell];

                /* Double maxcpus for processors which have SMT capability */
                if (cpu_has_feature(CPU_FTR_SMT))
                        maxcpus *= 2;

                if (maxcpus > NR_CPUS) {
                        printk(KERN_WARNING
                               "Partition configured for %d cpus, "
                               "operating system maximum is %d.\n",
                               maxcpus, NR_CPUS);
                        maxcpus = NR_CPUS;
                } else
                        printk(KERN_INFO "Partition configured for %d cpus.\n",
                               maxcpus);

                for (cpu = 0; cpu < maxcpus; cpu++)
                        cpu_set(cpu, cpu_possible_map);
        out:
                of_node_put(dn);
        }

        vdso_data->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */
}
 
/*
 * Since cpu_sibling_map is now a per_cpu array, it cannot be initialized
 * until the per_cpu areas have been created.  This function is now called
 * from setup_per_cpu_areas().
 */
void __init smp_setup_cpu_sibling_map(void)
{
#if defined(CONFIG_PPC64)
        int cpu;

        /*
         * Do the sibling map; assume only two threads per processor.
         */
        for_each_possible_cpu(cpu) {
                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
                if (cpu_has_feature(CPU_FTR_SMT))
                        cpu_set(cpu ^ 0x1, per_cpu(cpu_sibling_map, cpu));
        }
#endif /* CONFIG_PPC64 */
}
#endif /* CONFIG_SMP */
 
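/*
 * Register a "pcspkr" platform device when the device tree advertises a
 * PC speaker (a node compatible with "pnpPNP,100").
 */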
static __init int add_pcspkr(void)
{
        struct device_node *np;
        struct platform_device *pd;
        int ret;

        np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
        of_node_put(np);
        if (!np)
                return -ENODEV;

        pd = platform_device_alloc("pcspkr", -1);
        if (!pd)
                return -ENOMEM;

        ret = platform_device_add(pd);
        if (ret)
                platform_device_put(pd);

        return ret;
}
device_initcall(add_pcspkr);
 
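/*
 * Pick the machine description for this board: copy each machdep_calls
 * entry from the __machine_desc section into ppc_md in turn and stop at
 * the first one whose probe() hook claims the machine.
 */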
void probe_machine(void)
{
        extern struct machdep_calls __machine_desc_start;
        extern struct machdep_calls __machine_desc_end;

        /*
         * Iterate all ppc_md structures until we find the proper
         * one for the current machine type
         */
        DBG("Probing machine type ...\n");

        for (machine_id = &__machine_desc_start;
             machine_id < &__machine_desc_end;
             machine_id++) {
                DBG("  %s ...", machine_id->name);
                memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
                if (ppc_md.probe()) {
                        DBG(" match !\n");
                        break;
                }
                DBG("\n");
        }
        /* What can we do if we didn't find anything? */
        if (machine_id >= &__machine_desc_end) {
                DBG("No suitable machine found !\n");
                for (;;);
        }

        printk(KERN_INFO "Using %s machine description\n", ppc_md.name);
}

/* Match a class of boards, not a specific device configuration. */
int check_legacy_ioport(unsigned long base_port)
{
        struct device_node *parent, *np = NULL;
        int ret = -ENODEV;

        switch(base_port) {
        case I8042_DATA_REG:
                if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
                        np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
                if (np) {
                        parent = of_get_parent(np);
                        of_node_put(np);
                        np = parent;
                        break;
                }
                np = of_find_node_by_type(NULL, "8042");
                /* Pegasos has no device_type on its 8042 node, look for the
                 * name instead */
                if (!np)
                        np = of_find_node_by_name(NULL, "8042");
                break;
        case FDC_BASE: /* FDC1 */
                np = of_find_node_by_type(NULL, "fdc");
                break;
#ifdef CONFIG_PPC_PREP
        case _PIDXR:
        case _PNPWRP:
        case PNPBIOS_BASE:
                /* implement me */
#endif
        default:
                /* ipmi is supposed to fail here */
                break;
        }
        if (!np)
                return ret;
        parent = of_get_parent(np);
        if (parent) {
                if (strcmp(parent->type, "isa") == 0)
                        ret = 0;
                of_node_put(parent);
        }
        of_node_put(np);
        return ret;
}
EXPORT_SYMBOL(check_legacy_ioport);
 
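/*
 * Forward a kernel panic to the platform's panic hook via the panic
 * notifier chain (registered by setup_panic() below).
 */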
static int ppc_panic_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
        ppc_md.panic(ptr);  /* May not return */
        return NOTIFY_DONE;
}

static struct notifier_block ppc_panic_block = {
        .notifier_call = ppc_panic_event,
        .priority = INT_MIN /* may not return; must be done last */
};

void __init setup_panic(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}
 
#ifdef CONFIG_CHECK_CACHE_COHERENCY
/*
 * For platforms that have configurable cache-coherency.  This function
 * checks that the cache coherency setting of the kernel matches the setting
 * left by the firmware, as indicated in the device tree.  Since a mismatch
 * will eventually result in DMA failures, we print an error and call
 * BUG() in that case.
 */
 
#ifdef CONFIG_NOT_COHERENT_CACHE
#define KERNEL_COHERENCY        0
#else
#define KERNEL_COHERENCY        1
#endif

static int __init check_cache_coherency(void)
{
        struct device_node *np;
        const void *prop;
        int devtree_coherency;

        np = of_find_node_by_path("/");
        prop = of_get_property(np, "coherency-off", NULL);
        of_node_put(np);

        devtree_coherency = prop ? 0 : 1;

        if (devtree_coherency != KERNEL_COHERENCY) {
                printk(KERN_ERR
                        "kernel coherency:%s != device tree_coherency:%s\n",
                        KERNEL_COHERENCY ? "on" : "off",
                        devtree_coherency ? "on" : "off");
                BUG();
        }

        return 0;
}

late_initcall(check_cache_coherency);
#endif /* CONFIG_CHECK_CACHE_COHERENCY */
 
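/*
 * Root of the powerpc-specific debugfs hierarchy, created at arch_initcall
 * time so later arch code can place its debugfs entries under it.
 */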
#ifdef CONFIG_DEBUG_FS
struct dentry *powerpc_debugfs_root;

static int powerpc_debugfs_init(void)
{
        powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL);

        return powerpc_debugfs_root == NULL;
}
arch_initcall(powerpc_debugfs_init);
#endif
