OpenCores: Subversion repository or1k_soc_on_altera_embedded_dev_kit
URL: https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk

File: trunk/linux-2.6/linux-2.6.24/arch/powerpc/kernel/smp.c (rev 3, committed by xianfeng)

/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

void smp_call_function_interrupt(void);

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;

#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
        BUG_ON(nr < 0 || nr >= NR_CPUS);

        /*
         * The processor is currently spinning, waiting for the
         * cpu_start field to become non-zero. After we set cpu_start,
         * the processor will continue on to secondary_start.
         */
        paca[nr].cpu_start = 1;
        smp_mb();
}
#endif

void smp_message_recv(int msg)
{
        switch(msg) {
        case PPC_MSG_CALL_FUNCTION:
                smp_call_function_interrupt();
                break;
        case PPC_MSG_RESCHEDULE:
                /* XXX Do we have to do this? */
                set_need_resched();
                break;
        case PPC_MSG_DEBUGGER_BREAK:
                if (crash_ipi_function_ptr) {
                        crash_ipi_function_ptr(get_irq_regs());
                        break;
                }
#ifdef CONFIG_DEBUGGER
                debugger_ipi(get_irq_regs());
                break;
#endif /* CONFIG_DEBUGGER */
                /* FALLTHROUGH */
        default:
                printk("SMP %d: smp_message_recv(): unknown msg %d\n",
                       smp_processor_id(), msg);
                break;
        }
}
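
/*
 * Note: smp_message_recv() is the receive side of the inter-processor
 * interrupt (IPI) protocol used throughout this file; the platform's
 * smp_ops->message_pass() hook is the matching send side (see
 * smp_send_reschedule() and smp_call_function_map() below).
 */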

void smp_send_reschedule(int cpu)
{
        if (likely(smp_ops))
                smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
        if (likely(smp_ops))
                smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
        crash_ipi_function_ptr = crash_ipi_callback;
        if (crash_ipi_callback && smp_ops) {
                mb();
                smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK);
        }
}
#endif

static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        while (1)
                ;
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 * Stolen from the i386 version.
 */
static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

static struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
} *call_data;
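
/*
 * How the call_data handshake works: the initiating CPU takes call_lock,
 * fills in a call_data_struct on its own stack, publishes it through the
 * global call_data pointer and sends PPC_MSG_CALL_FUNCTION IPIs. Each target
 * increments ->started before running ->func and, when ->wait is set,
 * increments ->finished afterwards; the initiator spins on those counters
 * (with a timeout) in smp_call_function_map() below.
 */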

/* delay of at least 8 seconds */
#define SMP_CALL_TIMEOUT        8

/*
 * These functions send a 'generic call function' IPI to other online
 * CPUS in the system.
 *
 * [SUMMARY] Run a function on other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>>, are executing it, or
 * have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
                        int wait, cpumask_t map)
{
        struct call_data_struct data;
        int ret = -1, num_cpus;
        int cpu;
        u64 timeout;

        if (unlikely(smp_ops == NULL))
                return ret;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock(&call_lock);

        /* remove 'self' from the map */
        if (cpu_isset(smp_processor_id(), map))
                cpu_clear(smp_processor_id(), map);

        /* sanity check the map, remove any non-online processors. */
        cpus_and(map, map, cpu_online_map);

        num_cpus = cpus_weight(map);
        if (!num_cpus)
                goto done;

        call_data = &data;
        smp_wmb();
        /* Send a message to all CPUs in the map */
        for_each_cpu_mask(cpu, map)
                smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);

        timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;

        /* Wait for indication that they have received the message */
        while (atomic_read(&data.started) != num_cpus) {
                HMT_low();
                if (get_tb() >= timeout) {
                        printk("smp_call_function on cpu %d: other cpus not "
                                "responding (%d)\n", smp_processor_id(),
                                atomic_read(&data.started));
                        debugger(NULL);
                        goto out;
                }
        }

        /* optionally wait for the CPUs to complete */
        if (wait) {
                while (atomic_read(&data.finished) != num_cpus) {
                        HMT_low();
                        if (get_tb() >= timeout) {
                                printk("smp_call_function on cpu %d: other "
                                        "cpus not finishing (%d/%d)\n",
                                        smp_processor_id(),
                                        atomic_read(&data.finished),
                                        atomic_read(&data.started));
                                debugger(NULL);
                                goto out;
                        }
                }
        }

 done:
        ret = 0;

 out:
        call_data = NULL;
        HMT_medium();
        spin_unlock(&call_lock);
        return ret;
}

static int __smp_call_function(void (*func)(void *info), void *info,
                               int nonatomic, int wait)
{
        return smp_call_function_map(func,info,nonatomic,wait,cpu_online_map);
}

int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
                        int wait)
{
        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        return __smp_call_function(func, info, nonatomic, wait);
}
EXPORT_SYMBOL(smp_call_function);
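
/*
 * Illustrative use of smp_call_function() (the handler below is hypothetical,
 * not part of this file): run a short, non-blocking callback on every other
 * online CPU and wait for all of them to finish:
 *
 *     static void drain_local_queue(void *unused) { ... }
 *     ...
 *     smp_call_function(drain_local_queue, NULL, 0, 1);
 */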

int smp_call_function_single(int cpu, void (*func) (void *info), void *info, int nonatomic,
                        int wait)
{
        cpumask_t map = CPU_MASK_NONE;
        int ret = 0;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_set(cpu, map);
        if (cpu != get_cpu())
                ret = smp_call_function_map(func,info,nonatomic,wait,map);
        else {
                local_irq_disable();
                func(info);
                local_irq_enable();
        }
        put_cpu();
        return ret;
}
EXPORT_SYMBOL(smp_call_function_single);
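
/*
 * Note: when the target of smp_call_function_single() is the current CPU,
 * no IPI is sent; the callback is simply run locally with interrupts
 * disabled, mimicking the context it would get on a remote CPU.
 */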

void smp_send_stop(void)
{
        __smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void smp_call_function_interrupt(void)
{
        void (*func) (void *info);
        void *info;
        int wait;

        /* call_data will be NULL if the sender timed out while
         * waiting on us to receive the call.
         */
        if (!call_data)
                return;

        func = call_data->func;
        info = call_data->info;
        wait = call_data->wait;

        if (!wait)
                smp_mb__before_atomic_inc();

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        (*func)(info);
        if (wait) {
                smp_mb__before_atomic_inc();
                atomic_inc(&call_data->finished);
        }
}

extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
        per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /* create a process for the processor */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
        paca[cpu].__current = p;
#endif
        current_set[cpu] = task_thread_info(p);
        task_thread_info(p)->cpu = cpu;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu;

        DBG("smp_prepare_cpus\n");

        /*
         * setup_cpu may need to be called on the boot cpu. We haven't
         * spun any cpus up, but let's be paranoid.
         */
        BUG_ON(boot_cpuid != smp_processor_id());

        /* Fixup boot cpu */
        smp_store_cpu_info(boot_cpuid);
        cpu_callin_map[boot_cpuid] = 1;

        if (smp_ops)
                max_cpus = smp_ops->probe();
        else
                max_cpus = 1;

        smp_space_timers(max_cpus);

        for_each_possible_cpu(cpu)
                if (cpu != boot_cpuid)
                        smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != boot_cpuid);

        cpu_set(boot_cpuid, cpu_online_map);
#ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
#endif
        current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu == boot_cpuid)
                return -EBUSY;

        cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_PPC64
        vdso_data->processorCount--;
        fixup_irqs(cpu_online_map);
#endif
        return 0;
}

int generic_cpu_enable(unsigned int cpu)
{
        /* Do the normal bootup if we haven't
         * already bootstrapped. */
        if (system_state != SYSTEM_RUNNING)
                return -ENOSYS;

        /* get the target out of its holding state */
        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        smp_wmb();

        while (!cpu_online(cpu))
                cpu_relax();

#ifdef CONFIG_PPC64
        fixup_irqs(cpu_online_map);
        /* counter the irq disable in fixup_irqs */
        local_irq_enable();
#endif
        return 0;
}

void generic_cpu_die(unsigned int cpu)
{
        int i;

        for (i = 0; i < 100; i++) {
                smp_rmb();
                if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                        return;
                msleep(100);
        }
        printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
        unsigned int cpu;

        local_irq_disable();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        smp_wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();
        cpu_set(cpu, cpu_online_map);
        local_irq_enable();
}
#endif
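
/*
 * Note: the generic hotplug helpers above cooperate through the per-cpu
 * cpu_state variable: the dying CPU parks itself in generic_mach_cpu_die()
 * after setting CPU_DEAD, generic_cpu_die() on another CPU polls for that
 * state, and a later generic_cpu_enable() writes CPU_UP_PREPARE to release
 * the parked CPU back into the online map.
 */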

static int __devinit cpu_enable(unsigned int cpu)
{
        if (smp_ops && smp_ops->cpu_enable)
                return smp_ops->cpu_enable(cpu);

        return -ENOSYS;
}

int __cpuinit __cpu_up(unsigned int cpu)
{
        int c;

        secondary_ti = current_set[cpu];
        if (!cpu_enable(cpu))
                return 0;

        if (smp_ops == NULL ||
            (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
                return -EINVAL;

        /* Make sure callin-map entry is 0 (can be left over from a
         * CPU hotplug)
         */
        cpu_callin_map[cpu] = 0;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        smp_mb();

        /* wake up cpus */
        DBG("smp: kicking cpu %d\n", cpu);
        smp_ops->kick_cpu(cpu);

        /*
         * wait to see if the cpu made a callin (is actually up).
         * use this value that I found through experimentation.
         * -- Cort
         */
        if (system_state < SYSTEM_RUNNING)
                for (c = 50000; c && !cpu_callin_map[cpu]; c--)
                        udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
        else
                /*
                 * CPUs can take much longer to come up in the
                 * hotplug case.  Wait five seconds.
                 */
                for (c = 25; c && !cpu_callin_map[cpu]; c--) {
                        msleep(200);
                }
#endif

        if (!cpu_callin_map[cpu]) {
                printk("Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        printk("Processor %u found.\n", cpu);

        if (smp_ops->give_timebase)
                smp_ops->give_timebase();

        /* Wait until cpu puts itself in the online map */
        while (!cpu_online(cpu))
                cpu_relax();

        return 0;
}
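
/*
 * Note: __cpu_up() above and start_secondary() below are the two halves of
 * CPU bring-up: __cpu_up() publishes the idle thread via secondary_ti, kicks
 * the target through smp_ops->kick_cpu() and polls cpu_callin_map, while the
 * new CPU sets its cpu_callin_map entry in start_secondary() and finally
 * marks itself online, which releases the wait loop at the end of __cpu_up().
 */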

/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
        unsigned int cpu = smp_processor_id();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
        cpu_callin_map[cpu] = 1;

        smp_ops->setup_cpu(cpu);
        if (smp_ops->take_timebase)
                smp_ops->take_timebase();

        if (system_state > SYSTEM_BOOTING)
                snapshot_timebase();

        secondary_cpu_time_init();

        spin_lock(&call_lock);
        cpu_set(cpu, cpu_online_map);
        spin_unlock(&call_lock);

        local_irq_enable();

        cpu_idle();
        return 0;
}

int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        cpumask_t old_mask;

        /* We want the setup_cpu() here to be called from CPU 0, but our
         * init thread may have been "borrowed" by another CPU in the meantime
         * so we pin ourselves to CPU 0 for a short while
         */
        old_mask = current->cpus_allowed;
        set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

        if (smp_ops)
                smp_ops->setup_cpu(boot_cpuid);

        set_cpus_allowed(current, old_mask);

        snapshot_timebases();

        dump_numa_cpu_topology();
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
        if (smp_ops->cpu_disable)
                return smp_ops->cpu_disable();

        return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
        if (smp_ops->cpu_die)
                smp_ops->cpu_die(cpu);
}
#endif
