OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

or1k/trunk/linux/linux-2.4/arch/ppc64/kernel/smp.c - Blame information for rev 1765


Rev 1275, author phoenix (all lines).

/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/cache.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/hardirq.h>
#include <asm/softirq.h>
#include <asm/init.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/naca.h>
#include <asm/paca.h>
#include <asm/iSeries/LparData.h>
#include <asm/iSeries/HvCall.h>
#include <asm/iSeries/HvCallCfg.h>
#include <asm/time.h>
#include <asm/ppcdebug.h>
#include "open_pic.h"
#include <asm/machdep.h>
#include <asm/cputable.h>
#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
int (*dump_ipi_function_ptr)(struct pt_regs *);
#include <asm/dump.h>
#endif

#ifdef CONFIG_KDB
#include <linux/kdb.h>
#endif

int smp_threads_ready = 0;
volatile int smp_commenced = 0;
int smp_num_cpus = 1;
int smp_tb_synchronized = 0;
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
cycles_t cacheflush_time;
static int max_cpus __initdata = NR_CPUS;

unsigned long cpu_online_map;

volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};

#define TB_SYNC_PASSES 4
volatile unsigned long __initdata tb_sync_flag = 0;
volatile unsigned long __initdata tb_offset = 0;

extern unsigned char stab_array[];

int start_secondary(void *);
extern int cpu_idle(void *unused);
void smp_call_function_interrupt(void);
void smp_message_pass(int target, int msg, unsigned long data, int wait);
static unsigned long iSeries_smp_message[NR_CPUS];

void xics_setup_cpu(void);
void xics_cause_IPI(int cpu);

long h_register_vpa(unsigned long flags, unsigned long proc,
                    unsigned long vpa);

/*
 * XICS only has a single IPI, so encode the messages per CPU
 */
volatile unsigned long xics_ipi_message[NR_CPUS] = {0};
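
/* Every sender goes through this wrapper: it bumps the ipi_sent counter for
 * the IPI statistics and then dispatches to the platform-specific
 * smp_message_pass hook.  It deliberately shadows the smp_message_pass()
 * prototype above. */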
#define smp_message_pass(t,m,d,w) \
        do { atomic_inc(&ipi_sent); \
             ppc_md.smp_message_pass((t),(m),(d),(w)); \
        } while (0)

#ifdef CONFIG_KDB
/* save regs here before calling kdb_ipi */
struct pt_regs *kdb_smp_regs[NR_CPUS];

/* called on each processor; drop each into kdb.  Takes a void * so its
 * type matches what smp_call_function() expects. */
void smp_kdb_stop_proc(void *unused)
{
        kdb_ipi(kdb_smp_regs[smp_processor_id()], NULL);
}

void smp_kdb_stop(void)
{
        smp_call_function(smp_kdb_stop_proc, NULL, 1, 0);
}
#endif
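
/* Write the 64-bit timebase as two 32-bit halves.  TBL is cleared first so
 * the timebase cannot carry into the freshly written TBU between the TBU
 * and TBL writes. */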
static inline void set_tb(unsigned int upper, unsigned int lower)
{
        mtspr(SPRN_TBWL, 0);
        mtspr(SPRN_TBWU, upper);
        mtspr(SPRN_TBWL, lower);
}
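
/* IPI entry point on iSeries: drain all four message bits pending for this
 * CPU in iSeries_smp_message[] and hand each one to smp_message_recv(). */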
void iSeries_smp_message_recv(struct pt_regs *regs)
{
        int cpu = smp_processor_id();
        int msg;

        if (smp_num_cpus < 2)
                return;

        for (msg = 0; msg < 4; ++msg)
                if (test_and_clear_bit(msg, &iSeries_smp_message[cpu]))
                        smp_message_recv(msg, regs);
}

static void smp_iSeries_message_pass(int target, int msg, unsigned long data, int wait)
{
        int i;

        for (i = 0; i < smp_num_cpus; ++i) {
                if ((target == MSG_ALL) ||
                    (target == i) ||
                    ((target == MSG_ALL_BUT_SELF) && (i != smp_processor_id()))) {
                        set_bit(msg, &iSeries_smp_message[i]);
                        HvCall_sendIPI(&(paca[i]));
                }
        }
}

static int smp_iSeries_numProcs(void)
{
        unsigned np, i;
        struct ItLpPaca *lpPaca;

        np = 0;
        for (i = 0; i < MAX_PACAS; ++i) {
                lpPaca = paca[i].xLpPacaPtr;
                if (lpPaca->xDynProcStatus < 2) {
                        ++np;
                }
        }
        return np;
}
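
/* Count the processors owned by this partition (xDynProcStatus < 2) and seed
 * each one's next_jiffy_update_tb from the boot CPU's.  On iSeries the
 * hypervisor keeps the timebases in step, so smp_tb_synchronized is simply
 * asserted here. */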
static int smp_iSeries_probe(void)
{
        unsigned i;
        unsigned np;
        struct ItLpPaca *lpPaca;

        np = 0;
        for (i = 0; i < MAX_PACAS; ++i) {
                lpPaca = paca[i].xLpPacaPtr;
                if (lpPaca->xDynProcStatus < 2) {
                        ++np;
                        paca[i].next_jiffy_update_tb = paca[0].next_jiffy_update_tb;
                }
        }

        smp_tb_synchronized = 1;
        return np;
}

static void smp_iSeries_kick_cpu(int nr)
{
        struct ItLpPaca *lpPaca;

        /* Verify we have a paca for processor nr */
        if ((nr <= 0) || (nr >= MAX_PACAS))
                return;

        /* Verify that our partition has processor nr */
        lpPaca = paca[nr].xLpPacaPtr;
        if (lpPaca->xDynProcStatus >= 2)
                return;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        mb();

        /* The processor is currently spinning, waiting
         * for the xProcStart field to become non-zero.
         * After we set xProcStart, the processor will
         * continue on to secondary_start in iSeries_head.S.
         */
        paca[nr].xProcStart = 1;
}

static void smp_iSeries_setup_cpu(int nr)
{
}

/* This is called very early. */
void smp_init_iSeries(void)
{
        ppc_md.smp_message_pass = smp_iSeries_message_pass;
        ppc_md.smp_probe        = smp_iSeries_probe;
        ppc_md.smp_kick_cpu     = smp_iSeries_kick_cpu;
        ppc_md.smp_setup_cpu    = smp_iSeries_setup_cpu;

        systemcfg->processorCount = smp_iSeries_numProcs();
}
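
/* OpenPIC flavour of message passing: messages 0-3 correspond to the four
 * OpenPIC IPI channels; target is either a single CPU number or one of the
 * MSG_ALL / MSG_ALL_BUT_SELF broadcast values, translated here into an IPI
 * destination mask. */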
static void
smp_openpic_message_pass(int target, int msg, unsigned long data, int wait)
{
        /* make sure we're sending something that translates to an IPI */
        if (msg > 0x3) {
                printk("SMP %d: smp_message_pass: unknown msg %d\n",
                       smp_processor_id(), msg);
                return;
        }
        switch (target) {
        case MSG_ALL:
                openpic_cause_IPI(msg, 0xffffffff);
                break;
        case MSG_ALL_BUT_SELF:
                openpic_cause_IPI(msg,
                                  0xffffffff & ~(1 << smp_processor_id()));
                break;
        default:
                openpic_cause_IPI(msg, 1 << target);
                break;
        }
}

static int
smp_chrp_probe(void)
{
        if (systemcfg->processorCount > 1)
                openpic_request_IPIs();

        return systemcfg->processorCount;
}

static void
smp_kick_cpu(int nr)
{
        /* Verify we have a paca for processor nr */
        if ((nr <= 0) || (nr >= MAX_PACAS))
                return;

        /* The information for processor bringup must
         * be written out to main store before we release
         * the processor.
         */
        mb();

        /* The processor is currently spinning, waiting
         * for the xProcStart field to become non-zero.
         * After we set xProcStart, the processor will
         * continue on to secondary_start in iSeries_head.S.
         */
        paca[nr].xProcStart = 1;
}
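
/* Stagger the per-CPU decrementer deadlines: each successive CPU's
 * next_jiffy_update_tb is offset by tb_ticks_per_jiffy / nr, so the timer
 * interrupts of the nr processors are spread evenly across each jiffy
 * rather than all firing at once. */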
static void smp_space_timers(unsigned nr)
{
        unsigned long offset, i;

        offset = tb_ticks_per_jiffy / nr;
        for (i = 1; i < nr; ++i) {
                paca[i].next_jiffy_update_tb = paca[i-1].next_jiffy_update_tb + offset;
        }
}
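
/* Per-CPU setup for pSeries.  Under an LPAR hypervisor the timebases are
 * already synchronized.  Otherwise they are synchronized by hand: every
 * secondary increments `ready' and spins, CPU 0 freezes the timebase
 * through RTAS, all CPUs zero their timebase while it is frozen, and CPU 0
 * thaws it again, leaving all timebases in lockstep. */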
static void
smp_chrp_setup_cpu(int cpu_nr)
{
        static atomic_t ready = ATOMIC_INIT(1);
        static volatile int frozen = 0;

        if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
                /* timebases already synced under the hypervisor. */
                paca[cpu_nr].next_jiffy_update_tb = tb_last_stamp = get_tb();
                if (cpu_nr == 0) {
                        systemcfg->tb_orig_stamp = tb_last_stamp;
                        /* Should update naca->stamp_xsec.
                         * For now we leave it, which means the time can be some
                         * number of msecs off until someone does a settimeofday().
                         */
                }
                smp_tb_synchronized = 1;
        } else {
                if (cpu_nr == 0) {
                        /* wait for all the others */
                        while (atomic_read(&ready) < smp_num_cpus)
                                barrier();
                        atomic_set(&ready, 1);
                        /* freeze the timebase */
                        rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
                        mb();
                        frozen = 1;
                        set_tb(0, 0);
                        paca[0].next_jiffy_update_tb = 0;
                        smp_space_timers(smp_num_cpus);
                        while (atomic_read(&ready) < smp_num_cpus)
                                barrier();
                        /* thaw the timebase again */
                        rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
                        mb();
                        frozen = 0;
                        tb_last_stamp = get_tb();
                        systemcfg->tb_orig_stamp = tb_last_stamp;
                        smp_tb_synchronized = 1;
                } else {
                        atomic_inc(&ready);
                        while (!frozen)
                                barrier();
                        set_tb(0, 0);
                        mb();
                        atomic_inc(&ready);
                        while (frozen)
                                barrier();
                }
        }

        if (OpenPIC_Addr) {
                do_openpic_setup_cpu();
        } else {
                if (cpu_nr > 0)
                        xics_setup_cpu();
        }
}

static void
smp_xics_message_pass(int target, int msg, unsigned long data, int wait)
{
        int i;

        for (i = 0; i < smp_num_cpus; ++i) {
                if (target == MSG_ALL || target == i
                    || (target == MSG_ALL_BUT_SELF
                        && i != smp_processor_id())) {
                        set_bit(msg, &xics_ipi_message[i]);
                        mb();
                        xics_cause_IPI(i);
                }
        }
}

static int
smp_xics_probe(void)
{
        return systemcfg->processorCount;
}

/* This is called very early. */
void smp_init_pSeries(void)
{
        if (naca->interrupt_controller == IC_OPEN_PIC) {
                ppc_md.smp_message_pass = smp_openpic_message_pass;
                ppc_md.smp_probe        = smp_chrp_probe;
        } else {
                ppc_md.smp_message_pass = smp_xics_message_pass;
                ppc_md.smp_probe        = smp_xics_probe;
        }
        ppc_md.smp_kick_cpu     = smp_kick_cpu;
        ppc_md.smp_setup_cpu    = smp_chrp_setup_cpu;
}

void smp_local_timer_interrupt(struct pt_regs *regs)
{
        if (!--(get_paca()->prof_counter)) {
                update_process_times(user_mode(regs));
                get_paca()->prof_counter = get_paca()->prof_multiplier;
        }
}
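
/* Handle one of the four IPI message types on the receiving CPU:
 * CALL_FUNCTION runs the pending cross-call, RESCHEDULE just sets
 * need_resched, and XMON_BREAK drops into the debugger (or the dump
 * handler, if one is registered). */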
void smp_message_recv(int msg, struct pt_regs *regs)
{
        atomic_inc(&ipi_recv);

        switch (msg) {
        case PPC_MSG_CALL_FUNCTION:
#ifdef CONFIG_KDB
                kdb_smp_regs[smp_processor_id()] = regs;
#endif
                smp_call_function_interrupt();
                break;
        case PPC_MSG_RESCHEDULE:
                current->need_resched = 1;
                break;
#ifdef CONFIG_XMON
        case PPC_MSG_XMON_BREAK:
                /* ToDo: need an NMI way to handle this.  Soft disable? */
#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
                if (dump_ipi_function_ptr) {
                        printk(KERN_ALERT "got dump ipi...\n");
                        dump_ipi_function_ptr(regs);
                } else
#endif
                        xmon(regs);
                break;
#endif /* CONFIG_XMON */
        default:
                printk("SMP %d: smp_message_recv(): unknown msg %d\n",
                       smp_processor_id(), msg);
                break;
        }
}

void smp_send_reschedule(int cpu)
{
        if ((systemcfg->platform & PLATFORM_LPAR) &&
            (paca[cpu].yielded == 1)) {
#ifdef CONFIG_PPC_ISERIES
                HvCall_sendLpProd(cpu);
#else
                prod_processor(cpu);
#endif
        } else {
                smp_message_pass(cpu, PPC_MSG_RESCHEDULE, 0, 0);
        }
}

#ifdef CONFIG_XMON
void smp_send_xmon_break(int cpu)
{
        smp_message_pass(cpu, PPC_MSG_XMON_BREAK, 0, 0);
}
#endif /* CONFIG_XMON */

#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
void dump_send_ipi(int (*dump_ipi_callback)(struct pt_regs *))
{
        dump_ipi_function_ptr = dump_ipi_callback;
        if (dump_ipi_callback) {
                printk(KERN_ALERT "dump_send_ipi...\n");
                mb();
                smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_XMON_BREAK, 0, 0);
        }
}
#endif
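
/* Park the receiving CPU: interrupts off, spin forever.  smp_send_stop()
 * cross-calls this on every other CPU and then drops smp_num_cpus to 1. */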
static void stop_this_cpu(void *dummy)
{
        __cli();
        while (1)
                ;
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
        smp_num_cpus = 1;
}

/*
 * Structure and data for smp_call_function().  This is designed to minimise
 * static memory requirements.  It also looks cleaner.
 * Stolen from the i386 version.
 */
static spinlock_t call_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

static struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
} *call_data;
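
/*
 * Example usage (an illustrative sketch added here, not from the original
 * source -- say_hello is a hypothetical helper):
 *
 *      static void say_hello(void *unused)
 *      {
 *              printk("cpu %d here\n", smp_processor_id());
 *      }
 *
 *      smp_call_function(say_hello, NULL, 1, 1);   - run on all other CPUs,
 *                                                    waiting for completion
 *      say_hello(NULL);                            - and on this one
 */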

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <func> or have already executed it.
 *
 * You must not call this function with disabled interrupts, or from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
                      int wait)
{
        struct call_data_struct data;
        int ret = -1, cpus = smp_num_cpus - 1;
        int timeout;

        if (!cpus)
                return 0;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        spin_lock_bh(&call_lock);
        call_data = &data;
        /* Send a message to all other CPUs and wait for them to respond */
        smp_message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION, 0, 0);

        /* Wait for response */
        timeout = 8000000;
        while (atomic_read(&data.started) != cpus) {
                HMT_low();
                if (--timeout == 0) {
                        printk("smp_call_function on cpu %d: other cpus not responding (%d)\n",
                               smp_processor_id(), atomic_read(&data.started));
#ifdef CONFIG_XMON
                        xmon(NULL);
#endif
#ifdef CONFIG_KDB
                        kdb(KDB_REASON_CALL, 0, (kdb_eframe_t) 0);
#endif
#ifdef CONFIG_PPC_ISERIES
                        HvCall_terminateMachineSrc();
#endif
                        goto out;
                }
                barrier();
                udelay(1);
        }

        if (wait) {
                timeout = 1000000;
                while (atomic_read(&data.finished) != cpus) {
                        HMT_low();
                        if (--timeout == 0) {
                                printk("smp_call_function on cpu %d: other cpus not finishing (%d/%d)\n",
                                       smp_processor_id(), atomic_read(&data.finished), atomic_read(&data.started));
#ifdef CONFIG_PPC_ISERIES
                                HvCall_terminateMachineSrc();
#endif
                                goto out;
                        }
                        barrier();
                        udelay(1);
                }
        }
        ret = 0;

 out:
        call_data = NULL;
        HMT_medium();
        spin_unlock_bh(&call_lock);
        return ret;
}

void smp_call_function_interrupt(void)
{
        void (*func) (void *info);
        void *info;
        int wait;

        /* call_data will be NULL if the sender timed out while
         * waiting on us to receive the call.
         */
        if (!call_data)
                return;

        func = call_data->func;
        info = call_data->info;
        wait = call_data->wait;

        /*
         * Notify the initiating CPU that I've grabbed the data and am
         * about to execute the function.
         */
        atomic_inc(&call_data->started);
        /*
         * At this point the info structure may be out of scope unless wait==1.
         */
        (*func)(info);
        if (wait)
                atomic_inc(&call_data->finished);
}

extern unsigned long decr_overclock;
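
/* Bring up the secondaries from the boot CPU: set up each CPU's paca and
 * segment table, probe the platform for the CPU count, fork an idle task
 * for each secondary, kick it, and then poll cpu_callin_map for up to
 * 5000 * 100us = 0.5s waiting for it to call in. */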
void __init smp_boot_cpus(void)
{
        extern struct current_set_struct current_set[];
        extern void __secondary_start_chrp(void);
        int i, cpu_nr;
        struct task_struct *p;
        unsigned long sp;

        printk("Entering SMP Mode...\n");
        PPCDBG(PPCDBG_SMP, "smp_boot_cpus: start.  NR_CPUS = 0x%lx\n", NR_CPUS);

        smp_num_cpus = 1;
        smp_store_cpu_info(0);
        cpu_online_map = 1UL;

        /*
         * assume for now that the first cpu booted is
         * cpu 0, the master -- Cort
         */
        cpu_callin_map[0] = 1;
        current->processor = 0;

        init_idle();

        for (i = 0; i < NR_CPUS; i++) {
                paca[i].prof_counter = 1;
                paca[i].prof_multiplier = 1;
                if (i != 0) {
                        /*
                         * Processor 0's segment table is statically
                         * initialized to real address STAB0_PHYS_ADDR.  The
                         * other processors' tables are created and
                         * initialized here.
                         */
                        paca[i].xStab_data.virt = (unsigned long)&stab_array[PAGE_SIZE * (i-1)];
                        memset((void *)paca[i].xStab_data.virt, 0, PAGE_SIZE);
                        paca[i].xStab_data.real = __v2a(paca[i].xStab_data.virt);
                        paca[i].default_decr = tb_ticks_per_jiffy / decr_overclock;
                }
        }

        /*
         * XXX very rough, assumes 20 bus cycles to read a cache line,
         * timebase increments every 4 bus cycles, 32kB L1 data cache.
         */
        cacheflush_time = 5 * 1024;

        /* Probe arch for CPUs */
        cpu_nr = ppc_md.smp_probe();

        printk("Probe found %d CPUs\n", cpu_nr);

        /*
         * only check for cpus we know exist.  We keep the callin map
         * with cpus at the bottom -- Cort
         */
        if (cpu_nr > max_cpus)
                cpu_nr = max_cpus;

#ifdef CONFIG_PPC_ISERIES
        smp_space_timers(cpu_nr);
#endif

        printk("Waiting for %d CPUs\n", cpu_nr - 1);

        for (i = 1; i < cpu_nr; i++) {
                int c;
                struct pt_regs regs;

                /* create a process for the processor */
                /* we don't care about the values in regs since we'll
                   never reschedule the forked task. */
                /* We DO care about one bit in the pt_regs we
                   pass to do_fork.  That is the MSR_FP bit in
                   regs.msr.  If that bit is on, then do_fork
                   (via copy_thread) will call giveup_fpu.
                   giveup_fpu will get a pointer to our (current's)
                   last register save area via current->thread.regs
                   and using that pointer will turn off the MSR_FP,
                   MSR_FE0 and MSR_FE1 bits.  At this point, this
                   pointer is pointing to some arbitrary point within
                   our stack. */

                memset(&regs, 0, sizeof(struct pt_regs));

                if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
                        panic("failed fork for CPU %d", i);
                p = init_task.prev_task;
                if (!p)
                        panic("No idle task for CPU %d", i);

                PPCDBG(PPCDBG_SMP, "\tProcessor %d, task = 0x%lx\n", i, p);

                del_from_runqueue(p);
                unhash_process(p);
                init_tasks[i] = p;

                p->processor = i;
                p->cpus_runnable = 1 << i; /* we schedule the first task manually */
                current_set[i].task = p;
                sp = ((unsigned long)p) + sizeof(union task_union)
                        - STACK_FRAME_OVERHEAD;
                current_set[i].sp_real = (void *)__v2a(sp);

                /* wake up the cpu */
                ppc_md.smp_kick_cpu(i);

                /*
                 * wait to see if the cpu made a callin (is actually up).
                 * use this value that I found through experimentation.
                 * -- Cort
                 */
                for (c = 5000; c && !cpu_callin_map[i]; c--) {
                        udelay(100);
                }

                if (cpu_callin_map[i]) {
                        printk("Processor %d found.\n", i);
                        PPCDBG(PPCDBG_SMP, "\tProcessor %d found.\n", i);
                        /* this syncs the decrementers -- Cort */
                        smp_num_cpus++;
                } else {
                        printk("Processor %d is stuck.\n", i);
                        PPCDBG(PPCDBG_SMP, "\tProcessor %d is stuck.\n", i);
                }
        }

        /* Set up CPU 0 last (important) */
        ppc_md.smp_setup_cpu(0);

        if (smp_num_cpus < 2) {
                tb_last_stamp = get_tb();
                smp_tb_synchronized = 1;
        }
}

void __init smp_commence(void)
{
        /*
         * Lets the callins below out of their loop.
         */
        PPCDBG(PPCDBG_SMP, "smp_commence: start\n");
        wmb();
        smp_commenced = 1;
}
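
/* Run on each secondary as it comes up: record its PVR, start its
 * decrementer, mark it in cpu_callin_map and cpu_online_map, then spin
 * until smp_commence() releases it. */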
void __init smp_callin(void)
{
        int cpu = current->processor;

        smp_store_cpu_info(cpu);
        set_dec(paca[cpu].default_decr);
        cpu_callin_map[cpu] = 1;

        ppc_md.smp_setup_cpu(cpu);

        init_idle();

        set_bit(smp_processor_id(), &cpu_online_map);

        while (!smp_commenced) {
                barrier();
        }
        __sti();
}

/* intel needs this */
void __init initialize_secondary(void)
{
}

/* Activate a secondary processor. */
int start_secondary(void *unused)
{
        int cpu;

        cpu = current->processor;
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        smp_callin();

        get_paca()->yielded = 0;

        if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
                vpa_init(cpu);
        }

        /* Go into the idle loop. */
        return cpu_idle(NULL);
}

void __init smp_setup(char *str, int *ints)
{
}

int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

/* This is called once for each processor to record its PVR. */
void __init smp_store_cpu_info(int id)
{
        paca[id].pvr = _get_PVR();
}

static int __init maxcpus(char *str)
{
        get_option(&str, &max_cpus);
        return 1;
}

__setup("maxcpus=", maxcpus);
