or1k_soc_on_altera_embedded_dev_kit (OpenCores Subversion repository), trunk rev 19
https://opencores.org/ocsvn/or1k_soc_on_altera_embedded_dev_kit/or1k_soc_on_altera_embedded_dev_kit/trunk
File: linux-2.6/linux-2.6.24/arch/x86/kernel/smp_32.c
/*
 *      Intel SMP support routines.
 *
 *      (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *      (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *      This code is released under the GNU General Public License version 2 or
 *      later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <mach_apic.h>

/*
 *      Some notes on x86 processor bugs affecting SMP operation:
 *
 *      Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *      The Linux implications for SMP are handled as follows:
 *
 *      Pentium III / [Xeon]
 *              None of the E1AP-E3AP errata are visible to the user.
 *
 *      E1AP.   see PII A1AP
 *      E2AP.   see PII A2AP
 *      E3AP.   see PII A3AP
 *
 *      Pentium II / [Xeon]
 *              None of the A1AP-A3AP errata are visible to the user.
 *
 *      A1AP.   see PPro 1AP
 *      A2AP.   see PPro 2AP
 *      A3AP.   see PPro 7AP
 *
 *      Pentium Pro
 *              None of the 1AP-9AP errata are visible to the normal user,
 *      except occasional delivery of 'spurious interrupt' as trap #15.
 *      This is very rare and a non-problem.
 *
 *      1AP.    Linux maps APIC as non-cacheable
 *      2AP.    worked around in hardware
 *      3AP.    fixed in C0 and above steppings microcode update.
 *              Linux does not use excessive STARTUP_IPIs.
 *      4AP.    worked around in hardware
 *      5AP.    symmetric IO mode (normal Linux operation) not affected.
 *              'noapic' mode has vector 0xf filled out properly.
 *      6AP.    'noapic' mode might be affected - fixed in later steppings
 *      7AP.    We do not assume writes to the LVT deasserting IRQs
 *      8AP.    We do not enable low power mode (deep sleep) during MP bootup
 *      9AP.    We do not use mixed mode
 *
 *      Pentium
 *              There is a marginal case where REP MOVS on 100MHz SMP
 *      machines with B stepping processors can fail. XXX should provide
 *      an L1cache=Writethrough or L1cache=off option.
 *
 *              B stepping CPUs may hang. There are hardware workarounds
 *      for this. We warn about it in case your board doesn't have the
 *      workarounds. Basically that's so I can tell anyone with a B stepping
 *      CPU and SMP problems "tough".
 *
 *      Specific items [From Pentium Processor Specification Update]
 *
 *      1AP.    Linux doesn't use remote read
 *      2AP.    Linux doesn't trust APIC errors
 *      3AP.    We work around this
 *      4AP.    Linux never generates 3 interrupts of the same priority
 *              to cause a lost local interrupt.
 *      5AP.    Remote read is never used
 *      6AP.    not affected - worked around in hardware
 *      7AP.    not affected - worked around in hardware
 *      8AP.    worked around in hardware - we get explicit CS errors if not
 *      9AP.    only 'noapic' mode affected. Might generate spurious
 *              interrupts, we log only the first one and count the
 *              rest silently.
 *      10AP.   not affected - worked around in hardware
 *      11AP.   Linux reads the APIC between writes to avoid this, as per
 *              the documentation. Make sure you preserve this as it affects
 *              the C stepping chips too.
 *      12AP.   not affected - worked around in hardware
 *      13AP.   not affected - worked around in hardware
 *      14AP.   we always deassert INIT during bootup
 *      15AP.   not affected - worked around in hardware
 *      16AP.   not affected - worked around in hardware
 *      17AP.   not affected - worked around in hardware
 *      18AP.   not affected - worked around in hardware
 *      19AP.   not affected - worked around in BIOS
 *
 *      If this sounds worrying, believe me these bugs are either ___RARE___
 *      or are signal timing bugs worked around in hardware, and there's
 *      next to nothing of note from the C stepping upwards.
 */

DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
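
/*
 * Editorial note (not part of the original file): struct tlb_state itself is
 * defined in <asm/tlbflush.h>, not here.  A minimal sketch of the layout this
 * file relies on, consistent with the { &init_mm, 0 } initializer above and
 * the .active_mm/.state accesses below (cacheline padding omitted):
 *
 *      struct tlb_state {
 *              struct mm_struct *active_mm;    // mm whose flush IPIs this CPU accepts
 *              int state;                      // TLBSTATE_OK or TLBSTATE_LAZY
 *      };
 */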

/*
 * The following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */

static inline int __prepare_ICR (unsigned int shortcut, int vector)
{
        unsigned int icr = shortcut | APIC_DEST_LOGICAL;

        switch (vector) {
        default:
                icr |= APIC_DM_FIXED | vector;
                break;
        case NMI_VECTOR:
                icr |= APIC_DM_NMI;
                break;
        }
        return icr;
}

static inline int __prepare_ICR2 (unsigned int mask)
{
        return SET_APIC_DEST_FIELD(mask);
}

void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
        /*
         * Subtle. In the case of the 'never do double writes' workaround
         * we have to lock out interrupts to be safe.  As we don't care
         * about the value read we use an atomic rmw access to avoid costly
         * cli/sti.  Otherwise we use an even cheaper single atomic write
         * to the APIC.
         */
        unsigned int cfg;

        /*
         * Wait for idle.
         */
        apic_wait_icr_idle();

        /*
         * No need to touch the target chip field
         */
        cfg = __prepare_ICR(shortcut, vector);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
}

void fastcall send_IPI_self(int vector)
{
        __send_IPI_shortcut(APIC_DEST_SELF, vector);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
static inline void __send_IPI_dest_field(unsigned long mask, int vector)
{
        unsigned long cfg;

        /*
         * Wait for idle.
         */
        if (unlikely(vector == NMI_VECTOR))
                safe_apic_wait_icr_idle();
        else
                apic_wait_icr_idle();

        /*
         * prepare target chip field
         */
        cfg = __prepare_ICR2(mask);
        apic_write_around(APIC_ICR2, cfg);

        /*
         * program the ICR
         */
        cfg = __prepare_ICR(0, vector);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        apic_write_around(APIC_ICR, cfg);
}

/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
{
        unsigned long mask = cpus_addr(cpumask)[0];
        unsigned long flags;

        local_irq_save(flags);
        WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
        __send_IPI_dest_field(mask, vector);
        local_irq_restore(flags);
}
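
/*
 * Editorial note (not part of the original file): with the default "flat"
 * logical destination mode, cpu_to_logical_apicid(cpu) is simply 1 << cpu, so
 * the low byte of the cpumask used above doubles as the APIC logical
 * destination bitmap placed in bits 56-63 of the ICR; a mask containing CPUs
 * 0 and 2, for example, becomes the destination field 0x05.  Only eight CPUs
 * fit in that byte, hence the "smaller machines" comment; clustered and
 * larger configurations go through send_IPI_mask_sequence() below instead.
 */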

void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
        unsigned long flags;
        unsigned int query_cpu;

        /*
         * Hack. The clustered APIC addressing mode doesn't allow us to send
         * to an arbitrary mask, so we do a unicast to each CPU instead. This
         * should be modified to do 1 message per cluster ID - mbligh
         */

        local_irq_save(flags);
        for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
                if (cpu_isset(query_cpu, mask)) {
                        __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
                                              vector);
                }
        }
        local_irq_restore(flags);
}

#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */

/*
 *      Smarter SMP flushing macros.
 *              c/o Linus Torvalds.
 *
 *      These mean you can really definitely utterly forget about
 *      writing to user space from interrupts. (It's not allowed anyway).
 *
 *      Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

static cpumask_t flush_cpumask;
static struct mm_struct * flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us.
 */
void leave_mm(unsigned long cpu)
{
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm. This is not synchronized with
 *      the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *      for the wrong mm, and in the worst case we perform a superfluous
 *      tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *      Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *      was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *      cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *      flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush ipis],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are no
 * write/read ordering problems.
 */
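
/*
 * Editorial note (not part of the original file): for reference, a condensed
 * sketch of the switch_mm() side described above.  The real implementation
 * lives in <asm/mmu_context.h> and differs in detail:
 *
 *      if (prev != next) {                                     // case 1a
 *              cpu_clear(cpu, prev->cpu_vm_mask);              // 1a1
 *              per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; // 1a2
 *              per_cpu(cpu_tlbstate, cpu).active_mm = next;    // 1a3
 *              cpu_set(cpu, next->cpu_vm_mask);                // 1a4
 *              load_cr3(next->pgd);                            // 1a5
 *      } else {                                                // case 1b
 *              per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK; // 1b1
 *              if (!cpu_test_and_set(cpu, next->cpu_vm_mask))  // 1b2
 *                      load_cr3(next->pgd);                    // 1b3: leave_mm() ran
 *      }
 */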

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */

fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
{
        unsigned long cpu;

        cpu = get_cpu();

        if (!cpu_isset(cpu, flush_cpumask))
                goto out;
                /*
                 * This was a BUG() but until someone can quote me the
                 * line from the Intel manual that guarantees an IPI to
                 * multiple CPUs is retried _only_ on the erroring CPUs
                 * it's staying as a return
                 *
                 * BUG();
                 */

        if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == TLB_FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        leave_mm(cpu);
        }
        ack_APIC_irq();
        smp_mb__before_clear_bit();
        cpu_clear(cpu, flush_cpumask);
        smp_mb__after_clear_bit();
out:
        put_cpu_no_resched();
        __get_cpu_var(irq_stat).irq_tlb_count++;
}

void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
                             unsigned long va)
{
        cpumask_t cpumask = *cpumaskp;

        /*
         * A couple of (to be removed) sanity checks:
         *
         * - current CPU must not be in mask
         * - mask must exist :)
         */
        BUG_ON(cpus_empty(cpumask));
        BUG_ON(cpu_isset(smp_processor_id(), cpumask));
        BUG_ON(!mm);

#ifdef CONFIG_HOTPLUG_CPU
        /* If a CPU which we ran on has gone down, OK. */
        cpus_and(cpumask, cpumask, cpu_online_map);
        if (unlikely(cpus_empty(cpumask)))
                return;
#endif

        /*
         * I'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * AK: x86-64 has a faster method that could be ported.
         */
        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
        cpus_or(flush_cpumask, cpumask, flush_cpumask);
        /*
         * We have to send the IPI only to
         * the CPUs affected.
         */
        send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

        while (!cpus_empty(flush_cpumask))
                /* nothing. lockup detection does not belong here */
                cpu_relax();

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}

void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        local_flush_tlb();
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
        preempt_enable();
}

void flush_tlb_mm (struct mm_struct * mm)
{
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void do_flush_tlb_all(void *info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                leave_mm(cpu);
}

void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
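
/*
 * Editorial note (not part of the original file): in this kernel on_each_cpu()
 * takes (func, info, retry, wait); the trailing (1, 1) above are the historic
 * 'retry' flag and wait == 1, so flush_tlb_all() does not return until every
 * online CPU has run do_flush_tlb_all().
 */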

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
        WARN_ON(cpu_is_offline(cpu));
        send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

void lock_ipi_call_lock(void)
{
        spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
        spin_unlock_irq(&call_lock);
}

static struct call_data_struct *call_data;

static void __smp_call_function(void (*func) (void *info), void *info,
                                int nonatomic, int wait)
{
        struct call_data_struct data;
        int cpus = num_online_cpus() - 1;

        if (!cpus)
                return;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        mb();

        /* Send a message to all other CPUs and wait for them to respond */
        send_IPI_allbutself(CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        cpu_relax();
}

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int
native_smp_call_function_mask(cpumask_t mask,
                              void (*func)(void *), void *info,
                              int wait)
{
        struct call_data_struct data;
        cpumask_t allbutself;
        int cpus;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        /* Holding any lock stops cpus from going down. */
        spin_lock(&call_lock);

        allbutself = cpu_online_map;
        cpu_clear(smp_processor_id(), allbutself);

        cpus_and(mask, mask, allbutself);
        cpus = cpus_weight(mask);

        if (!cpus) {
                spin_unlock(&call_lock);
                return 0;
        }

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        mb();

        /* Send a message to other CPUs */
        if (cpus_equal(mask, allbutself))
                send_IPI_allbutself(CALL_FUNCTION_VECTOR);
        else
                send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        cpu_relax();
        spin_unlock(&call_lock);

        return 0;
}
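
/*
 * Editorial example (not part of the original file): a minimal sketch of how a
 * caller might use the smp_call_function_mask() wrapper around the hook above,
 * respecting the rules in the kerneldoc (the mask must not contain the current
 * CPU, and the function must be fast and non-blocking).  The example_* names
 * are hypothetical and exist only for illustration.
 */
static atomic_t example_acks = ATOMIC_INIT(0);

static void example_count_ack(void *info)
{
        /* Runs on every target CPU in interrupt context: keep it short. */
        atomic_inc(&example_acks);
}

static void example_ping_other_cpus(void)
{
        cpumask_t mask = cpu_online_map;

        /* The current CPU must not be in the mask. */
        cpu_clear(smp_processor_id(), mask);

        /* wait == 1: return only after every target CPU has run the function. */
        smp_call_function_mask(mask, example_count_ack, NULL, 1);
}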
604
 
605
static void stop_this_cpu (void * dummy)
606
{
607
        local_irq_disable();
608
        /*
609
         * Remove this CPU:
610
         */
611
        cpu_clear(smp_processor_id(), cpu_online_map);
612
        disable_local_APIC();
613
        if (cpu_data(smp_processor_id()).hlt_works_ok)
614
                for(;;) halt();
615
        for (;;);
616
}
617
 
618
/*
619
 * this function calls the 'stop' function on all other CPUs in the system.
620
 */
621
 
622
static void native_smp_send_stop(void)
623
{
624
        /* Don't deadlock on the call lock in panic */
625
        int nolock = !spin_trylock(&call_lock);
626
        unsigned long flags;
627
 
628
        local_irq_save(flags);
629
        __smp_call_function(stop_this_cpu, NULL, 0, 0);
630
        if (!nolock)
631
                spin_unlock(&call_lock);
632
        disable_local_APIC();
633
        local_irq_restore(flags);
634
}
635
 
636
/*
637
 * Reschedule call back. Nothing to do,
638
 * all the work is done automatically when
639
 * we return from the interrupt.
640
 */
641
fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
642
{
643
        ack_APIC_irq();
644
        __get_cpu_var(irq_stat).irq_resched_count++;
645
}
646
 
647
fastcall void smp_call_function_interrupt(struct pt_regs *regs)
648
{
649
        void (*func) (void *info) = call_data->func;
650
        void *info = call_data->info;
651
        int wait = call_data->wait;
652
 
653
        ack_APIC_irq();
654
        /*
655
         * Notify initiating CPU that I've grabbed the data and am
656
         * about to execute the function
657
         */
658
        mb();
659
        atomic_inc(&call_data->started);
660
        /*
661
         * At this point the info structure may be out of scope unless wait==1
662
         */
663
        irq_enter();
664
        (*func)(info);
665
        __get_cpu_var(irq_stat).irq_call_count++;
666
        irq_exit();
667
 
668
        if (wait) {
669
                mb();
670
                atomic_inc(&call_data->finished);
671
        }
672
}
673
 
674
static int convert_apicid_to_cpu(int apic_id)
675
{
676
        int i;
677
 
678
        for (i = 0; i < NR_CPUS; i++) {
679
                if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
680
                        return i;
681
        }
682
        return -1;
683
}
684
 
685
int safe_smp_processor_id(void)
686
{
687
        int apicid, cpuid;
688
 
689
        if (!boot_cpu_has(X86_FEATURE_APIC))
690
                return 0;
691
 
692
        apicid = hard_smp_processor_id();
693
        if (apicid == BAD_APICID)
694
                return 0;
695
 
696
        cpuid = convert_apicid_to_cpu(apicid);
697
 
698
        return cpuid >= 0 ? cpuid : 0;
699
}
700
 
701
struct smp_ops smp_ops = {
702
        .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
703
        .smp_prepare_cpus = native_smp_prepare_cpus,
704
        .cpu_up = native_cpu_up,
705
        .smp_cpus_done = native_smp_cpus_done,
706
 
707
        .smp_send_stop = native_smp_send_stop,
708
        .smp_send_reschedule = native_smp_send_reschedule,
709
        .smp_call_function_mask = native_smp_call_function_mask,
710
};
711
EXPORT_SYMBOL_GPL(smp_ops);
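
/*
 * Editorial note (not part of the original file): generic code reaches the
 * native_* routines above through thin wrappers around smp_ops, roughly of
 * the form below (see <asm/smp.h> in this tree; details may differ):
 *
 *      static inline void smp_send_reschedule(int cpu)
 *      {
 *              smp_ops.smp_send_reschedule(cpu);
 *      }
 *
 * Keeping the operations behind a table of function pointers is what allows
 * paravirtualized guests to substitute their own IPI and CPU bring-up
 * implementations.
 */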
