/* arch/i386/kernel/smp.c (uClinux-2.0.x), from the OpenCores or1k Subversion repository: https://opencores.org/ocsvn/or1k/or1k/trunk */
/*
 *      Intel MP v1.1/v1.4 specification support routines for multi-pentium
 *      hosts.
 *
 *      (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *      Supported by Caldera http://www.caldera.com.
 *      Much of the core SMP work is based on previous work by Thomas Radke, to
 *      whom a great many thanks are extended.
 *
 *      Thanks to Intel for making available several different Pentium and
 *      Pentium Pro MP machines.
 *
 *      This code is released under the GNU public license version 2 or
 *      later.
 *
 *      Fixes
 *              Felix Koop      :       NR_CPUS used properly
 *              Jose Renau      :       Handle single CPU case.
 *              Alan Cox        :       By repeated request 8) - Total BogoMIP report.
 *              Greg Wright     :       Fix for kernel stacks panic.
 *              Erich Boleyn    :       MP v1.4 and additional changes.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/i82489.h>
#include <linux/smp.h>
#include <asm/pgtable.h>
#include <asm/bitops.h>
#include <asm/smp.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif

/*
 *      Why isn't this somewhere standard ??
 */

extern __inline int max(int a,int b)
{
        if(a>b)
                return a;
        return b;
}


int smp_found_config=0;                                 /* Have we found an SMP box                             */

unsigned long cpu_present_map = 0;                      /* Bitmask of existing CPU's                            */
int smp_num_cpus = 1;                                   /* Total count of live CPU's                            */
int smp_threads_ready=0;                                /* Set when the idlers are all forked                   */
volatile int cpu_number_map[NR_CPUS];                   /* which CPU maps to which logical number               */
volatile int cpu_logical_map[NR_CPUS];                  /* which logical number maps to which CPU               */
volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};  /* We always use 0 the rest is ready for parallel delivery */
volatile unsigned long smp_invalidate_needed;           /* Used for the invalidate map that's also checked in the spinlock */
struct cpuinfo_x86 cpu_data[NR_CPUS];                   /* Per cpu bogomips and other parameters                */
static unsigned int num_processors = 1;                 /* Internal processor count                             */
static unsigned long io_apic_addr = 0xFEC00000;         /* Address of the I/O apic (not yet used)               */
unsigned char boot_cpu_id = 0;                          /* Processor that is doing the boot up                  */
static unsigned char *kstack_base,*kstack_end;          /* Kernel stack list pointers                           */
static int smp_activated = 0;                           /* Tripped once we need to start cross invalidating     */
int apic_version[NR_CPUS];                              /* APIC version number                                  */
static volatile int smp_commenced=0;                    /* Tripped when we start scheduling                     */
unsigned long apic_addr=0xFEE00000;                     /* Address of APIC (defaults to 0xFEE00000)             */
unsigned long nlong = 0;                                /* dummy used for apic_reg address + 0x20               */
unsigned char *apic_reg=((unsigned char *)(&nlong))-0x20;/* Later set to the vremap() of the APIC               */
unsigned long apic_retval;                              /* Just debugging the assembler..                       */
unsigned char *kernel_stacks[NR_CPUS];                  /* Kernel stack pointers for CPU's (debugging)          */

static volatile unsigned char smp_cpu_in_msg[NR_CPUS];  /* True if this processor is sending an IPI             */
static volatile unsigned long smp_msg_data;             /* IPI data pointer                                     */
static volatile int smp_src_cpu;                        /* IPI sender processor                                 */
static volatile int smp_msg_id;                         /* Message being sent                                   */

volatile unsigned long kernel_flag=0;                   /* Kernel spinlock                                      */
volatile unsigned char active_kernel_processor = NO_PROC_ID;    /* Processor holding kernel spinlock            */
volatile unsigned long kernel_counter=0;                /* Number of times the processor holds the lock         */
volatile unsigned long syscall_count=0;                 /* Number of times the processor holds the syscall lock */

volatile unsigned long ipi_count;                       /* Number of IPI's delivered                            */
#ifdef __SMP_PROF__
volatile unsigned long smp_spins[NR_CPUS]={0};          /* Count interrupt spins                                */
volatile unsigned long smp_spins_syscall[NR_CPUS]={0};  /* Count syscall spins                                  */
volatile unsigned long smp_spins_syscall_cur[NR_CPUS]={0};/* Count spins for the actual syscall                 */
volatile unsigned long smp_spins_sys_idle[NR_CPUS]={0}; /* Count spins for sys_idle                             */
volatile unsigned long smp_idle_count[1+NR_CPUS]={0,};  /* Count idle ticks                                     */
#endif
#if defined (__SMP_PROF__)
volatile unsigned long smp_idle_map=0;                  /* Map for idle processors                              */
#endif

volatile unsigned long  smp_proc_in_lock[NR_CPUS] = {0,};/* for computing process time */
volatile unsigned long smp_process_available=0;

/*#define SMP_DEBUG*/

#ifdef SMP_DEBUG
#define SMP_PRINTK(x)   printk x
#else
#define SMP_PRINTK(x)
#endif


/*
 *      Checksum an MP configuration block.
 */

static int mpf_checksum(unsigned char *mp, int len)
{
        int sum=0;
        while(len--)
                sum+=*mp++;
        return sum&0xFF;
}
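
/*
 *      Per the Intel MP 1.1/1.4 specification the checksum byte is chosen
 *      so that all bytes of the floating pointer structure (and of the
 *      configuration table) sum to zero mod 256, so any non-zero return
 *      from mpf_checksum() means the block is corrupt.
 */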

/*
 *      Processor encoding in an MP configuration block
 */

static char *mpc_family(int family,int model)
{
        static char n[32];
        static char *model_defs[]=
        {
                "80486DX","80486DX",
                "80486SX","80486DX/2 or 80487",
                "80486SL","Intel5X2(tm)",
                "Unknown","Unknown",
                "80486DX/4"
        };
        if(family==0x6)
                return("Pentium(tm) Pro");
        if(family==0x5)
                return("Pentium(tm)");
        if(family==0x0F && model==0x0F)
                return("Special controller");
        if(family==0x04 && model<9)
                return model_defs[model];
        sprintf(n,"Unknown CPU [%d:%d]",family, model);
        return n;
}
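
/*
 *      The caller below extracts these arguments from the CPUID-style
 *      signature held in mpc_cpufeature: bits 8-11 are the family and
 *      bits 4-7 the model.  E.g. a signature of 0x0543 gives family 5,
 *      model 4, which mpc_family() reports as "Pentium(tm)".
 */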

/*
 *      Read the MPC
 */

static int smp_read_mpc(struct mp_config_table *mpc)
{
        char str[16];
        int count=sizeof(*mpc);
        int apics=0;
        unsigned char *mpt=((unsigned char *)mpc)+count;

        if(memcmp(mpc->mpc_signature,MPC_SIGNATURE,4))
        {
                printk("Bad signature [%c%c%c%c].\n",
                        mpc->mpc_signature[0],
                        mpc->mpc_signature[1],
                        mpc->mpc_signature[2],
                        mpc->mpc_signature[3]);
                return 1;
        }
        if(mpf_checksum((unsigned char *)mpc,mpc->mpc_length))
        {
                printk("Checksum error.\n");
                return 1;
        }
        if(mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04)
        {
                printk("Bad Config Table version (%d)!!\n",mpc->mpc_spec);
                return 1;
        }
        memcpy(str,mpc->mpc_oem,8);
        str[8]=0;
        printk("OEM ID: %s ",str);
        memcpy(str,mpc->mpc_productid,12);
        str[12]=0;
        printk("Product ID: %s ",str);
        printk("APIC at: 0x%lX\n",mpc->mpc_lapic);

        /* set the local APIC address */
        apic_addr = mpc->mpc_lapic;

        /*
         *      Now process the configuration blocks.
         */

        while(count<mpc->mpc_length)
        {
                switch(*mpt)
                {
                        case MP_PROCESSOR:
                        {
                                struct mpc_config_processor *m=
                                        (struct mpc_config_processor *)mpt;
                                if(m->mpc_cpuflag&CPU_ENABLED)
                                {
                                        printk("Processor #%d %s APIC version %d\n",
                                                m->mpc_apicid,
                                                mpc_family((m->mpc_cpufeature&
                                                        CPU_FAMILY_MASK)>>8,
                                                        (m->mpc_cpufeature&
                                                                CPU_MODEL_MASK)>>4),
                                                m->mpc_apicver);
#ifdef SMP_DEBUG
                                        if(m->mpc_featureflag&(1<<0))
                                                printk("    Floating point unit present.\n");
                                        if(m->mpc_featureflag&(1<<7))
                                                printk("    Machine Exception supported.\n");
                                        if(m->mpc_featureflag&(1<<8))
                                                printk("    64 bit compare & exchange supported.\n");
                                        if(m->mpc_featureflag&(1<<9))
                                                printk("    Internal APIC present.\n");
#endif
                                        if(m->mpc_cpuflag&CPU_BOOTPROCESSOR)
                                        {
                                                SMP_PRINTK(("    Bootup CPU\n"));
                                                boot_cpu_id=m->mpc_apicid;
                                        }
                                        else    /* Boot CPU already counted */
                                                num_processors++;

                                        if(m->mpc_apicid>NR_CPUS)
                                                printk("Processor #%d unused. (Max %d processors).\n",m->mpc_apicid, NR_CPUS);
                                        else
                                        {
                                                cpu_present_map|=(1<<m->mpc_apicid);
                                                apic_version[m->mpc_apicid]=m->mpc_apicver;
                                        }
                                }
                                mpt+=sizeof(*m);
                                count+=sizeof(*m);
                                break;
                        }
                        case MP_BUS:
                        {
                                struct mpc_config_bus *m=
                                        (struct mpc_config_bus *)mpt;
                                memcpy(str,m->mpc_bustype,6);
                                str[6]=0;
                                SMP_PRINTK(("Bus #%d is %s\n",
                                        m->mpc_busid,
                                        str));
                                mpt+=sizeof(*m);
                                count+=sizeof(*m);
                                break;
                        }
                        case MP_IOAPIC:
                        {
                                struct mpc_config_ioapic *m=
                                        (struct mpc_config_ioapic *)mpt;
                                if(m->mpc_flags&MPC_APIC_USABLE)
                                {
                                        apics++;
                                        printk("I/O APIC #%d Version %d at 0x%lX.\n",
                                                m->mpc_apicid,m->mpc_apicver,
                                                m->mpc_apicaddr);
                                        io_apic_addr = m->mpc_apicaddr;
                                }
                                mpt+=sizeof(*m);
                                count+=sizeof(*m);
                                break;
                        }
                        case MP_INTSRC:
                        {
                                struct mpc_config_intsrc *m=
                                        (struct mpc_config_intsrc *)mpt;

                                mpt+=sizeof(*m);
                                count+=sizeof(*m);
                                break;
                        }
                        case MP_LINTSRC:
                        {
                                struct mpc_config_intlocal *m=
                                        (struct mpc_config_intlocal *)mpt;
                                mpt+=sizeof(*m);
                                count+=sizeof(*m);
                                break;
                        }
                }
        }
        if(apics>1)
                printk("Warning: Multiple APIC's not supported.\n");
        return num_processors;
}
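
/*
 *      The configuration table is a packed sequence of variable-length
 *      entries: per the MP spec a processor entry is 20 bytes and the
 *      bus, I/O APIC and interrupt entries are 8 bytes each, which is
 *      why each case above advances mpt and count by the sizeof() of
 *      its own record type.
 */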

/*
 *      Scan the memory blocks for an SMP configuration block.
 */

int smp_scan_config(unsigned long base, unsigned long length)
{
        unsigned long *bp=(unsigned long *)base;
        struct intel_mp_floating *mpf;

        SMP_PRINTK(("Scan SMP from %p for %ld bytes.\n",
                bp,length));
        if(sizeof(*mpf)!=16)
                printk("Error: MPF size\n");

        while(length>0)
        {
                if(*bp==SMP_MAGIC_IDENT)
                {
                        mpf=(struct intel_mp_floating *)bp;
                        if(mpf->mpf_length==1 &&
                                !mpf_checksum((unsigned char *)bp,16) &&
                                (mpf->mpf_specification == 1
                                 || mpf->mpf_specification == 4) )
                        {
                                printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
                                if(mpf->mpf_feature2&(1<<7))
                                        printk("    IMCR and PIC compatibility mode.\n");
                                else
                                        printk("    Virtual Wire compatibility mode.\n");
                                smp_found_config=1;
                                /*
                                 *      Now see if we need to read further.
                                 */
                                if(mpf->mpf_feature1!=0)
                                {
                                        unsigned long cfg;

                                        /*
                                         *      We need to know what the local
                                         *      APIC id of the boot CPU is!
                                         */

/*
 *
 *      HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK HACK
 *
 *      It's not just a crazy hack...  ;-)
 */
                                        /*
                                         *      Standard page mapping
                                         *      functions don't work yet.
                                         *      We know that page 0 is not
                                         *      used.  Steal it for now!
                                         */
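                                        /*
                                         *      pg0[0] is the first page table
                                         *      entry: writing (apic_addr | 7)
                                         *      maps virtual page 0 onto the
                                         *      local APIC with the present,
                                         *      writable and user bits set, so
                                         *      the APIC ID register can be
                                         *      read through a plain pointer
                                         *      before vremap() is available.
                                         */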

                                        cfg=pg0[0];
                                        pg0[0] = (apic_addr | 7);
                                        local_flush_tlb();

                                        boot_cpu_id = GET_APIC_ID(*((volatile unsigned long *) APIC_ID));

                                        /*
                                         *      Give it back
                                         */

                                        pg0[0]= cfg;
                                        local_flush_tlb();

/*
 *
 *      END OF HACK   END OF HACK   END OF HACK   END OF HACK   END OF HACK
 *
 */
                                        /*
                                         *      2 CPUs, numbered 0 & 1.
                                         */
                                        cpu_present_map=3;
                                        num_processors=2;
                                        printk("I/O APIC at 0xFEC00000.\n");
                                        printk("Bus#0 is ");
                                }
                                switch(mpf->mpf_feature1)
                                {
                                        case 1:
                                        case 5:
                                                printk("ISA\n");
                                                break;
                                        case 2:
                                                printk("EISA with no IRQ8 chaining\n");
                                                break;
                                        case 6:
                                        case 3:
                                                printk("EISA\n");
                                                break;
                                        case 4:
                                        case 7:
                                                printk("MCA\n");
                                                break;
                                        case 0:
                                                break;
                                        default:
                                                printk("???\nUnknown standard configuration %d\n",
                                                        mpf->mpf_feature1);
                                                return 1;
                                }
                                if(mpf->mpf_feature1>4)
                                {
                                        printk("Bus #1 is PCI\n");

                                        /*
                                         *      Set local APIC version to
                                         *      the integrated form.
                                         *      It's initialized to zero
                                         *      otherwise, representing
                                         *      a discrete 82489DX.
                                         */
                                        apic_version[0] = 0x10;
                                        apic_version[1] = 0x10;
                                }
                                /*
                                 *      Read the physical hardware table.
                                 *      Anything here will override the
                                 *      defaults.
                                 */
                                if(mpf->mpf_physptr)
                                        smp_read_mpc((void *)mpf->mpf_physptr);

                                /*
                                 *      Now that the boot CPU id is known,
                                 *      set some other information about it.
                                 */
                                nlong = boot_cpu_id<<24;        /* Dummy 'self' for bootup */
                                cpu_logical_map[0] = boot_cpu_id;

                                printk("Processors: %d\n", num_processors);
                                /*
                                 *      Only use the first configuration found.
                                 */
                                return 1;
                        }
                }
                bp+=4;
                length-=16;
        }

        return 0;
}
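
/*
 *      Callers pass in the regions where the MP spec says the floating
 *      pointer structure may live, e.g. the first kilobyte of the EBDA,
 *      the last kilobyte of base memory and the BIOS ROM area
 *      0xF0000-0xFFFFF.  The structure is 16 bytes long and 16-byte
 *      aligned, hence the bp+=4 / length-=16 stepping above.
 */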

/*
 *      Trampoline 80x86 program as an array.
 */

static unsigned char trampoline_data[]={
#include  "trampoline.hex"
};

/*
 *      Currently trivial. Write the real->protected mode
 *      bootstrap into the page concerned. The caller
 *      has made sure it's suitably aligned.
 */

static void install_trampoline(unsigned char *mp)
{
        memcpy(mp,trampoline_data,sizeof(trampoline_data));
}

/*
 *      We are called very early to get the low memory for the trampoline/kernel stacks.
 *      This has to be done by mm/init.c to parcel us out nice low memory. We allocate
 *      the kernel stacks at 4K, 8K, 12K... currently (0-03FF is preserved for SMM and
 *      other things).
 */

unsigned long smp_alloc_memory(unsigned long mem_base)
{
        int size=(num_processors-1)*PAGE_SIZE;          /* Number of stacks needed */
        /*
         *      Our stacks have to be below the 1Mb line, and mem_base on entry
         *      is 4K aligned.
         */

        if(mem_base+size>=0x9F000)
                panic("smp_alloc_memory: Insufficient low memory for kernel stacks.\n");
        kstack_base=(void *)mem_base;
        mem_base+=size;
        kstack_end=(void *)mem_base;
        return mem_base;
}
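
/*
 *      e.g. on a four-processor box this reserves 3*4096 = 12K of low
 *      memory (one page per secondary CPU); each page doubles as that
 *      CPU's trampoline target and its initial kernel stack.
 */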

/*
 *      Hand out stacks one at a time.
 */

static void *get_kernel_stack(void)
{
        void *stack=kstack_base;
        if(kstack_base>=kstack_end)
                return NULL;
        kstack_base+=PAGE_SIZE;
        return stack;
}


/*
 *      The bootstrap kernel entry code has set these up. Save them for
 *      a given CPU
 */

void smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c=&cpu_data[id];
        c->hard_math=hard_math;                 /* Always assumed same currently */
        c->x86=x86;
        c->x86_model=x86_model;
        c->x86_mask=x86_mask;
        c->x86_capability=x86_capability;
        c->fdiv_bug=fdiv_bug;
        c->wp_works_ok=wp_works_ok;             /* Always assumed the same currently */
        c->hlt_works_ok=hlt_works_ok;
        c->have_cpuid=have_cpuid;
        c->udelay_val=loops_per_sec;
        strcpy(c->x86_vendor_id, x86_vendor_id);
}

/*
 *      Architecture specific routine called by the kernel just before init is
 *      fired off. This allows the BP to have everything in order [we hope].
 *      At the end of this all the AP's will hit the system scheduling and off
 *      we go. Each AP will load the system gdt's and jump through the kernel
 *      init into idle(). At this point the scheduler will one day take over
 *      and give them jobs to do. smp_callin is a standard routine
 *      we use to track CPU's as they power up.
 */

void smp_commence(void)
{
        /*
         *      Lets the callin's below out of their loop.
         */
        smp_commenced=1;
}

void smp_callin(void)
{
        extern void calibrate_delay(void);
        int cpuid=GET_APIC_ID(apic_read(APIC_ID));
        unsigned long l;
        extern struct desc_struct idt_descriptor;
        extern int pentium_f00f_bug;

        if (pentium_f00f_bug) {
                __asm__ __volatile__("\tlidt %0": "=m" (idt_descriptor));
        }

        /*
         *      Activate our APIC
         */

        SMP_PRINTK(("CALLIN %d\n",smp_processor_id()));
        l=apic_read(APIC_SPIV);
        l|=(1<<8);              /* Enable */
        apic_write(APIC_SPIV,l);

#ifdef CONFIG_MTRR
        /*
         * checks the MTRR configuration of this application processor
         */
        check_mtrr_config();
#endif

        sti();
        /*
         *      Get our bogomips.
         */
        calibrate_delay();
        /*
         *      Save our processor parameters
         */
        smp_store_cpu_info(cpuid);
        /*
         *      Allow the master to continue.
         */
        set_bit(cpuid, (unsigned long *)&cpu_callin_map[0]);
        /*
         *      Until we are ready for SMP scheduling
         */
        load_ldt(0);
/*      printk("Testing faulting...\n");
        *(long *)0=1;            OOPS... */
        local_flush_tlb();
        while(!smp_commenced);
        if (cpu_number_map[cpuid] == -1)
                while(1);
        local_flush_tlb();
        SMP_PRINTK(("Commenced..\n"));

        load_TR(cpu_number_map[cpuid]);
/*      while(1);*/
}

/*
 *      Cycle through the processors sending pentium IPI's to boot each.
 */

void smp_boot_cpus(void)
{
        int i;
        int cpucount=0;
        unsigned long cfg;
        void *stack;
        extern unsigned long init_user_stack[];

        /*
         *      Initialize the logical to physical cpu number mapping
         */

        for (i = 0; i < NR_CPUS; i++)
                cpu_number_map[i] = -1;

        /*
         *      Setup boot CPU information
         */

        kernel_stacks[boot_cpu_id]=(void *)init_user_stack;     /* Set up for boot processor first */

        smp_store_cpu_info(boot_cpu_id);                        /* Final full version of the data */

        cpu_present_map |= (1 << smp_processor_id());
        cpu_number_map[boot_cpu_id] = 0;
        active_kernel_processor=boot_cpu_id;

        /*
         *      If we don't conform to the Intel MPS standard, get out
         *      of here now!
         */

        if (!smp_found_config)
                return;

        /*
         *      Map the local APIC into kernel space
         */

        apic_reg = vremap(apic_addr,4096);

        if(apic_reg == NULL)
                panic("Unable to map local apic.\n");

#ifdef SMP_DEBUG
        {
                int reg;

                /*
                 *      This is to verify that we're looking at
                 *      a real local APIC.  Check these against
                 *      your board if the CPUs fail to start for
                 *      no apparent reason.
                 */

                reg = apic_read(APIC_VERSION);
                SMP_PRINTK(("Getting VERSION: %x\n", reg));

                apic_write(APIC_VERSION, 0);
                reg = apic_read(APIC_VERSION);
                SMP_PRINTK(("Getting VERSION: %x\n", reg));

                /*
                 *      The two version reads above should print the same
                 *      NON-ZERO!!! numbers.  If the second one is zero,
                 *      there is a problem with the APIC write/read
                 *      definitions.
                 *
                 *      The next two are just to see if we have sane values.
                 *      They're only really relevant if we're in Virtual Wire
                 *      compatibility mode, but most boxes are these days.
                 */


                reg = apic_read(APIC_LVT0);
                SMP_PRINTK(("Getting LVT0: %x\n", reg));

                reg = apic_read(APIC_LVT1);
                SMP_PRINTK(("Getting LVT1: %x\n", reg));
        }
#endif

        /*
         *      Enable the local APIC
         */

        cfg=apic_read(APIC_SPIV);
        cfg|=(1<<8);            /* Enable APIC */
        apic_write(APIC_SPIV,cfg);

        udelay(10);

        /*
         *      Now scan the cpu present map and fire up the other CPUs.
         */

        SMP_PRINTK(("CPU map: %lx\n", cpu_present_map));

        for(i=0;i<NR_CPUS;i++)
        {
                /*
                 *      Don't even attempt to start the boot CPU!
                 */
                if (i == boot_cpu_id)
                        continue;

                if (cpu_present_map & (1 << i))
                {
                        unsigned long send_status, accept_status;
                        int timeout, num_starts, j;

                        /*
                         *      We need a kernel stack for each processor.
                         */

                        stack=get_kernel_stack();       /* We allocated these earlier */
                        if(stack==NULL)
                                panic("No memory for processor stacks.\n");
                        kernel_stacks[i]=stack;
                        install_trampoline(stack);

                        printk("Booting processor %d stack %p: ",i,stack);      /* So we set what's up   */

                        /*
                         *      This grunge runs the startup process for
                         *      the targeted processor.
                         */

                        SMP_PRINTK(("Setting warm reset code and vector.\n"));

                        /*
                         *      Install a writable page 0 entry.
                         */

                        cfg=pg0[0];

                        CMOS_WRITE(0xa, 0xf);
                        pg0[0]=7;
                        local_flush_tlb();
                        *((volatile unsigned short *) 0x469) = ((unsigned long)stack)>>4;
                        *((volatile unsigned short *) 0x467) = 0;

                        /*
                         *      Protect it again
                         */

                        pg0[0]= cfg;
                        local_flush_tlb();

                        /*
                         *      Be paranoid about clearing APIC errors.
                         */

                        if ( apic_version[i] & 0xF0 )
                        {
                                apic_write(APIC_ESR, 0);
                                accept_status = (apic_read(APIC_ESR) & 0xEF);
                        }

                        /*
                         *      Status is now clean
                         */

                        send_status =   0;
                        accept_status = 0;

                        /*
                         *      Starting actual IPI sequence...
                         */

                        SMP_PRINTK(("Asserting INIT.\n"));

                        /*
                         *      Turn INIT on
                         */

                        cfg=apic_read(APIC_ICR2);
                        cfg&=0x00FFFFFF;
                        apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));      /* Target chip          */
                        cfg=apic_read(APIC_ICR);
                        cfg&=~0xCDFFF;                                          /* Clear bits           */
                        cfg |= (APIC_DEST_FIELD | APIC_DEST_LEVELTRIG
                                | APIC_DEST_ASSERT | APIC_DEST_DM_INIT);
                        apic_write(APIC_ICR, cfg);                              /* Send IPI */

                        udelay(200);
                        SMP_PRINTK(("Deasserting INIT.\n"));

                        cfg=apic_read(APIC_ICR2);
                        cfg&=0x00FFFFFF;
                        apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));      /* Target chip          */
                        cfg=apic_read(APIC_ICR);
                        cfg&=~0xCDFFF;                                          /* Clear bits           */
                        cfg |= (APIC_DEST_FIELD | APIC_DEST_LEVELTRIG
                                | APIC_DEST_DM_INIT);
                        apic_write(APIC_ICR, cfg);                              /* Send IPI */

                        /*
                         *      Should we send STARTUP IPIs ?
                         *
                         *      Determine this based on the APIC version.
                         *      If we don't have an integrated APIC, don't
                         *      send the STARTUP IPIs.
                         */

                        if ( apic_version[i] & 0xF0 )
                                num_starts = 2;
                        else
                                num_starts = 0;

                        /*
                         *      Run STARTUP IPI loop.
                         */
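                        /*
                         *      For a STARTUP IPI the low 8 bits of the ICR
                         *      carry the start-up vector, i.e. the 4K page
                         *      number where the AP begins executing in real
                         *      mode; passing ((int)stack)>>12 below makes the
                         *      AP start in the trampoline that was copied
                         *      into its stack page.
                         */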

                        for (j = 1; !(send_status || accept_status)
                                    && (j <= num_starts) ; j++)
                        {
                                SMP_PRINTK(("Sending STARTUP #%d.\n",j));

                                apic_write(APIC_ESR, 0);

                                /*
                                 *      STARTUP IPI
                                 */

                                cfg=apic_read(APIC_ICR2);
                                cfg&=0x00FFFFFF;
                                apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(i));      /* Target chip          */
                                cfg=apic_read(APIC_ICR);
                                cfg&=~0xCDFFF;                                          /* Clear bits           */
                                cfg |= (APIC_DEST_FIELD
                                        | APIC_DEST_DM_STARTUP
                                        | (((int) stack) >> 12) );                      /* Boot on the stack    */
                                apic_write(APIC_ICR, cfg);                              /* Kick the second      */

                                timeout = 0;
                                do {
                                        udelay(10);
                                } while ( (send_status = (apic_read(APIC_ICR) & 0x1000))
                                          && (timeout++ < 1000));
                                udelay(200);

                                accept_status = (apic_read(APIC_ESR) & 0xEF);
                        }

                        if (send_status)                /* APIC never delivered?? */
                                printk("APIC never delivered???\n");
                        if (accept_status)              /* Send accept error */
                                printk("APIC delivery error (%lx).\n", accept_status);

                        if( !(send_status || accept_status) )
                        {
                                for(timeout=0;timeout<50000;timeout++)
                                {
                                        if(cpu_callin_map[0]&(1<<i))
                                                break;                          /* It has booted */
                                        udelay(100);                            /* Wait 5s total for a response */
                                }
                                if(cpu_callin_map[0]&(1<<i))
                                {
                                        cpucount++;
                                        /* number CPUs logically, starting from 1 (BSP is 0) */
                                        cpu_number_map[i] = cpucount;
                                        cpu_logical_map[cpucount] = i;
                                }
                                else
                                {
                                        if(*((volatile unsigned char *)8192)==0xA5)
                                                printk("Stuck ??\n");
                                        else
                                                printk("Not responding.\n");
                                }
                        }

                        /* mark "stuck" area as not stuck */
                        *((volatile unsigned long *)8192) = 0;
                }

                /*
                 *      Make sure we unmap all failed CPUs
                 */

                if (cpu_number_map[i] == -1)
                        cpu_present_map &= ~(1 << i);
        }

        /*
         *      Cleanup possible dangling ends...
         */

        /*
         *      Install writable page 0 entry.
         */

        cfg = pg0[0];
        pg0[0] = 3;      /* writeable, present, addr 0 */
        local_flush_tlb();

        /*
         *      Paranoid:  Set warm reset code and vector here back
         *      to default values.
         */

        CMOS_WRITE(0, 0xf);

        *((volatile long *) 0x467) = 0;

        /*
         *      Restore old page 0 entry.
         */

        pg0[0] = cfg;
        local_flush_tlb();

        /*
         *      Allow the user to impress friends.
         */

        if(cpucount==0)
        {
                printk("Error: only one processor found.\n");
                cpu_present_map=(1<<smp_processor_id());
        }
        else
        {
                unsigned long bogosum=0;
                for(i=0;i<32;i++)
                {
                        if(cpu_present_map&(1<<i))
                                bogosum+=cpu_data[i].udelay_val;
                }
                printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                        cpucount+1,
                        (bogosum+2500)/500000,
                        ((bogosum+2500)/5000)%100);
                smp_activated=1;
                smp_num_cpus=cpucount+1;
        }
}
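
/*
 *      udelay_val is loops_per_sec and one BogoMIP is 500000 loops per
 *      second, hence the divisions in the report printed at the end of
 *      smp_boot_cpus() above.  e.g. two CPUs with udelay_val 50000000
 *      each give bogosum = 100000000 and print "200.00".
 */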


/*
 *      A non-wait message cannot pass data or cpu source info. This current setup
 *      is only safe because the kernel lock owner is the only person who can send a message.
 *
 *      Wrapping this whole block in a spinlock is not the safe answer either. A processor may
 *      get stuck with irq's off waiting to send a message and thus not replying to the person
 *      spinning for a reply...
 *
 *      In the end flush tlb ought to be the NMI and a very very short function (to avoid the old
 *      IDE disk problems), and other messages sent with IRQ's enabled in a civilised fashion. That
 *      will also boost performance.
 */

void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
        unsigned long cfg;
        unsigned long target_map;
        int p=smp_processor_id();
        int irq=0x2d;                                                           /* IRQ 13 */
        int ct=0;
        static volatile int message_cpu = NO_PROC_ID;

        /*
         *      During boot up send no messages
         */

        if(!smp_activated || !smp_commenced)
                return;


        /*
         *      Skip the reschedule if we are waiting to clear a
         *      message at this time. The reschedule cannot wait
         *      but is not critical.
         */

        if(msg==MSG_RESCHEDULE)                                                 /* Reschedules we do via trap 0x30 */
        {
                irq=0x30;
                if(smp_cpu_in_msg[p])
                        return;
        }

        /*
         *      Sanity check we don't re-enter this across CPU's. Only the kernel
         *      lock holder may send messages. For a STOP_CPU we are bringing the
         *      entire box to the fastest halt we can. A reschedule carries
         *      no data and can occur during a flush... guess which panic it took
         *      for me to notice this bug...
         */

        if(message_cpu!=NO_PROC_ID && msg!=MSG_STOP_CPU && msg!=MSG_RESCHEDULE)
        {
                panic("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
                        smp_processor_id(),msg,message_cpu, smp_msg_id);
        }
        message_cpu=smp_processor_id();


        /*
         *      We are busy
         */

        smp_cpu_in_msg[p]++;

        /*
         *      Reschedule is currently special
         */

        if(msg!=MSG_RESCHEDULE)
        {
                smp_src_cpu=p;
                smp_msg_id=msg;
                smp_msg_data=data;
        }

/*      printk("SMP message pass #%d to %d of %d\n",
                p, msg, target);*/

        /*
         *      Wait for the APIC to become ready - the wait should never be
         *      needed. It's a debugging check really.
         */

        while(ct<1000)
        {
                cfg=apic_read(APIC_ICR);
                if(!(cfg&(1<<12)))
                        break;
                ct++;
                udelay(10);
        }

        /*
         *      Just pray... there is nothing more we can do
         */

        if(ct==1000)
                printk("CPU #%d: previous IPI still not cleared after 10ms\n", smp_processor_id());

        /*
         *      Program the APIC to deliver the IPI
         */

        cfg=apic_read(APIC_ICR2);
        cfg&=0x00FFFFFF;
        apic_write(APIC_ICR2, cfg|SET_APIC_DEST_FIELD(target));                 /* Target chip                  */
        cfg=apic_read(APIC_ICR);
        cfg&=~0xFDFFF;                                                          /* Clear bits                   */
        cfg|=APIC_DEST_FIELD|APIC_DEST_DM_FIXED|irq;                            /* Send an IRQ 13               */

        /*
         *      Set the target requirement
         */

        if(target==MSG_ALL_BUT_SELF)
        {
                cfg|=APIC_DEST_ALLBUT;
                target_map=cpu_present_map;
                cpu_callin_map[0]=(1<<smp_src_cpu);
        }
        else if(target==MSG_ALL)
        {
                cfg|=APIC_DEST_ALLINC;
                target_map=cpu_present_map;
                cpu_callin_map[0]=0;
        }
        else
        {
                target_map=(1<<target);
                cpu_callin_map[0]=0;
        }

        /*
         *      Send the IPI. The write to APIC_ICR fires this off.
         */

        apic_write(APIC_ICR, cfg);

        /*
         *      Spin waiting for completion
         */

        switch(wait)
        {
                case 1:
                        while(cpu_callin_map[0]!=target_map);           /* Spin on the pass             */
                        break;
                case 2:
                        while(smp_invalidate_needed);                   /* Wait for invalidate map to clear */
                        break;
        }

        /*
         *      Record our completion
         */

        smp_cpu_in_msg[p]--;
        message_cpu=NO_PROC_ID;
}

/*
 *      This is fraught with deadlocks. Linus does a flush tlb at a whim
 *      even with IRQ's off. We have to avoid a pair of crossing flushes
 *      or we are doomed.  See the notes about smp_message_pass.
 */

void smp_flush_tlb(void)
{
        unsigned long flags;
        if(smp_activated && smp_processor_id()!=active_kernel_processor)
                panic("CPU #%d: Attempted flush tlb IPI when not AKP(=%d)\n",smp_processor_id(),active_kernel_processor);
/*      printk("SMI-");*/

        /*
         *      The assignment is safe because it's volatile so the compiler cannot reorder it,
         *      because the i586 has strict memory ordering and because only the kernel lock holder
         *      may issue a tlb flush. If you break any one of those three, change this to an
         *      atomic bus-locked OR.
         */

        smp_invalidate_needed=cpu_present_map&~(1<<smp_processor_id());

        /*
         *      Processors spinning on the lock will see this IRQ late. The smp_invalidate_needed map will
         *      ensure they don't do a spurious flush tlb or miss one.
         */

        save_flags(flags);
        cli();
        smp_message_pass(MSG_ALL_BUT_SELF, MSG_INVALIDATE_TLB, 0L, 2);

        /*
         *      Flush the local TLB
         */

        local_flush_tlb();

        restore_flags(flags);

        /*
         *      Completed.
         */

/*      printk("SMID\n");*/
}

/*
 *      Reschedule call back
 */

void smp_reschedule_irq(int cpl, struct pt_regs *regs)
{
#ifdef DEBUGGING_SMP_RESCHED
        static int ct=0;
        if(ct==0)
        {
                printk("Beginning scheduling on CPU#%d\n",smp_processor_id());
                ct=1;
        }
#endif
        if(smp_processor_id()!=active_kernel_processor)
                panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
                        smp_processor_id(), active_kernel_processor);

        need_resched=1;

        /*
         *      Clear the IPI
         */
        apic_read(APIC_SPIV);           /* Dummy read */
        apic_write(APIC_EOI, 0);        /* Docs say use 0 for future compatibility */
}

/*
 *      Message call back.
 */

void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs)
{
        int i=smp_processor_id();
/*      static int n=0;
        if(n++<NR_CPUS)
                printk("IPI %d->%d(%d,%ld)\n",smp_src_cpu,i,smp_msg_id,smp_msg_data);*/
        switch(smp_msg_id)
        {
                case 0: /* IRQ 13 testing - boring */
                        return;

                /*
                 *      A TLB flush is needed.
                 */

                case MSG_INVALIDATE_TLB:
                        if(clear_bit(i,(unsigned long *)&smp_invalidate_needed))
                                local_flush_tlb();
                        set_bit(i, (unsigned long *)&cpu_callin_map[0]);
                /*      cpu_callin_map[0]|=1<<smp_processor_id();*/
                        break;

                /*
                 *      Halt other CPU's for a panic or reboot
                 */
                case MSG_STOP_CPU:
                        while(1)
                        {
                                if(cpu_data[smp_processor_id()].hlt_works_ok)
                                        __asm__("hlt");
                        }
                default:
                        printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
                                smp_src_cpu,smp_processor_id(),smp_msg_id,smp_msg_data);
                        break;
        }
        /*
         *      Clear the IPI, so we can receive future IPI's
         */

        apic_read(APIC_SPIV);           /* Dummy read */
        apic_write(APIC_EOI, 0);        /* Docs say use 0 for future compatibility */
}

void irq_deadlock_detected(void)
{
  printk("IRQ DEADLOCK DETECTED BY CPU %d\n", smp_processor_id());
  __asm__("hlt");
}

void non_irq_deadlock_detected(void)
{
  printk("NON-IRQ DEADLOCK DETECTED BY CPU %d\n", smp_processor_id());
  __asm__("hlt");
}
