#ifndef __ASM_SMPBOOT_H
#define __ASM_SMPBOOT_H

/* enum for clustered_apic_mode values */
enum {
	CLUSTERED_APIC_NONE = 0,
	CLUSTERED_APIC_XAPIC,
	CLUSTERED_APIC_NUMAQ
};

#ifdef CONFIG_X86_CLUSTERED_APIC
extern unsigned int apic_broadcast_id;
extern unsigned char clustered_apic_mode;
extern unsigned char esr_disable;
extern unsigned char int_delivery_mode;
extern unsigned int int_dest_addr_mode;
extern int cyclone_setup(char *);

static inline void detect_clustered_apic(char *oem, char *prod)
{
	/*
	 * Can't recognize Summit xAPICs at present, so use the OEM ID.
	 */
	if (!strncmp(oem, "IBM ENSW", 8) && !strncmp(prod, "VIGIL SMP", 9)) {
		clustered_apic_mode = CLUSTERED_APIC_XAPIC;
		apic_broadcast_id = APIC_BROADCAST_ID_XAPIC;
		int_dest_addr_mode = APIC_DEST_PHYSICAL;
		int_delivery_mode = dest_Fixed;
		esr_disable = 1;
		/* Start cyclone clock */
		cyclone_setup(0);
	/* check for ACPI tables */
	} else if (!strncmp(oem, "IBM", 3) &&
		   (!strncmp(prod, "SERVIGIL", 8) ||
		    !strncmp(prod, "EXA", 3) ||
		    !strncmp(prod, "RUTHLESS", 8))) {
		clustered_apic_mode = CLUSTERED_APIC_XAPIC;
		apic_broadcast_id = APIC_BROADCAST_ID_XAPIC;
		int_dest_addr_mode = APIC_DEST_PHYSICAL;
		int_delivery_mode = dest_Fixed;
		esr_disable = 1;
		/* Start cyclone clock */
		cyclone_setup(0);
	} else if (!strncmp(oem, "IBM NUMA", 8)) {
		clustered_apic_mode = CLUSTERED_APIC_NUMAQ;
		apic_broadcast_id = APIC_BROADCAST_ID_APIC;
		int_dest_addr_mode = APIC_DEST_LOGICAL;
		int_delivery_mode = dest_LowestPrio;
		esr_disable = 1;
	}
}
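
/*
 * Illustrative only (the caller lives outside this header): the MP-table
 * parser is expected to pass the OEM ID and product ID strings it reads
 * from the MP configuration table header, e.g.
 *
 *	detect_clustered_apic(mpc->mpc_oem, mpc->mpc_productid);
 *
 * Those fields are space-padded rather than NUL-terminated, which is why
 * the checks above use strncmp() with explicit lengths.
 */
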
#define INT_DEST_ADDR_MODE (int_dest_addr_mode)
#define INT_DELIVERY_MODE (int_delivery_mode)
#else /* !CONFIG_X86_CLUSTERED_APIC */
#define apic_broadcast_id (APIC_BROADCAST_ID_APIC)
#define clustered_apic_mode (CLUSTERED_APIC_NONE)
#define esr_disable (0)
#define detect_clustered_apic(x,y)
#define INT_DEST_ADDR_MODE (APIC_DEST_LOGICAL)	/* logical delivery */
#define INT_DELIVERY_MODE (dest_LowestPrio)
#endif /* CONFIG_X86_CLUSTERED_APIC */

#define BAD_APICID 0xFFu

#define TRAMPOLINE_LOW phys_to_virt((clustered_apic_mode == CLUSTERED_APIC_NUMAQ) ? 0x8 : 0x467)
#define TRAMPOLINE_HIGH phys_to_virt((clustered_apic_mode == CLUSTERED_APIC_NUMAQ) ? 0xa : 0x469)
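
/*
 * Background note (not part of the original header): 0x467/0x469 is the
 * BIOS warm-reset vector in the BIOS data area (40:67), where the SMP boot
 * code stores the real-mode offset (low word) and segment (high word) of
 * the startup trampoline; per the macros above, NUMA-Q uses the low-memory
 * words at 0x8/0xa instead.
 */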

#define boot_cpu_apicid ((clustered_apic_mode == CLUSTERED_APIC_NUMAQ) ? boot_cpu_logical_apicid : boot_cpu_physical_apicid)

extern unsigned char raw_phys_apicid[NR_CPUS];

/*
 * How to map from a CPU number in the cpu_present_map to its APIC ID.
 */
static inline int cpu_present_to_apicid(int mps_cpu)
{
	if (clustered_apic_mode == CLUSTERED_APIC_XAPIC)
		return raw_phys_apicid[mps_cpu];
	if (clustered_apic_mode == CLUSTERED_APIC_NUMAQ)
		return (mps_cpu / 4) * 16 + (1 << (mps_cpu % 4));
	return mps_cpu;
}
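
/*
 * Worked example for the NUMA-Q case above (illustrative): mps_cpu 5 is
 * the second CPU on the second quad, so the result is
 * (5/4)*16 + (1 << (5%4)) = 16 + 2 = 0x12, i.e. cluster 1 in the high
 * nibble and a one-bit-per-CPU mask (bit 1) in the low nibble, which is a
 * logical APIC ID.
 */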

static inline unsigned long apicid_to_phys_cpu_present(int apicid)
{
	if (clustered_apic_mode)
		return 1UL << (((apicid >> 4) << 2) + (apicid & 0x3));
	return 1UL << apicid;
}
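
/*
 * Worked example (illustrative): in clustered mode the physical APIC ID
 * carries the quad number in the high nibble and the CPU within the quad
 * in the low bits, so apicid 0x11 (quad 1, CPU 1) yields
 * 1UL << ((1 << 2) + 1) = 1UL << 5, i.e. bit 5 of the present map.
 */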

#define physical_to_logical_apicid(phys_apic) ((1ul << ((phys_apic) & 0x3)) | ((phys_apic) & 0xF0u))
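
/*
 * Worked example (illustrative): physical APIC ID 0x11 (quad 1, CPU 1)
 * maps to logical APIC ID (1ul << 1) | 0x10 = 0x12, keeping the quad in
 * the high nibble and turning the low bits into a one-hot CPU mask.
 */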

/*
 * Mappings between logical cpu number and logical / physical apicid.
 * The first four macros are trivial, but it keeps the abstraction consistent.
 */
extern volatile int logical_apicid_2_cpu[];
extern volatile int cpu_2_logical_apicid[];
extern volatile int physical_apicid_2_cpu[];
extern volatile int cpu_2_physical_apicid[];

#define logical_apicid_to_cpu(apicid)	logical_apicid_2_cpu[apicid]
#define cpu_to_logical_apicid(cpu)	cpu_2_logical_apicid[cpu]
#define physical_apicid_to_cpu(apicid)	physical_apicid_2_cpu[apicid]
#define cpu_to_physical_apicid(cpu)	cpu_2_physical_apicid[cpu]
#ifdef CONFIG_MULTIQUAD			/* use logical IDs to bootstrap */
#define boot_apicid_to_cpu(apicid)	logical_apicid_2_cpu[apicid]
#define cpu_to_boot_apicid(cpu)		cpu_2_logical_apicid[cpu]
#else /* !CONFIG_MULTIQUAD */		/* use physical IDs to bootstrap */
#define boot_apicid_to_cpu(apicid)	physical_apicid_2_cpu[apicid]
#define cpu_to_boot_apicid(cpu)		cpu_2_physical_apicid[cpu]
#endif /* CONFIG_MULTIQUAD */

#ifdef CONFIG_X86_CLUSTERED_APIC
static inline int target_cpus(void)
{
	static int cpu;

	switch (clustered_apic_mode) {
	case CLUSTERED_APIC_NUMAQ:
		/* Broadcast interrupts to the local quad only. */
		return APIC_BROADCAST_ID_APIC;
	case CLUSTERED_APIC_XAPIC:
		/* Round-robin the interrupts across CPUs. */
		cpu = (cpu + 1) % smp_num_cpus;
		return cpu_to_physical_apicid(cpu);
	default:
		break;
	}
	return cpu_online_map;
}
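
/*
 * Illustrative behaviour of the xAPIC round-robin above: with
 * smp_num_cpus == 4, successive calls return the physical APIC IDs of
 * logical CPUs 1, 2, 3, 0, 1, ... since the static counter advances by
 * one on every call.
 */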
#else /* !CONFIG_X86_CLUSTERED_APIC */
#define target_cpus() (cpu_online_map)
#endif /* CONFIG_X86_CLUSTERED_APIC */
#endif /* __ASM_SMPBOOT_H */