/* smp.c: Sparc SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <asm/head.h>
#include <asm/ptrace.h>

#include <linux/kernel.h>
#include <linux/tasks.h>
#include <linux/smp.h>

#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>

extern ctxd_t *srmmu_ctx_table_phys;
extern int linux_num_cpus;

struct tlog {
        unsigned long pc;
        unsigned long psr;
};

struct tlog trap_log[4][256];
unsigned long trap_log_ent[4] = { 0, 0, 0, 0, };

extern void calibrate_delay(void);

volatile unsigned long stuck_pc = 0;
volatile int smp_processors_ready = 0;

int smp_found_config = 0;
unsigned long cpu_present_map = 0;
int smp_num_cpus = 1;
int smp_threads_ready = 0;
unsigned char mid_xlate[NR_CPUS] = { 0, 0, 0, 0, };
volatile unsigned long cpu_callin_map[NR_CPUS] = { 0, };
volatile unsigned long smp_invalidate_needed[NR_CPUS] = { 0, };
volatile unsigned long smp_spinning[NR_CPUS] = { 0, };
struct cpuinfo_sparc cpu_data[NR_CPUS];
unsigned char boot_cpu_id = 0;
static int smp_activated = 0;
static volatile unsigned char smp_cpu_in_msg[NR_CPUS];
static volatile unsigned long smp_msg_data;
static volatile int smp_src_cpu;
static volatile int smp_msg_id;
volatile int cpu_number_map[NR_CPUS];
volatile int cpu_logical_map[NR_CPUS];
/* The only guaranteed locking primitive available on all Sparc
 * processors is 'ldstub [%reg + immediate], %dest_reg', which
 * atomically loads the byte at the effective address into dest_reg
 * and stores 0xff there afterwards.  A pretty lame locking primitive
 * compared to those of the Alpha or the Intel, no?  Most Sparcs also
 * have the 'swap' instruction, which is much better...
 */
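/* (Editorial sketch, not part of the original file: a spin-lock
 * acquire built from 'ldstub' alone looks roughly like
 *
 *      1:      ldstub  [%lock], %tmp   ! %tmp = *lock, *lock = 0xff
 *              orcc    %tmp, 0x0, %g0  ! was the byte already 0xff?
 *              bne     1b              ! yes, lock held, spin
 *               nop                    ! branch delay slot
 *
 * Reading back a zero byte means the lock was free and the 0xff we
 * stored has claimed it.)
 */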
klock_t kernel_flag = KLOCK_CLEAR;
volatile unsigned char active_kernel_processor = NO_PROC_ID;
volatile unsigned long kernel_counter = 0;
volatile unsigned long syscall_count = 0;
volatile unsigned long ipi_count;
#ifdef __SMP_PROF__
volatile unsigned long smp_spins[NR_CPUS] = { 0, };
volatile unsigned long smp_spins_syscall[NR_CPUS] = { 0, };
volatile unsigned long smp_spins_syscall_cur[NR_CPUS] = { 0, };
volatile unsigned long smp_spins_sys_idle[NR_CPUS] = { 0, };
volatile unsigned long smp_idle_count[1+NR_CPUS] = { 0, };
volatile unsigned long smp_idle_map = 0;
#endif

volatile unsigned long smp_proc_in_lock[NR_CPUS] = { 0, };
volatile int smp_process_available = 0;

/* #define SMP_DEBUG */

#ifdef SMP_DEBUG
#define SMP_PRINTK(x)   printk x
#else
#define SMP_PRINTK(x)
#endif

static volatile int smp_commenced = 0;

static char smp_buf[512];

char *smp_info(void)
{
        sprintf(smp_buf,
                "\n        CPU0\t\tCPU1\t\tCPU2\t\tCPU3\n"
                "State:  %s\t\t%s\t\t%s\t\t%s\n"
                "Lock:   %08lx\t\t%08lx\t%08lx\t%08lx\n"
                "\n"
                "klock: %x\n",
                (cpu_present_map & 1) ? ((active_kernel_processor == 0) ? "akp" : "online") : "offline",
                (cpu_present_map & 2) ? ((active_kernel_processor == 1) ? "akp" : "online") : "offline",
                (cpu_present_map & 4) ? ((active_kernel_processor == 2) ? "akp" : "online") : "offline",
                (cpu_present_map & 8) ? ((active_kernel_processor == 3) ? "akp" : "online") : "offline",
                smp_proc_in_lock[0], smp_proc_in_lock[1], smp_proc_in_lock[2],
                smp_proc_in_lock[3],
                kernel_flag);
        return smp_buf;
}
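/* (Editorial note, an assumption about intent: besides being an
 * atomic exchange, swap() below is used to force a value out of the
 * CPU's store buffer so other processors see it at once -- compare
 * the "push the store buffer" remarks in smp_message_pass().)
 */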

static inline unsigned long swap(volatile unsigned long *ptr, unsigned long val)
{
        __asm__ __volatile__("swap [%1], %0\n\t" :
                             "=&r" (val), "=&r" (ptr) :
                             "0" (val), "1" (ptr));
        return val;
}

/*
 * The bootstrap kernel entry code has set these up.  Save them
 * for a given CPU.
 */

void smp_store_cpu_info(int id)
{
        cpu_data[id].udelay_val = loops_per_sec; /* this is it on sparc. */
}

/*
 * Architecture-specific routine called by the kernel just before init
 * is fired off.  This allows the BP to have everything in order [we
 * hope].  At the end of this all the APs will hit the system
 * scheduler and off we go.  Each AP loads the system gdt's and jumps
 * through the kernel init into idle().  At this point the scheduler
 * will one day take over and give them jobs to do.  smp_callin is a
 * standard routine we use to track CPUs as they power up.
 */

void smp_commence(void)
{
        /*
         * Lets the callins below out of their loop.
         */
        local_flush_cache_all();
        local_flush_tlb_all();
        smp_commenced = 1;
        local_flush_cache_all();
        local_flush_tlb_all();
}

void smp_callin(void)
{
        int cpuid = smp_processor_id();

        sti();
        local_flush_cache_all();
        local_flush_tlb_all();
        calibrate_delay();
        smp_store_cpu_info(cpuid);
        local_flush_cache_all();
        local_flush_tlb_all();
        cli();

        /* Allow master to continue. */
        swap((unsigned long *)&cpu_callin_map[cpuid], 1);
        local_flush_cache_all();
        local_flush_tlb_all();
        while(!smp_commenced)
                barrier();
        local_flush_cache_all();
        local_flush_tlb_all();

        /* Fix idle thread fields. */
        current->mm->mmap->vm_page_prot = PAGE_SHARED;
        current->mm->mmap->vm_start = KERNBASE;
        current->mm->mmap->vm_end = init_task.mm->mmap->vm_end;

        local_flush_cache_all();
        local_flush_tlb_all();

        sti();
}
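/* (Summary added for clarity, derived from the code above and from
 * smp_boot_cpus() below: each slave announces itself by setting its
 * cpu_callin_map[] slot, which the boot CPU polls, then spins here
 * until smp_commence() raises smp_commenced.  That is the entire
 * boot handshake.)
 */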
void cpu_panic(void)
{
        printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
        panic("SMP bolixed\n");
}

/*
 * Cycle through the processors asking the PROM to start each one.
 */

extern struct prom_cpuinfo linux_cpus[NCPUS];
static struct linux_prom_registers penguin_ctable;

void smp_boot_cpus(void)
{
        int cpucount = 0;
        int i = 0;

        printk("Entering SparclinuxMultiPenguin(SMP) Mode...\n");

        penguin_ctable.which_io = 0;
        penguin_ctable.phys_addr = (char *) srmmu_ctx_table_phys;
        penguin_ctable.reg_size = 0;

        sti();
        cpu_present_map = 0;
        for(i = 0; i < linux_num_cpus; i++)
                cpu_present_map |= (1<<i);
        for(i = 0; i < NR_CPUS; i++)
                cpu_number_map[i] = -1;
        for(i = 0; i < NR_CPUS; i++)
                cpu_logical_map[i] = -1;
        mid_xlate[boot_cpu_id] = (linux_cpus[boot_cpu_id].mid & ~8);
        cpu_number_map[boot_cpu_id] = 0;
        cpu_logical_map[0] = boot_cpu_id;
        active_kernel_processor = boot_cpu_id;
        smp_store_cpu_info(boot_cpu_id);
        set_irq_udt(0);
        local_flush_cache_all();
        if(linux_num_cpus == 1)
                return;  /* Not an MP box. */
        for(i = 0; i < NR_CPUS; i++) {
                if(i == boot_cpu_id)
                        continue;

                if(cpu_present_map & (1 << i)) {
                        extern unsigned long sparc_cpu_startup;
                        unsigned long *entry = &sparc_cpu_startup;
                        int timeout;

                        /* See trampoline.S for details... */
                        entry += ((i-1) * 6);

                        /* whirrr, whirrr, whirrrrrrrrr... */
                        printk("Starting CPU %d at %p\n", i, entry);
                        mid_xlate[i] = (linux_cpus[i].mid & ~8);
                        local_flush_cache_all();
                        prom_startcpu(linux_cpus[i].prom_node,
                                      &penguin_ctable, 0, (char *)entry);

                        /* wheee... it's going... */
                        for(timeout = 0; timeout < 5000000; timeout++) {
                                if(cpu_callin_map[i])
                                        break;
                                udelay(100);
                        }
                        if(cpu_callin_map[i]) {
                                /* Another "Red Snapper". */
                                cpucount++;
                                cpu_number_map[i] = i;
                                cpu_logical_map[i] = i;
                        } else {
                                printk("Penguin %d is stuck in the bottle.\n", i);
                        }
                }
                if(!(cpu_callin_map[i])) {
                        cpu_present_map &= ~(1 << i);
                        cpu_number_map[i] = -1;
                }
        }
        local_flush_cache_all();
        if(cpucount == 0) {
                printk("Error: only one Penguin found.\n");
                cpu_present_map = (1 << smp_processor_id());
        } else {
                unsigned long bogosum = 0;
                for(i = 0; i < NR_CPUS; i++) {
                        if(cpu_present_map & (1 << i))
                                bogosum += cpu_data[i].udelay_val;
                }
                printk("Total of %d Penguins activated (%lu.%02lu PenguinMIPS).\n",
                       cpucount + 1,
                       (bogosum + 2500)/500000,
                       ((bogosum + 2500)/5000)%100);
                smp_activated = 1;
                smp_num_cpus = cpucount + 1;
        }
        smp_processors_ready = 1;
}

static inline void send_ipi(unsigned long target_map, int irq)
{
        int i;

        for(i = 0; i < 4; i++) {
                if((1<<i) & target_map)
                        set_cpu_int(mid_xlate[i], irq);
        }
}
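/* (Clarifying note, partly an assumption about helpers defined
 * elsewhere: mid_xlate[] maps a logical CPU number to its hardware
 * module ID, and set_cpu_int() posts an interrupt at level 'irq' to
 * that processor.  send_ipi() thus raises 'irq' on every CPU whose
 * bit is set in target_map.)
 */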
/*
 * A non-wait message cannot pass data or CPU source info.  The
 * current setup is only safe because the kernel lock owner is the
 * only one who may send a message.
 *
 * Wrapping this whole block in a spinlock is not the safe answer
 * either.  A processor may get stuck with irqs off waiting to send
 * a message and thus not reply to the processor spinning for an
 * answer...
 *
 * In the end invalidate ought to be the NMI and a very, very short
 * function (to avoid the old IDE disk problems), and other messages
 * should be sent with IRQs enabled in a civilised fashion.  That
 * will also boost performance.
 */

static volatile int message_cpu = NO_PROC_ID;

void smp_message_pass(int target, int msg, unsigned long data, int wait)
{
        unsigned long target_map;
        int p = smp_processor_id();
        int irq = 15;
        int i;

        /* Before processors have been placed into their initial
         * patterns do not send messages.
         */
        if(!smp_processors_ready)
                return;

        /* Skip the reschedule if we are waiting to clear a
         * message at this time.  The reschedule cannot wait,
         * but it is not critical.
         */
        if(msg == MSG_RESCHEDULE) {
                irq = 13;
                if(smp_cpu_in_msg[p])
                        return;
        }

        /* Sanity check that we don't re-enter this across CPUs.  Only
         * the kernel lock holder may send messages.  For a STOP_CPU
         * we are bringing the entire box to the fastest halt we can.
         * A reschedule carries no data and can occur during a flush...
         * guess which panic led me to notice this bug.
         */
        if(message_cpu != NO_PROC_ID && msg != MSG_STOP_CPU && msg != MSG_RESCHEDULE) {
                printk("CPU #%d: Message pass %d but pass in progress by %d of %d\n",
                       smp_processor_id(), msg, message_cpu, smp_msg_id);

                /* I don't know how to gracefully die so that debugging
                 * this doesn't completely eat up my filesystems...
                 * let's try this...
                 */
                smp_cpu_in_msg[p] = 0;  /* In case we come back here... */
                intr_count = 0;         /* ...and so panic doesn't barf. */
                smp_swap(&message_cpu, NO_PROC_ID); /* push the store buffer */
                sti();
                printk("spinning, please L1-A, type ctrace and send output to davem\n");
                while(1)
                        barrier();
        }
        smp_swap(&message_cpu, smp_processor_id()); /* store buffers... */

        /* We are busy. */
        smp_cpu_in_msg[p]++;

        /* Reschedule is currently special. */
        if(msg != MSG_RESCHEDULE) {
                smp_src_cpu = p;
                smp_msg_id = msg;
                smp_msg_data = data;
        }

#if 0
        printk("SMP message pass from cpu %d to cpu %d msg %d\n", p, target, msg);
#endif

        /* Set the target requirement. */
        for(i = 0; i < smp_num_cpus; i++)
                swap((unsigned long *) &cpu_callin_map[i], 0);
        if(target == MSG_ALL_BUT_SELF) {
                target_map = (cpu_present_map & ~(1<<p));
                swap((unsigned long *) &cpu_callin_map[p], 1);
        } else if(target == MSG_ALL) {
                target_map = cpu_present_map;
        } else {
                for(i = 0; i < smp_num_cpus; i++)
                        if(i != target)
                                swap((unsigned long *) &cpu_callin_map[i], 1);
                target_map = (1<<target);
        }

        /* Fire it off. */
        send_ipi(target_map, irq);

        switch(wait) {
        case 1:
                for(i = 0; i < smp_num_cpus; i++)
                        while(!cpu_callin_map[i])
                                barrier();
                break;
        case 2:
                for(i = 0; i < smp_num_cpus; i++)
                        while(smp_invalidate_needed[i])
                                barrier();
                break;
        case 3:
                /* For cross calls we hold message_cpu and smp_cpu_in_msg[]
                 * until all processors disperse.  Else we have _big_
                 * problems.
                 */
                return;
        }
        smp_cpu_in_msg[p]--;
        smp_swap(&message_cpu, NO_PROC_ID);
}
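/* (Usage example, taken from this file: smp_capture() below invokes
 *
 *      smp_message_pass(MSG_ALL_BUT_SELF, MSG_CAPTURE, 0, 1);
 *
 * which IPIs every other CPU and, because wait == 1, spins until
 * each target acknowledges through its cpu_callin_map[] slot.)
 */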
struct smp_funcall {
        smpfunc_t func;
        unsigned long arg1;
        unsigned long arg2;
        unsigned long arg3;
        unsigned long arg4;
        unsigned long arg5;
        unsigned long processors_in[NR_CPUS];  /* Set when ipi entered. */
        unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
} ccall_info;
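/* (Cross-call lifecycle, summarized from the code: the master fills
 * in ccall_info and IPIs the other CPUs via smp_message_pass(); each
 * slave, in smp_message_irq() below, sets its processors_in[] slot,
 * runs the function, then sets processors_out[].  smp_cross_call()
 * spins on both arrays with a timeout.)
 */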
/* Returns a failure code if, for example, any of the CPUs fail to
 * respond within a certain timeout period.
 */

#define CCALL_TIMEOUT   5000000 /* enough for initial testing */

/* #define DEBUG_CCALL */

/* Some nice day when we really thread the kernel I'd like to
 * synchronize this with either a broadcast condition variable, a
 * resource-adaptive generic mutex, or a convoy semaphore scheme of
 * some sort.  No reason we can't let multiple processors in here if
 * the appropriate locking is done.  Note that such a scheme assumes
 * we will have a prioritized ipi scheme using different software
 * level irqs.
 */
void smp_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                    unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
        unsigned long me = smp_processor_id();
        unsigned long flags;
        int i, timeout;

#ifdef DEBUG_CCALL
        printk("xc%d<", me);
#endif
        if(smp_processors_ready) {
                save_flags(flags); cli();
                if(me != active_kernel_processor)
                        goto cross_call_not_master;

                /* Init function glue. */
                ccall_info.func = func;
                ccall_info.arg1 = arg1;
                ccall_info.arg2 = arg2;
                ccall_info.arg3 = arg3;
                ccall_info.arg4 = arg4;
                ccall_info.arg5 = arg5;

                /* Init receive/complete mapping. */
                for(i = 0; i < smp_num_cpus; i++) {
                        ccall_info.processors_in[i] = 0;
                        ccall_info.processors_out[i] = 0;
                }
                ccall_info.processors_in[me] = 1;
                ccall_info.processors_out[me] = 1;

                /* Fire it off. */
                smp_message_pass(MSG_ALL_BUT_SELF, MSG_CROSS_CALL, 0, 3);

                /* For debugging purposes right now we can timeout
                 * on both callin and callexit.
                 */
                timeout = CCALL_TIMEOUT;
                for(i = 0; i < smp_num_cpus; i++) {
                        while(!ccall_info.processors_in[i] && timeout-- > 0)
                                barrier();
                        if(!ccall_info.processors_in[i])
                                goto procs_time_out;
                }
#ifdef DEBUG_CCALL
                printk("I");
#endif

                /* Run local copy. */
                func(arg1, arg2, arg3, arg4, arg5);

                /* Spin on proc dispersion. */
                timeout = CCALL_TIMEOUT;
                for(i = 0; i < smp_num_cpus; i++) {
                        while(!ccall_info.processors_out[i] && timeout-- > 0)
                                barrier();
                        if(!ccall_info.processors_out[i])
                                goto procs_time_out;
                }
#ifdef DEBUG_CCALL
                printk("O>");
#endif
                /* See wait case 3 in smp_message_pass()... */
                smp_cpu_in_msg[me]--;
                smp_swap(&message_cpu, NO_PROC_ID); /* store buffers... */
                restore_flags(flags);
                return; /* made it... */

procs_time_out:
                printk("smp: Wheee, penguin drops off the bus\n");
                smp_cpu_in_msg[me]--;
                message_cpu = NO_PROC_ID;
                restore_flags(flags);
                return; /* why me... why me... */
        }

        /* Just need to run local copy. */
        func(arg1, arg2, arg3, arg4, arg5);
        return;

cross_call_not_master:
        printk("Cross call initiated by non master cpu\n");
        printk("akp=%x me=%08lx\n", active_kernel_processor, me);
        restore_flags(flags);
        panic("penguin cross call");
}
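/* (Note, an assumption about definitions that live elsewhere,
 * presumably <asm/smp.h>: the xc0()..xc3() helpers used below are
 * taken to be macros expanding to smp_cross_call() with the unused
 * arguments zeroed, e.g. something like
 *
 *      #define xc1(func, arg1) smp_cross_call((func), (arg1), 0, 0, 0, 0)
 *
 * so each wrapper broadcasts one local flush routine.)
 */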
void smp_flush_cache_all(void)
{ xc0((smpfunc_t) local_flush_cache_all); }

void smp_flush_tlb_all(void)
{ xc0((smpfunc_t) local_flush_tlb_all); }

void smp_flush_cache_mm(struct mm_struct *mm)
{
        if(mm->context != NO_CONTEXT)
                xc1((smpfunc_t) local_flush_cache_mm, (unsigned long) mm);
}

void smp_flush_tlb_mm(struct mm_struct *mm)
{
        if(mm->context != NO_CONTEXT)
                xc1((smpfunc_t) local_flush_tlb_mm, (unsigned long) mm);
}

void smp_flush_cache_range(struct mm_struct *mm, unsigned long start,
                           unsigned long end)
{
        if(mm->context != NO_CONTEXT)
                xc3((smpfunc_t) local_flush_cache_range, (unsigned long) mm,
                    start, end);
}

void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
                         unsigned long end)
{
        if(mm->context != NO_CONTEXT)
                xc3((smpfunc_t) local_flush_tlb_range, (unsigned long) mm,
                    start, end);
}

void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{ xc2((smpfunc_t) local_flush_cache_page, (unsigned long) vma, page); }

void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{ xc2((smpfunc_t) local_flush_tlb_page, (unsigned long) vma, page); }

void smp_flush_page_to_ram(unsigned long page)
{ xc1((smpfunc_t) local_flush_page_to_ram, page); }

/* Reschedule call back. */
void smp_reschedule_irq(void)
{
        if(smp_processor_id() != active_kernel_processor)
                panic("SMP Reschedule on CPU #%d, but #%d is active.\n",
                      smp_processor_id(), active_kernel_processor);

        need_resched = 1;
}

/* XXX FIXME: this still doesn't work right... XXX */

/* #define DEBUG_CAPTURE */

static volatile unsigned long release = 1;
static volatile int capture_level = 0;

void smp_capture(void)
{
        unsigned long flags;

        if(!smp_activated || !smp_commenced)
                return;
#ifdef DEBUG_CAPTURE
        printk("C<%d>", smp_processor_id());
#endif
        save_flags(flags); cli();
        if(!capture_level) {
                release = 0;
                smp_message_pass(MSG_ALL_BUT_SELF, MSG_CAPTURE, 0, 1);
        }
        capture_level++;
        restore_flags(flags);
}

void smp_release(void)
{
        unsigned long flags;
        int i;

        if(!smp_activated || !smp_commenced)
                return;
#ifdef DEBUG_CAPTURE
        printk("R<%d>", smp_processor_id());
#endif
        save_flags(flags); cli();
        if(!(capture_level - 1)) {
                release = 1;
                for(i = 0; i < smp_num_cpus; i++)
                        while(cpu_callin_map[i])
                                barrier();
        }
        capture_level -= 1;
        restore_flags(flags);
}
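/* (Clarifying note on the pair above: smp_capture()/smp_release()
 * nest via capture_level.  Only the outermost capture clears
 * 'release' and sends the MSG_CAPTURE IPI; only the final release
 * sets 'release' again and waits for every captured CPU to clear
 * its cpu_callin_map[] slot.)
 */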
/* Park a processor.  We must watch for further IPIs that invalidate
 * our caches and TLBs, and we can only wait for and process
 * "lock-less" IPIs here.  Since such IPIs are non-maskable traps,
 * having traps enabled is enough to receive them.
 */

/* Message call back. */
void smp_message_irq(void)
{
        int i = smp_processor_id();

        switch(smp_msg_id) {
        case MSG_CROSS_CALL:
                /* Do it to it. */
                ccall_info.processors_in[i] = 1;
                ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3,
                                ccall_info.arg4, ccall_info.arg5);
                ccall_info.processors_out[i] = 1;
                break;

        /*
         * Halt other CPUs for a panic or reboot.
         */
        case MSG_STOP_CPU:
                sti();
                while(1)
                        barrier();

        default:
                printk("CPU #%d sent invalid cross CPU message to CPU #%d: %X(%lX).\n",
                       smp_src_cpu, smp_processor_id(), smp_msg_id, smp_msg_data);
                break;
        }
}