/*
 * File:        mca.c
 * Purpose:     Generic MCA handling layer
 *
 * Updated for latest kernel
 * Copyright (C) 2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Copyright (C) 2002 Dell Inc.
 * Copyright (C) Matt Domsch (Matt_Domsch@dell.com)
 *
 * Copyright (C) 2002 Intel
 * Copyright (C) Jenna Hall (jenna.s.hall@intel.com)
 *
 * Copyright (C) 2001 Intel
 * Copyright (C) Fred Lewis (frederick.v.lewis@intel.com)
 *
 * Copyright (C) 2000 Intel
 * Copyright (C) Chuck Fleckenstein (cfleck@co.intel.com)
 *
 * Copyright (C) 1999, 2004 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander (vijay@engr.sgi.com)
 *
 * 03/04/15 D. Mosberger Added INIT backtrace support.
 * 02/03/25 M. Domsch   GUID cleanups
 *
 * 02/01/04 J. Hall     Aligned MCA stack to 16 bytes, added platform vs. CPU
 *                      error flag, set SAL default return values, changed
 *                      error record structure to linked list, added init call
 *                      to sal_get_state_info_size().
 *
 * 01/01/03 F. Lewis    Added setup of CMCI and CPEI IRQs, logging of corrected
 *                      platform errors, completed code for logging of
 *                      corrected & uncorrected machine check errors, and
 *                      updated for conformance with Nov. 2000 revision of the
 *                      SAL 3.0 spec.
 * 00/03/29 C. Fleckenstein  Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
 *                           added min save state dump, added INIT handler.
 *
 * 2003-12-08 Keith Owens <kaos@sgi.com>
 *            smp_call_function() must not be called from interrupt context (can
 *            deadlock on tasklist_lock).  Use keventd to call smp_call_function().
 *
 * 2004-02-01 Keith Owens <kaos@sgi.com>
 *            Avoid deadlock when using printk() for MCA and INIT records.
 *            Delete all record printing code, moved to salinfo_decode in user space.
 *            Mark variables and functions static where possible.
 *            Delete dead variables and functions.
 *            Reorder to remove the need for forward declarations and to consolidate
 *            related code.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/tqueue.h>

#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>

#include <asm/irq.h>
#include <asm/hw_irq.h>

#if defined(IA64_MCA_DEBUG_INFO)
# define IA64_MCA_DEBUG(fmt...) printk(fmt)
#else
# define IA64_MCA_DEBUG(fmt...)
#endif
extern void show_stack(struct task_struct *);

typedef struct ia64_fptr {
        unsigned long fp;
        unsigned long gp;
} ia64_fptr_t;

/* Used by mca_asm.S */
ia64_mca_sal_to_os_state_t      ia64_sal_to_os_handoff_state;
ia64_mca_os_to_sal_state_t      ia64_os_to_sal_handoff_state;
u64                             ia64_mca_proc_state_dump[512];
u64                             ia64_mca_stack[1024] __attribute__((aligned(16)));
u64                             ia64_mca_stackframe[32];
u64                             ia64_mca_bspstore[1024];
u64                             ia64_init_stack[INIT_TASK_SIZE/8] __attribute__((aligned(16)));
u64                             ia64_mca_serialize;

/* In mca_asm.S */
extern void                     ia64_monarch_init_handler (void);
extern void                     ia64_slave_init_handler (void);

static ia64_mc_info_t           ia64_mc_info;

extern struct hw_interrupt_type irq_type_iosapic_level;

struct ia64_mca_tlb_info ia64_mca_tlb_list[NR_CPUS];

#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
#define CMC_HISTORY_LENGTH    5

static struct timer_list cpe_poll_timer;
static struct timer_list cmc_poll_timer;
/*
 * This variable tells whether we are currently in polling mode.
 * Start with this in the wrong state so we won't play with timers
 * before the system is ready.
 */
static int cmc_polling_enabled = 1;

/*
 * Clearing this variable prevents CPE polling from getting activated
 * in mca_late_init.  Use it if your system doesn't provide a CPEI,
 * but encounters problems retrieving CPE logs.  This should only be
 * necessary for debugging.
 */
static int cpe_poll_enabled = 1;

extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);

static struct tq_struct cmc_disable_tq, cmc_enable_tq;

/*
 * IA64_MCA log support
 */
#define IA64_MAX_LOGS           2       /* Double-buffering for nested MCAs */
#define IA64_MAX_LOG_TYPES      4       /* MCA, INIT, CMC, CPE */

typedef struct ia64_state_log_s
{
        spinlock_t      isl_lock;
        int             isl_index;
        unsigned long   isl_count;
        ia64_err_rec_t  *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
} ia64_state_log_t;

static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];

#define IA64_LOG_ALLOCATE(it, size) \
        {ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
                (ia64_err_rec_t *)alloc_bootmem(size); \
        ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
                (ia64_err_rec_t *)alloc_bootmem(size);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
#define IA64_LOG_INDEX_INC(it) \
    {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
    ia64_state_log[it].isl_count++;}
#define IA64_LOG_INDEX_DEC(it) \
    ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
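/*
 * An illustrative sketch (not part of the original source) of how the
 * two-deep buffering above rotates.  isl_index always names the NEXT
 * buffer; CURR is the other one, so flipping the index is what publishes
 * a freshly fetched record.  Assuming isl_index starts at 0:
 */
#if 0
        void *next, *curr;

        next = IA64_LOG_NEXT_BUFFER(SAL_INFO_TYPE_MCA); /* isl_log[0] */
        /* ... SAL fills the NEXT buffer ... */
        IA64_LOG_INDEX_INC(SAL_INFO_TYPE_MCA);          /* isl_index: 0 -> 1, isl_count++ */
        curr = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA); /* isl_log[0] again: the new record */
#endif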
 
/*
 * ia64_log_init
 *      Reset the OS ia64 log buffer
 * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 * Outputs  :   None
 */
static void
ia64_log_init(int sal_info_type)
{
        u64     max_size = 0;

        IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
        IA64_LOG_LOCK_INIT(sal_info_type);

        // SAL will tell us the maximum size of any error record of this type
        max_size = ia64_sal_get_state_info_size(sal_info_type);
        if (!max_size)
                /* alloc_bootmem() doesn't like zero-sized allocations! */
                return;

        // set up OS data structures to hold error info
        IA64_LOG_ALLOCATE(sal_info_type, max_size);
        memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
        memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
}

/*
 * ia64_log_get
 *
 *      Get the current MCA log from SAL and copy it into the OS log buffer.
 *
 *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 *              irq_safe    whether you can use printk at this point
 *  Outputs :   size        (total record length)
 *              *buffer     (ptr to error record)
 *
 */
static u64
ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
{
        sal_log_record_header_t     *log_buffer;
        u64                         total_len = 0;
        int                         s;

        IA64_LOG_LOCK(sal_info_type);

        /* Get the process state information */
        log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);

        total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);

        if (total_len) {
                IA64_LOG_INDEX_INC(sal_info_type);
                IA64_LOG_UNLOCK(sal_info_type);
                if (irq_safe) {
                        IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. "
                                       "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len);
                }
                *buffer = (u8 *) log_buffer;
                return total_len;
        } else {
                IA64_LOG_UNLOCK(sal_info_type);
                return 0;
        }
}

/*
 *  ia64_mca_log_sal_error_record
 *
 *  This function retrieves a specified error record type from SAL
 *  and wakes up any processes waiting for error records.
 *
 *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE/INIT)
 */
static void
ia64_mca_log_sal_error_record(int sal_info_type)
{
        u8 *buffer;
        u64 size;
        int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA && sal_info_type != SAL_INFO_TYPE_INIT;
        static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };

        size = ia64_log_get(sal_info_type, &buffer, irq_safe);
        if (!size)
                return;

        salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);

        if (irq_safe)
                printk(KERN_INFO "CPU %d: SAL log contains %s error record\n",
                        smp_processor_id(),
                        sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");

        /* Clear logs from corrected errors in case there's no user-level logger */
        if (sal_info_type == SAL_INFO_TYPE_CPE || sal_info_type == SAL_INFO_TYPE_CMC)
                ia64_sal_clear_state_info(sal_info_type);
}
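/*
 * A note on irq_safe above (added for clarity): CMC and CPE records arrive
 * through ordinary interrupt handlers, where printk() is usable, while MCA
 * and INIT records arrive through the SAL handoff paths, where printk()
 * could deadlock (see the 2004-02-01 changelog entry at the top of this
 * file), so console logging is suppressed for the latter.
 */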
 
/*
 * Platform-dependent error handling
 */
#ifndef PLATFORM_MCA_HANDLERS

static void
ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
{
        IA64_MCA_DEBUG("%s: received interrupt. CPU:%d vector = %#x\n",
                       __FUNCTION__, smp_processor_id(), cpe_irq);

        /* SAL spec states this should run with interrupts enabled */
        local_irq_enable();

        /* Get the CPE error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
}

#define print_symbol(fmt, addr) printk(fmt, "(no symbol)");

static void
show_min_state (pal_min_state_area_t *minstate)
{
        u64 iip = minstate->pmsa_iip + ((struct ia64_psr *)(&minstate->pmsa_ipsr))->ri;
        u64 xip = minstate->pmsa_xip + ((struct ia64_psr *)(&minstate->pmsa_xpsr))->ri;

        printk("NaT bits\t%016lx\n", minstate->pmsa_nat_bits);
        printk("pr\t\t%016lx\n", minstate->pmsa_pr);
        printk("b0\t\t%016lx ", minstate->pmsa_br0); print_symbol("%s\n", minstate->pmsa_br0);
        printk("ar.rsc\t\t%016lx\n", minstate->pmsa_rsc);
        printk("cr.iip\t\t%016lx ", iip); print_symbol("%s\n", iip);
        printk("cr.ipsr\t\t%016lx\n", minstate->pmsa_ipsr);
        printk("cr.ifs\t\t%016lx\n", minstate->pmsa_ifs);
        printk("xip\t\t%016lx ", xip); print_symbol("%s\n", xip);
        printk("xpsr\t\t%016lx\n", minstate->pmsa_xpsr);
        printk("xfs\t\t%016lx\n", minstate->pmsa_xfs);
        printk("b1\t\t%016lx ", minstate->pmsa_br1);
        print_symbol("%s\n", minstate->pmsa_br1);

        printk("\nstatic registers r0-r15:\n");
        printk(" r0- 3 %016lx %016lx %016lx %016lx\n",
               0UL, minstate->pmsa_gr[0], minstate->pmsa_gr[1], minstate->pmsa_gr[2]);
        printk(" r4- 7 %016lx %016lx %016lx %016lx\n",
               minstate->pmsa_gr[3], minstate->pmsa_gr[4],
               minstate->pmsa_gr[5], minstate->pmsa_gr[6]);
        printk(" r8-11 %016lx %016lx %016lx %016lx\n",
               minstate->pmsa_gr[7], minstate->pmsa_gr[8],
               minstate->pmsa_gr[9], minstate->pmsa_gr[10]);
        printk("r12-15 %016lx %016lx %016lx %016lx\n",
               minstate->pmsa_gr[11], minstate->pmsa_gr[12],
               minstate->pmsa_gr[13], minstate->pmsa_gr[14]);

        printk("\nbank 0:\n");
        printk("r16-19 %016lx %016lx %016lx %016lx\n",
               minstate->pmsa_bank0_gr[0], minstate->pmsa_bank0_gr[1],
               minstate->pmsa_bank0_gr[2], minstate->pmsa_bank0_gr[3]);
        printk("r20-23 %016lx %016lx %016lx %016lx\n",
               minstate->pmsa_bank0_gr[4], minstate->pmsa_bank0_gr[5],
               minstate->pmsa_bank0_gr[6], minstate->pmsa_bank0_gr[7]);
        printk("r24-27 %016lx %016lx %016lx %016lx\n",
               minstate->pmsa_bank0_gr[8], minstate->pmsa_bank0_gr[9],
               minstate->pmsa_bank0_gr[10], minstate->pmsa_bank0_gr[11]);
        printk("r28-31 %016lx %016lx %016lx %016lx\n",
               minstate->pmsa_bank0_gr[12], minstate->pmsa_bank0_gr[13],
               minstate->pmsa_bank0_gr[14], minstate->pmsa_bank0_gr[15]);

        printk("\nbank 1:\n");
        printk("r16-19 %016lx %016lx %016lx %016lx\n",
               minstate->pmsa_bank1_gr[0], minstate->pmsa_bank1_gr[1],
               minstate->pmsa_bank1_gr[2], minstate->pmsa_bank1_gr[3]);
        printk("r20-23 %016lx %016lx %016lx %016lx\n",
               minstate->pmsa_bank1_gr[4], minstate->pmsa_bank1_gr[5],
               minstate->pmsa_bank1_gr[6], minstate->pmsa_bank1_gr[7]);
        printk("r24-27 %016lx %016lx %016lx %016lx\n",
               minstate->pmsa_bank1_gr[8], minstate->pmsa_bank1_gr[9],
               minstate->pmsa_bank1_gr[10], minstate->pmsa_bank1_gr[11]);
        printk("r28-31 %016lx %016lx %016lx %016lx\n",
               minstate->pmsa_bank1_gr[12], minstate->pmsa_bank1_gr[13],
               minstate->pmsa_bank1_gr[14], minstate->pmsa_bank1_gr[15]);
}

static void
fetch_min_state (pal_min_state_area_t *ms, struct pt_regs *pt, struct switch_stack *sw)
{
        u64 *dst_banked, *src_banked, bit, shift, nat_bits;
        int i;

        /*
         * First, update the pt-regs and switch-stack structures with the contents stored
         * in the min-state area:
         */
        if (((struct ia64_psr *) &ms->pmsa_ipsr)->ic == 0) {
                pt->cr_ipsr = ms->pmsa_xpsr;
                pt->cr_iip = ms->pmsa_xip;
                pt->cr_ifs = ms->pmsa_xfs;
        } else {
                pt->cr_ipsr = ms->pmsa_ipsr;
                pt->cr_iip = ms->pmsa_iip;
                pt->cr_ifs = ms->pmsa_ifs;
        }
        pt->ar_rsc = ms->pmsa_rsc;
        pt->pr = ms->pmsa_pr;
        pt->r1 = ms->pmsa_gr[0];
        pt->r2 = ms->pmsa_gr[1];
        pt->r3 = ms->pmsa_gr[2];
        sw->r4 = ms->pmsa_gr[3];
        sw->r5 = ms->pmsa_gr[4];
        sw->r6 = ms->pmsa_gr[5];
        sw->r7 = ms->pmsa_gr[6];
        pt->r8 = ms->pmsa_gr[7];
        pt->r9 = ms->pmsa_gr[8];
        pt->r10 = ms->pmsa_gr[9];
        pt->r11 = ms->pmsa_gr[10];
        pt->r12 = ms->pmsa_gr[11];
        pt->r13 = ms->pmsa_gr[12];
        pt->r14 = ms->pmsa_gr[13];
        pt->r15 = ms->pmsa_gr[14];
        dst_banked = &pt->r16;          /* r16-r31 are contiguous in struct pt_regs */
        src_banked = ms->pmsa_bank1_gr;
        for (i = 0; i < 16; ++i)
                dst_banked[i] = src_banked[i];
        pt->b0 = ms->pmsa_br0;
        sw->b1 = ms->pmsa_br1;

        /* construct the NaT bits for the pt-regs structure: */
#       define PUT_NAT_BIT(dst, addr)                                   \
        do {                                                            \
                bit = nat_bits & 1; nat_bits >>= 1;                     \
                shift = ((unsigned long) addr >> 3) & 0x3f;             \
                dst = ((dst) & ~(1UL << shift)) | (bit << shift);       \
        } while (0)

        /* Rotate the saved NaT bits such that bit 0 corresponds to pmsa_gr[0]: */
        shift = ((unsigned long) &ms->pmsa_gr[0] >> 3) & 0x3f;
        nat_bits = (ms->pmsa_nat_bits >> shift) | (ms->pmsa_nat_bits << (64 - shift));

        PUT_NAT_BIT(sw->caller_unat, &pt->r1);
        PUT_NAT_BIT(sw->caller_unat, &pt->r2);
        PUT_NAT_BIT(sw->caller_unat, &pt->r3);
        PUT_NAT_BIT(sw->ar_unat, &sw->r4);
        PUT_NAT_BIT(sw->ar_unat, &sw->r5);
        PUT_NAT_BIT(sw->ar_unat, &sw->r6);
        PUT_NAT_BIT(sw->ar_unat, &sw->r7);
        PUT_NAT_BIT(sw->caller_unat, &pt->r8);  PUT_NAT_BIT(sw->caller_unat, &pt->r9);
        PUT_NAT_BIT(sw->caller_unat, &pt->r10); PUT_NAT_BIT(sw->caller_unat, &pt->r11);
        PUT_NAT_BIT(sw->caller_unat, &pt->r12); PUT_NAT_BIT(sw->caller_unat, &pt->r13);
        PUT_NAT_BIT(sw->caller_unat, &pt->r14); PUT_NAT_BIT(sw->caller_unat, &pt->r15);
        nat_bits >>= 16;        /* skip over bank0 NaT bits */
        PUT_NAT_BIT(sw->caller_unat, &pt->r16); PUT_NAT_BIT(sw->caller_unat, &pt->r17);
        PUT_NAT_BIT(sw->caller_unat, &pt->r18); PUT_NAT_BIT(sw->caller_unat, &pt->r19);
        PUT_NAT_BIT(sw->caller_unat, &pt->r20); PUT_NAT_BIT(sw->caller_unat, &pt->r21);
        PUT_NAT_BIT(sw->caller_unat, &pt->r22); PUT_NAT_BIT(sw->caller_unat, &pt->r23);
        PUT_NAT_BIT(sw->caller_unat, &pt->r24); PUT_NAT_BIT(sw->caller_unat, &pt->r25);
        PUT_NAT_BIT(sw->caller_unat, &pt->r26); PUT_NAT_BIT(sw->caller_unat, &pt->r27);
        PUT_NAT_BIT(sw->caller_unat, &pt->r28); PUT_NAT_BIT(sw->caller_unat, &pt->r29);
        PUT_NAT_BIT(sw->caller_unat, &pt->r30); PUT_NAT_BIT(sw->caller_unat, &pt->r31);
}
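/*
 * A worked example of the NaT rotation above (added for clarity): in
 * UNAT-style collections, the NaT bit for the 8-byte slot at address A
 * lives at bit ((A >> 3) & 0x3f).  pmsa_nat_bits is indexed by slot
 * position within the min-state area, so rotating it right by the slot
 * number of pmsa_gr[0] lines bit 0 up with pmsa_gr[0].  For instance, if
 * &ms->pmsa_gr[0] ended in 0x40, then shift = (0x40 >> 3) & 0x3f = 8 and
 * fetch_min_state() right-rotates pmsa_nat_bits by 8 before PUT_NAT_BIT
 * scatters the bits into caller_unat/ar_unat at the pt_regs/switch_stack
 * slot positions.
 */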
 
static void
init_handler_platform (pal_min_state_area_t *ms,
                       struct pt_regs *pt, struct switch_stack *sw)
{
        struct unw_frame_info info;

        /* if a kernel debugger is available call it here else just dump the registers */

        /*
         * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
         * generated via the BMC's command-line interface, but since the console is on the
         * same serial line, the user will need some time to switch out of the BMC before
         * the dump begins.
         */
        printk("Delaying for 5 seconds...\n");
        udelay(5*1000000);
        show_min_state(ms);

        printk("Backtrace of current task (pid %d, %s)\n", current->pid, current->comm);
        fetch_min_state(ms, pt, sw);
        unw_init_from_interruption(&info, current, pt, sw);
        ia64_do_show_stack(&info, NULL);

#ifdef CONFIG_SMP
        /* read_trylock() would be handy... */
        if (!tasklist_lock.write_lock)
                read_lock(&tasklist_lock);
#endif
        {
                struct task_struct *t;
                for_each_task(t) {
                        if (t == current)
                                continue;

                        printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
                        show_stack(t);
                }
        }
#ifdef CONFIG_SMP
        if (!tasklist_lock.write_lock)
                read_unlock(&tasklist_lock);
#endif

        printk("\nINIT dump complete.  Please reboot now.\n");
        while (1);                      /* hang city if no debugger */
}

#ifdef CONFIG_ACPI
/*
 * ia64_mca_register_cpev
 *
 *  Register the corrected platform error vector with SAL.
 *
 *  Inputs
 *      cpev        Corrected Platform Error Vector number
 *
 *  Outputs
 *      None
 */
static void
ia64_mca_register_cpev (int cpev)
{
        /* Register the CPE interrupt vector with SAL */
        struct ia64_sal_retval isrv;

        isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
        if (isrv.status) {
                printk(KERN_ERR "Failed to register Corrected Platform "
                       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
                return;
        }

        IA64_MCA_DEBUG("%s: corrected platform error "
                       "vector %#x setup and enabled\n", __FUNCTION__, cpev);
}
#endif /* CONFIG_ACPI */

#endif /* PLATFORM_MCA_HANDLERS */

/*
 * ia64_mca_cmc_vector_setup
 *
 *  Set up the corrected machine check vector register in the processor and
 *  unmask the interrupt.  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      None
 *
 * Outputs
 *      None
 */
void
ia64_mca_cmc_vector_setup (void)
{
        cmcv_reg_t      cmcv;

        cmcv.cmcv_regval        = 0;
        cmcv.cmcv_mask          = 0;        /* Unmask/enable interrupt */
        cmcv.cmcv_vector        = IA64_CMC_VECTOR;
        ia64_set_cmcv(cmcv.cmcv_regval);

        IA64_MCA_DEBUG("%s: CPU %d corrected "
                       "machine check vector %#x setup and enabled.\n",
                       __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR);

        IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
                       __FUNCTION__, smp_processor_id(), ia64_get_cmcv());
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *  Mask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy (unused)
 *
 * Outputs
 *      None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
        cmcv_reg_t      cmcv;

        cmcv = (cmcv_reg_t)ia64_get_cmcv();

        cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
        ia64_set_cmcv(cmcv.cmcv_regval);

        IA64_MCA_DEBUG("%s: CPU %d corrected "
                       "machine check vector %#x disabled.\n",
                       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *  Unmask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy (unused)
 *
 * Outputs
 *      None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
        cmcv_reg_t      cmcv;

        cmcv = (cmcv_reg_t)ia64_get_cmcv();

        cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
        ia64_set_cmcv(cmcv.cmcv_regval);

        IA64_MCA_DEBUG("%s: CPU %d corrected "
                       "machine check vector %#x enabled.\n",
                       __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 *
 * Note: needs preempt_disable() if you apply the preempt patch to 2.4.
 */
static void
ia64_mca_cmc_vector_disable_keventd(void *unused)
{
        ia64_mca_cmc_vector_disable(NULL);
        smp_call_function(ia64_mca_cmc_vector_disable, NULL, 1, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 *
 * Note: needs preempt_disable() if you apply the preempt patch to 2.4.
 */
static void
ia64_mca_cmc_vector_enable_keventd(void *unused)
{
        smp_call_function(ia64_mca_cmc_vector_enable, NULL, 1, 0);
        ia64_mca_cmc_vector_enable(NULL);
}
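/*
 * Note (added for clarity): under 2.4, smp_call_function() runs the given
 * function on every CPU except the caller, which is why each keventd
 * helper above pairs it with a direct local call, leaving the CMC vector
 * toggled on all CPUs.
 */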
 
/*
 * ia64_mca_wakeup_ipi_wait
 *
 *      Wait for the inter-cpu interrupt to be sent by the
 *      monarch processor once it is done with handling the
 *      MCA.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_wakeup_ipi_wait(void)
{
        int     irr_num = (IA64_MCA_WAKEUP_VECTOR >> 6);
        int     irr_bit = (IA64_MCA_WAKEUP_VECTOR & 0x3f);
        u64     irr = 0;

        do {
                switch(irr_num) {
                      case 0:
                        irr = ia64_get_irr0();
                        break;
                      case 1:
                        irr = ia64_get_irr1();
                        break;
                      case 2:
                        irr = ia64_get_irr2();
                        break;
                      case 3:
                        irr = ia64_get_irr3();
                        break;
                }
        } while (!(irr & (1UL << irr_bit)));
}
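/*
 * Aside on the IRR arithmetic above: the 256 interrupt vectors map onto
 * four 64-bit IRR registers (irr0-irr3), so for a vector v the register
 * number is v >> 6 and the bit within it is v & 0x3f.  A hypothetical
 * vector of 0x85 (133), for example, would poll bit 5 of irr2.
 */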
 
/*
 * ia64_mca_wakeup
 *
 *      Send an inter-cpu interrupt to wake up a particular cpu
 *      and mark that cpu as being out of rendez.
 *
 *  Inputs  :   cpuid
 *  Outputs :   None
 */
static void
ia64_mca_wakeup(int cpu)
{
        platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
}

/*
 * ia64_mca_wakeup_all
 *
 *      Wake up all the cpus which have rendez'ed previously.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_wakeup_all(void)
{
        int cpu;

        /* Clear the Rendez checkin flag for all cpus */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!cpu_online(cpu))
                        continue;
                if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
                        ia64_mca_wakeup(cpu);
        }
}

/*
 * ia64_mca_rendez_int_handler
 *
 *      This is the handler used to put slave processors into a spinloop
 *      while the monarch processor does the mca handling, and later
 *      wake each slave up once the monarch is done.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
{
        unsigned long flags;
        int cpu = smp_processor_id();

        /* Mask all interrupts */
        local_irq_save(flags);

        ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
        /* Register with the SAL monarch that the slave has
         * reached SAL
         */
        ia64_sal_mc_rendez();

        /* Wait for the wakeup IPI from the monarch.
         * This waiting is done by polling on the wakeup-interrupt
         * vector bit in the processor's IRRs
         */
        ia64_mca_wakeup_ipi_wait();

        /* Enable all interrupts */
        local_irq_restore(flags);
}

/*
 * ia64_mca_wakeup_int_handler
 *
 *      The interrupt handler for processing the inter-cpu interrupt to the
 *      slave cpu which was spinning in the rendez loop.
 *      Since this spinning is done by turning off the interrupts and
 *      polling on the wakeup-interrupt bit in the IRR, there is
 *      nothing useful to be done in the handler.
 *
 *  Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
 *      arg             (Interrupt handler specific argument)
 *      ptregs          (Exception frame at the time of the interrupt)
 *  Outputs :   None
 *
 */
static void
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg, struct pt_regs *ptregs)
{
}

/*
 * ia64_return_to_sal_check
 *
 *      This function is called before going back from the OS_MCA handler
 *      to the OS_MCA dispatch code, which finally returns control
 *      to SAL.
 *      The main purpose of this routine is to set up the OS_MCA to SAL
 *      return state which can be used by the OS_MCA dispatch code
 *      just before going back to SAL.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */

static void
ia64_return_to_sal_check(int recover)
{

        /* Copy over some relevant stuff from the sal_to_os_mca_handoff
         * so that it can be used at the time of os_mca_to_sal_handoff
         */
        ia64_os_to_sal_handoff_state.imots_sal_gp =
                ia64_sal_to_os_handoff_state.imsto_sal_gp;

        ia64_os_to_sal_handoff_state.imots_sal_check_ra =
                ia64_sal_to_os_handoff_state.imsto_sal_check_ra;

        if (recover)
                ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_CORRECTED;
        else
                ia64_os_to_sal_handoff_state.imots_os_status = IA64_MCA_COLD_BOOT;

        /* Default = tell SAL to return to same context */
        ia64_os_to_sal_handoff_state.imots_context = IA64_MCA_SAME_CONTEXT;

        ia64_os_to_sal_handoff_state.imots_new_min_state =
                (u64 *)ia64_sal_to_os_handoff_state.pal_min_state;
}

/*
 * ia64_mca_ucmc_handler
 *
 *      This is the uncorrectable machine check handler, called from the OS_MCA
 *      dispatch code which is in turn called from SAL_CHECK().
 *      This is the place where the core of OS MCA handling is done.
 *      Right now the logs are extracted and displayed in a well-defined
 *      format.  This handler code is supposed to be run only on the
 *      monarch processor.  Once the monarch is done with MCA handling,
 *      further MCA logging is enabled by clearing logs.
 *      The monarch also has the duty of sending wakeup-IPIs to pull the
 *      slave processors out of the rendezvous spinloop.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
void
ia64_mca_ucmc_handler(void)
{
        pal_processor_state_info_t *psp = (pal_processor_state_info_t *)
                &ia64_sal_to_os_handoff_state.proc_state_param;
        int recover = psp->tc && !(psp->cc || psp->bc || psp->rc || psp->uc);

        /* Get the MCA error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

        /*
         *  Wake up all the processors which are spinning in the rendezvous
         *  loop.
         */
        ia64_mca_wakeup_all();

        /* Return to SAL */
        ia64_return_to_sal_check(recover);
}

/*
 * ia64_mca_cmc_int_handler
 *
 *      This is the corrected machine check interrupt handler.
 *      Right now the logs are extracted and displayed in a well-defined
 *      format.
 *
 * Inputs
 *      interrupt number
 *      client data arg ptr
 *      saved registers ptr
 *
 * Outputs
 *      None
 */
static void
ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
{
        static unsigned long    cmc_history[CMC_HISTORY_LENGTH];
        static int              index;
        static spinlock_t       cmc_history_lock = SPIN_LOCK_UNLOCKED;

        IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
                       __FUNCTION__, cmc_irq, smp_processor_id());

        /* SAL spec states this should run with interrupts enabled */
        local_irq_enable();

        /* Get the CMC error record and log it */
        ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);

        spin_lock(&cmc_history_lock);
        if (!cmc_polling_enabled) {
                int i, count = 1; /* we know 1 happened now */
                unsigned long now = jiffies;

                for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
                        if (now - cmc_history[i] <= HZ)
                                count++;
                }

                IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
                if (count >= CMC_HISTORY_LENGTH) {

                        cmc_polling_enabled = 1;
                        spin_unlock(&cmc_history_lock);
                        schedule_task(&cmc_disable_tq);

                        /*
                         * Corrected errors will still be corrected, but
                         * make sure there's a log somewhere that indicates
                         * something is generating more than we can handle.
                         */
                        printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

                        mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

                        /* lock already released, get out now */
                        return;
                } else {
                        cmc_history[index++] = now;
                        if (index == CMC_HISTORY_LENGTH)
                                index = 0;
                }
        }
        spin_unlock(&cmc_history_lock);
}
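/*
 * Note on the threshold above: the handler counts the interrupt being
 * processed plus every cmc_history timestamp from the last HZ jiffies
 * (roughly one second).  With CMC_HISTORY_LENGTH == 5, a burst of five
 * corrected-error interrupts within about a second flips the driver into
 * polling mode; ia64_mca_cmc_int_caller() below switches back once a
 * polling sweep finds no new records.
 */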
 
/*
 *  ia64_mca_cmc_int_caller
 *
 *      Triggered by sw interrupt from CMC polling routine.  Calls
 *      real interrupt handler and either triggers a sw interrupt
 *      on the next cpu or does cleanup at the end.
 *
 * Inputs
 *      interrupt number
 *      client data arg ptr
 *      saved registers ptr
 * Outputs
 *      None
 */
static void
ia64_mca_cmc_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
{
        static int start_count = -1;
        unsigned int cpuid;

        cpuid = smp_processor_id();

        /* If first cpu, update count */
        if (start_count == -1)
                start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

        ia64_mca_cmc_int_handler(cpe_irq, arg, ptregs);

        for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

        if (cpuid < NR_CPUS) {
                platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
        } else {
                /* If no log record, switch out of polling mode */
                if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {

                        printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
                        schedule_task(&cmc_enable_tq);
                        cmc_polling_enabled = 0;

                } else {

                        mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
                }

                start_count = -1;
        }
}

/*
 *  ia64_mca_cmc_poll
 *
 *      Poll for Corrected Machine Checks (CMCs)
 *
 * Inputs   :   dummy (unused)
 * Outputs  :   None
 *
 */
static void
ia64_mca_cmc_poll (unsigned long dummy)
{
        /* Trigger a CMC interrupt cascade */
        platform_send_ipi(__ffs(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 *  ia64_mca_cpe_int_caller
 *
 *      Triggered by sw interrupt from CPE polling routine.  Calls
 *      real interrupt handler and either triggers a sw interrupt
 *      on the next cpu or does cleanup at the end.
 *
 * Inputs
 *      interrupt number
 *      client data arg ptr
 *      saved registers ptr
 * Outputs
 *      None
 */
static void
ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
{
        static int start_count = -1;
        static int poll_time = MAX_CPE_POLL_INTERVAL;
        unsigned int cpuid;

        cpuid = smp_processor_id();

        /* If first cpu, update count */
        if (start_count == -1)
                start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);

        ia64_mca_cpe_int_handler(cpe_irq, arg, ptregs);

        for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);

        if (cpuid < NR_CPUS) {
                platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
        } else {
                /*
                 * If a log was recorded, increase our polling frequency;
                 * otherwise, back off.
                 */
                if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
                        poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
                } else {
                        poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
                }
                start_count = -1;
                mod_timer(&cpe_poll_timer, jiffies + poll_time);
        }
}
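/*
 * Note on the backoff above: poll_time moves by powers of two between
 * MIN_CPE_POLL_INTERVAL (2 minutes) and MAX_CPE_POLL_INTERVAL (15 minutes).
 * A sweep that logged a record halves the interval; an idle sweep doubles
 * it.  From the 15-minute maximum, three productive sweeps in a row reach
 * the 2-minute floor (15 -> 7.5 -> 3.75 -> 2, the last step clamped by
 * the max()).
 */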
 
/*
 *  ia64_mca_cpe_poll
 *
 *      Poll for Corrected Platform Errors (CPEs): trigger an interrupt
 *      on the first cpu; from there it will trickle through all the cpus.
 *
 * Inputs   :   dummy (unused)
 * Outputs  :   None
 *
 */
static void
ia64_mca_cpe_poll (unsigned long dummy)
{
        /* Trigger a CPE interrupt cascade */
        platform_send_ipi(__ffs(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * C portion of the OS INIT handler
 *
 * Called from ia64_monarch_init_handler
 *
 * Inputs: pointer to pt_regs where processor info was saved.
 *
 * Returns:
 *   0 if SAL must warm boot the System
 *   1 if SAL must return to interrupted context using PAL_MC_RESUME
 *
 */
void
ia64_init_handler (struct pt_regs *pt, struct switch_stack *sw)
{
        pal_min_state_area_t *ms;

        oops_in_progress = 1;   /* avoid deadlock in printk, but it makes recovery dodgy */

        printk(KERN_INFO "Entered OS INIT handler. PSP=%lx\n",
                ia64_sal_to_os_handoff_state.proc_state_param);

        /*
         * The address of the minstate area provided by PAL is physical,
         * uncacheable (bit 63 set). Convert it to a Linux virtual
         * address in region 6.
         */
        ms = (pal_min_state_area_t *)(ia64_sal_to_os_handoff_state.pal_min_state | (6ul<<61));

        init_handler_platform(ms, pt, sw);      /* call platform specific routines */
}
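/*
 * Note on the region-6 conversion above: on ia64 the top three bits of a
 * virtual address select a region, and Linux uses region 6 as an
 * uncacheable identity mapping of physical memory.  ORing in (6ul << 61),
 * i.e. 0xc000000000000000, turns the physical min-state address handed
 * over by PAL into a usable uncached virtual address: physical 0x1000
 * becomes 0xc000000000001000, for instance.
 */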
 
static int __init
ia64_mca_disable_cpe_polling(char *str)
{
        cpe_poll_enabled = 0;
        return 1;
}

__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);

static struct irqaction cmci_irqaction = {
        .handler =      ia64_mca_cmc_int_handler,
        .flags =        SA_INTERRUPT,
        .name =         "cmc_hndlr"
};

static struct irqaction cmcp_irqaction = {
        .handler =      ia64_mca_cmc_int_caller,
        .flags =        SA_INTERRUPT,
        .name =         "cmc_poll"
};

static struct irqaction mca_rdzv_irqaction = {
        .handler =      ia64_mca_rendez_int_handler,
        .flags =        SA_INTERRUPT,
        .name =         "mca_rdzv"
};

static struct irqaction mca_wkup_irqaction = {
        .handler =      ia64_mca_wakeup_int_handler,
        .flags =        SA_INTERRUPT,
        .name =         "mca_wkup"
};

#ifdef CONFIG_ACPI
static struct irqaction mca_cpe_irqaction = {
        .handler =      ia64_mca_cpe_int_handler,
        .flags =        SA_INTERRUPT,
        .name =         "cpe_hndlr"
};

static struct irqaction mca_cpep_irqaction = {
        .handler =      ia64_mca_cpe_int_caller,
        .flags =        SA_INTERRUPT,
        .name =         "cpe_poll"
};
#endif /* CONFIG_ACPI */

/*
 * ia64_mca_init
 *
 *  Do all the system-level mca-specific initialization.
 *
 *      1. Register spinloop and wakeup request interrupt vectors
 *
 *      2. Register OS_MCA handler entry point
 *
 *      3. Register OS_INIT handler entry point
 *
 *      4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
 *
 *  Note that this initialization is done very early, before some kernel
 *  services are available.
 *
 *  Inputs  :   None
 *
 *  Outputs :   None
 */
void __init
ia64_mca_init(void)
{
        ia64_fptr_t *mon_init_ptr = (ia64_fptr_t *)ia64_monarch_init_handler;
        ia64_fptr_t *slave_init_ptr = (ia64_fptr_t *)ia64_slave_init_handler;
        ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
        int i;
        s64 rc;
        struct ia64_sal_retval isrv;
        u64 timeout = IA64_MCA_RENDEZ_TIMEOUT;  /* platform specific */

        IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);

        INIT_TQUEUE(&cmc_disable_tq, ia64_mca_cmc_vector_disable_keventd, NULL);
        INIT_TQUEUE(&cmc_enable_tq, ia64_mca_cmc_vector_enable_keventd, NULL);

        /* Clear the Rendez checkin flag for all cpus */
        for (i = 0; i < NR_CPUS; i++)
                ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;

        /*
         * Register the rendezvous spinloop and wakeup mechanism with SAL
         */

        /* Register the rendezvous interrupt vector with SAL */
        while (1) {
                isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
                                              SAL_MC_PARAM_MECHANISM_INT,
                                              IA64_MCA_RENDEZ_VECTOR,
                                              timeout,
                                              SAL_MC_PARAM_RZ_ALWAYS);
                rc = isrv.status;
                if (rc == 0)
                        break;
                if (rc == -2) {
                        printk(KERN_INFO "Increasing MCA rendezvous timeout from "
                                "%ld to %ld milliseconds\n", timeout, isrv.v0);
                        timeout = isrv.v0;
                        continue;
                }
                printk(KERN_ERR "Failed to register rendezvous interrupt "
                       "with SAL (status %ld)\n", rc);
                return;
        }

        /* Register the wakeup interrupt vector with SAL */
        isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
                                      SAL_MC_PARAM_MECHANISM_INT,
                                      IA64_MCA_WAKEUP_VECTOR,
                                      0, 0);
        rc = isrv.status;
        if (rc) {
                printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
                       "(status %ld)\n", rc);
                return;
        }

        IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__);

        ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
        /*
         * XXX - disable SAL checksum by setting size to 0; should be
         *      ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
         */
        ia64_mc_info.imi_mca_handler_size       = 0;

        /* Register the os mca handler with SAL */
        if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
                                       ia64_mc_info.imi_mca_handler,
                                       ia64_tpa(mca_hldlr_ptr->gp),
                                       ia64_mc_info.imi_mca_handler_size,
                                       0, 0, 0)))
        {
                printk(KERN_ERR "Failed to register OS MCA handler with SAL "
                       "(status %ld)\n", rc);
                return;
        }

        IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__,
                       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));

        /*
         * XXX - disable SAL checksum by setting size to 0; should be the
         * size of the actual init handler in mca_asm.S.
         */
        ia64_mc_info.imi_monarch_init_handler           = ia64_tpa(mon_init_ptr->fp);
        ia64_mc_info.imi_monarch_init_handler_size      = 0;
        ia64_mc_info.imi_slave_init_handler             = ia64_tpa(slave_init_ptr->fp);
        ia64_mc_info.imi_slave_init_handler_size        = 0;

        IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__,
                       ia64_mc_info.imi_monarch_init_handler);

        /* Register the os init handler with SAL */
        if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
                                       ia64_mc_info.imi_monarch_init_handler,
                                       ia64_tpa(ia64_get_gp()),
                                       ia64_mc_info.imi_monarch_init_handler_size,
                                       ia64_mc_info.imi_slave_init_handler,
                                       ia64_tpa(ia64_get_gp()),
                                       ia64_mc_info.imi_slave_init_handler_size)))
        {
                printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
                       "(status %ld)\n", rc);
                return;
        }

        IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);

        /*
         *  Configure the CMCI/P vector and handler. Interrupts for CMC are
         *  per-processor, so AP CMC interrupts are set up in smp_callin() (smpboot.c).
         */
        register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
        register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
        ia64_mca_cmc_vector_setup();       /* Setup vector on BSP & enable */

        /* Setup the MCA rendezvous interrupt vector */
        register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);

        /* Setup the MCA wakeup interrupt vector */
        register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);

#ifdef CONFIG_ACPI
        /* Setup the CPE interrupt vector */
        {
                irq_desc_t *desc;
                unsigned int irq;
                int cpev = acpi_request_vector(ACPI_INTERRUPT_CPEI);

                if (cpev >= 0) {
                        for (irq = 0; irq < NR_IRQS; ++irq)
                                if (irq_to_vector(irq) == cpev) {
                                        desc = irq_desc(irq);
                                        desc->status |= IRQ_PER_CPU;
                                        desc->handler = &irq_type_iosapic_level;
                                        setup_irq(irq, &mca_cpe_irqaction);
                                }
                        ia64_mca_register_cpev(cpev);
                }
        }
#endif

        /* Initialize the areas set aside by the OS to buffer the
         * platform/processor error states for MCA/INIT/CMC
         * handling.
         */
        ia64_log_init(SAL_INFO_TYPE_MCA);
        ia64_log_init(SAL_INFO_TYPE_INIT);
        ia64_log_init(SAL_INFO_TYPE_CMC);
        ia64_log_init(SAL_INFO_TYPE_CPE);

        printk(KERN_INFO "MCA related initialization done\n");
}

/*
 * ia64_mca_late_init
 *
 *      Opportunity to set up things that require initialization later
 *      than ia64_mca_init.  Set up a timer to poll for CPEs if the
 *      platform doesn't support an interrupt driven mechanism.
 *
 *  Inputs  :   None
 *  Outputs :   Status
 */
static int __init
ia64_mca_late_init(void)
{
        init_timer(&cmc_poll_timer);
        cmc_poll_timer.function = ia64_mca_cmc_poll;

        /* Reset to the correct state */
        cmc_polling_enabled = 0;

        init_timer(&cpe_poll_timer);
        cpe_poll_timer.function = ia64_mca_cpe_poll;

#ifdef CONFIG_ACPI
        /* If the platform doesn't support CPEI, get the timer going. */
        if (acpi_request_vector(ACPI_INTERRUPT_CPEI) < 0 && cpe_poll_enabled) {
                register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
                ia64_mca_cpe_poll(0UL);
        }
#endif

        return 0;
}

module_init(ia64_mca_late_init);