arch/mips/kernel/traps.c (linux-2.6.24, or1k_soc_on_altera_embedded_dev_kit trunk, rev 3)

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>

extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);

extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
        struct mips_fpu_struct *ctx, int has_fpu);

void (*board_watchpoint_handler)(struct pt_regs *regs);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);


static void show_raw_backtrace(unsigned long reg29)
{
        unsigned long *sp = (unsigned long *)reg29;
        unsigned long addr;

        printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
        printk("\n");
#endif
        while (!kstack_end(sp)) {
                addr = *sp++;
                if (__kernel_text_address(addr))
                        print_ip_sym(addr);
        }
        printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
        raw_show_trace = 1;
        return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
        unsigned long sp = regs->regs[29];
        unsigned long ra = regs->regs[31];
        unsigned long pc = regs->cp0_epc;

        if (raw_show_trace || !__kernel_text_address(pc)) {
                show_raw_backtrace(sp);
                return;
        }
        printk("Call Trace:\n");
        do {
                print_ip_sym(pc);
                pc = unwind_stack(task, &sp, pc, &ra);
        } while (pc);
        printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
        const struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        long stackdata;
        int i;
        unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

        printk("Stack :");
        i = 0;
        while ((unsigned long) sp & (PAGE_SIZE - 1)) {
                if (i && ((i % (64 / field)) == 0))
                        printk("\n       ");
                if (i > 39) {
                        printk(" ...");
                        break;
                }

                if (__get_user(stackdata, sp++)) {
                        printk(" (Bad stack address)");
                        break;
                }

                printk(" %0*lx", field, stackdata);
                i++;
        }
        printk("\n");
        show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        struct pt_regs regs;
        if (sp) {
                regs.regs[29] = (unsigned long)sp;
                regs.regs[31] = 0;
                regs.cp0_epc = 0;
        } else {
                if (task && task != current) {
                        regs.regs[29] = task->thread.reg29;
                        regs.regs[31] = 0;
                        regs.cp0_epc = task->thread.reg31;
                } else {
                        prepare_frametrace(&regs);
                }
        }
        show_stacktrace(task, &regs);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        struct pt_regs regs;

        prepare_frametrace(&regs);
        show_backtrace(current, &regs);
}

EXPORT_SYMBOL(dump_stack);

static void show_code(unsigned int __user *pc)
{
        long i;

        printk("\nCode:");

        for(i = -3 ; i < 6 ; i++) {
                unsigned int insn;
                if (__get_user(insn, pc + i)) {
                        printk(" (Bad address in epc)\n");
                        break;
                }
                printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
        }
}

static void __show_regs(const struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned int cause = regs->cp0_cause;
        int i;

        printk("Cpu %d\n", smp_processor_id());

        /*
         * Saved main processor registers
         */
        for (i = 0; i < 32; ) {
                if ((i % 4) == 0)
                        printk("$%2d   :", i);
                if (i == 0)
                        printk(" %0*lx", field, 0UL);
                else if (i == 26 || i == 27)
                        printk(" %*s", field, "");
                else
                        printk(" %0*lx", field, regs->regs[i]);

                i++;
                if ((i % 4) == 0)
                        printk("\n");
        }

#ifdef CONFIG_CPU_HAS_SMARTMIPS
        printk("Acx    : %0*lx\n", field, regs->acx);
#endif
        printk("Hi    : %0*lx\n", field, regs->hi);
        printk("Lo    : %0*lx\n", field, regs->lo);

        /*
         * Saved cp0 registers
         */
        printk("epc   : %0*lx ", field, regs->cp0_epc);
        print_symbol("%s ", regs->cp0_epc);
        printk("    %s\n", print_tainted());
        printk("ra    : %0*lx ", field, regs->regs[31]);
        print_symbol("%s\n", regs->regs[31]);

        printk("Status: %08x    ", (uint32_t) regs->cp0_status);

        if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
                if (regs->cp0_status & ST0_KUO)
                        printk("KUo ");
                if (regs->cp0_status & ST0_IEO)
                        printk("IEo ");
                if (regs->cp0_status & ST0_KUP)
                        printk("KUp ");
                if (regs->cp0_status & ST0_IEP)
                        printk("IEp ");
                if (regs->cp0_status & ST0_KUC)
                        printk("KUc ");
                if (regs->cp0_status & ST0_IEC)
                        printk("IEc ");
        } else {
                if (regs->cp0_status & ST0_KX)
                        printk("KX ");
                if (regs->cp0_status & ST0_SX)
                        printk("SX ");
                if (regs->cp0_status & ST0_UX)
                        printk("UX ");
                switch (regs->cp0_status & ST0_KSU) {
                case KSU_USER:
                        printk("USER ");
                        break;
                case KSU_SUPERVISOR:
                        printk("SUPERVISOR ");
                        break;
                case KSU_KERNEL:
                        printk("KERNEL ");
                        break;
                default:
                        printk("BAD_MODE ");
                        break;
                }
                if (regs->cp0_status & ST0_ERL)
                        printk("ERL ");
                if (regs->cp0_status & ST0_EXL)
                        printk("EXL ");
                if (regs->cp0_status & ST0_IE)
                        printk("IE ");
        }
        printk("\n");

        printk("Cause : %08x\n", cause);

        cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
        if (1 <= cause && cause <= 5)
                printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

        printk("PrId  : %08x (%s)\n", read_c0_prid(),
               cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
        __show_regs((struct pt_regs *)regs);
}

void show_registers(const struct pt_regs *regs)
{
        __show_regs(regs);
        print_modules();
        printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
                current->comm, task_pid_nr(current), current_thread_info(), current);
        show_stacktrace(current, regs);
        show_code((unsigned int __user *) regs->cp0_epc);
        printk("\n");
}

static DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char * str, const struct pt_regs * regs)
{
        static int die_counter;
#ifdef CONFIG_MIPS_MT_SMTC
        unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */

        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
        mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
        printk("%s[#%d]:\n", str, ++die_counter);
        show_registers(regs);
        add_taint(TAINT_DIE);
        spin_unlock_irq(&die_lock);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                ssleep(5);
                panic("Fatal exception");
        }

        do_exit(SIGSEGV);
}

extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];

__asm__(
"       .section        __dbe_table, \"a\"\n"
"       .previous                       \n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
        const struct exception_table_entry *e;

        e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
        if (!e)
                e = search_module_dbetables(addr);
        return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        const struct exception_table_entry *fixup = NULL;
        int data = regs->cp0_cause & 4;
        int action = MIPS_BE_FATAL;

        /* XXX For now.  Fixme, this searches the wrong table ...  */
        if (data && !user_mode(regs))
                fixup = search_dbe_tables(exception_epc(regs));

        if (fixup)
                action = MIPS_BE_FIXUP;

        if (board_be_handler)
                action = board_be_handler(regs, fixup != NULL);

        switch (action) {
        case MIPS_BE_DISCARD:
                return;
        case MIPS_BE_FIXUP:
                if (fixup) {
                        regs->cp0_epc = fixup->nextinsn;
                        return;
                }
                break;
        default:
                break;
        }

        /*
         * Assume it would be too dangerous to continue ...
         */
        printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
               data ? "Data" : "Instruction",
               field, regs->cp0_epc, field, regs->regs[31]);
        die_if_kernel("Oops", regs);
        force_sig(SIGBUS, current);
}

/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b
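
/*
 * Worked example (editor's illustration, not part of the original file):
 * "ll $8, 16($5)" assembles to 0xc0a80010, which decodes against the
 * masks above as
 *
 *      insn & OPCODE       = 0xc0000000  -> LL
 *      (insn & BASE) >> 21 = 5           -> base register $5
 *      (insn & RT) >> 16   = 8           -> destination register $8
 *      insn & OFFSET       = 16          -> byte offset, sign-extended below
 */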

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned long ll_bit;

static struct task_struct *ll_task = NULL;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
        unsigned long value, __user *vaddr;
        long offset;

        /*
         * analyse the ll instruction that just caused an RI exception
         * and compute the referenced address.
         */

        /* sign extend offset */
        offset = opcode & OFFSET;
        offset <<= 16;
        offset >>= 16;

        vaddr = (unsigned long __user *)
                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

        if ((unsigned long)vaddr & 3)
                return SIGBUS;
        if (get_user(value, vaddr))
                return SIGSEGV;

        preempt_disable();

        if (ll_task == NULL || ll_task == current) {
                ll_bit = 1;
        } else {
                ll_bit = 0;
        }
        ll_task = current;

        preempt_enable();

        regs->regs[(opcode & RT) >> 16] = value;

        return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
        unsigned long __user *vaddr;
        unsigned long reg;
        long offset;

        /*
         * analyse the sc instruction that just caused an RI exception
         * and compute the referenced address.
         */

        /* sign extend offset */
        offset = opcode & OFFSET;
        offset <<= 16;
        offset >>= 16;

        vaddr = (unsigned long __user *)
                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
        reg = (opcode & RT) >> 16;

        if ((unsigned long)vaddr & 3)
                return SIGBUS;

        preempt_disable();

        if (ll_bit == 0 || ll_task != current) {
                regs->regs[reg] = 0;
                preempt_enable();
                return 0;
        }

        preempt_enable();

        if (put_user(regs->regs[reg], vaddr))
                return SIGSEGV;

        regs->regs[reg] = 1;

        return 0;
}

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is,
 * both opcodes are supposed to result in coprocessor unusable exceptions
 * if executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
        if ((opcode & OPCODE) == LL)
                return simulate_ll(regs, opcode);
        if ((opcode & OPCODE) == SC)
                return simulate_sc(regs, opcode);

        return -1;                      /* Must be something else ... */
}
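
/*
 * For illustration (editor's sketch, not in the original source): the
 * user-space pattern these handlers emulate on ll/sc-less CPUs is the
 * classic atomic read-modify-write loop, e.g.
 *
 *      1:      ll      $8, 0($4)       # load linked
 *              addiu   $8, $8, 1
 *              sc      $8, 0($4)       # store conditional, $8 = 1 on success
 *              beqz    $8, 1b          # retry if the reservation was lost
 *
 * simulate_ll() records the reservation in ll_bit/ll_task and simulate_sc()
 * honours it; ll_bit is cleared on every context switch (see the comment
 * above referring to r*_switch.S), which is what invalidates a reservation
 * held across a reschedule.
 */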

/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.  The only current use of this
 * is the thread area pointer.
 */
static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
{
        struct thread_info *ti = task_thread_info(current);

        if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
                int rd = (opcode & RD) >> 11;
                int rt = (opcode & RT) >> 16;
                switch (rd) {
                        case 29:
                                regs->regs[rt] = ti->tp_value;
                                return 0;
                        default:
                                return -1;
                }
        }

        /* Not ours.  */
        return -1;
}
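
/*
 * Illustration (editor's note, not in the original source): user-space
 * TLS code reads the thread pointer with
 *
 *      rdhwr   $3, $29         # hardware register 29 = user thread area
 *
 * On CPUs without a usable RDHWR this raises a reserved instruction
 * exception, and the handler above supplies ti->tp_value instead, so the
 * same binaries run on both old and new hardware.
 */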

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
        if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC)
                return 0;

        return -1;                      /* Must be something else ... */
}

asmlinkage void do_ov(struct pt_regs *regs)
{
        siginfo_t info;

        die_if_kernel("Integer overflow", regs);

        info.si_code = FPE_INTOVF;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void __user *) regs->cp0_epc;
        force_sig_info(SIGFPE, &info, current);
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
        siginfo_t info;

        die_if_kernel("FP exception in kernel code", regs);

        if (fcr31 & FPU_CSR_UNI_X) {
                int sig;

                /*
                 * Unimplemented operation exception.  If we've got the full
                 * software emulator on-board, let's use it...
                 *
                 * Force FPU to dump state into task/thread context.  We're
                 * moving a lot of data here for what is probably a single
                 * instruction, but the alternative is to pre-decode the FP
                 * register operands before invoking the emulator, which seems
                 * a bit extreme for what should be an infrequent event.
                 */
                /* Ensure that 'resume' does not overwrite the saved fp context again. */
                lose_fpu(1);

                /* Run the emulator */
                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1);

                /*
                 * We can't allow the emulated instruction to leave any of
                 * the cause bits set in $fcr31.
                 */
                current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

                /* Restore the hardware register state */
                own_fpu(1);     /* Using the FPU again.  */

                /* If something went wrong, signal */
                if (sig)
                        force_sig(sig, current);

                return;
        } else if (fcr31 & FPU_CSR_INV_X)
                info.si_code = FPE_FLTINV;
        else if (fcr31 & FPU_CSR_DIV_X)
                info.si_code = FPE_FLTDIV;
        else if (fcr31 & FPU_CSR_OVF_X)
                info.si_code = FPE_FLTOVF;
        else if (fcr31 & FPU_CSR_UDF_X)
                info.si_code = FPE_FLTUND;
        else if (fcr31 & FPU_CSR_INE_X)
                info.si_code = FPE_FLTRES;
        else
                info.si_code = __SI_FAULT;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void __user *) regs->cp0_epc;
        force_sig_info(SIGFPE, &info, current);
}

asmlinkage void do_bp(struct pt_regs *regs)
{
        unsigned int opcode, bcode;
        siginfo_t info;

        if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
                goto out_sigsegv;

        /*
         * There is an ancient bug in MIPS assemblers: the break code
         * starts at bit 16 instead of bit 6 in the opcode.
         * Gas is bug-compatible, but not always, grrr...
         * We handle both cases with a simple heuristic.  --macro
         */
        bcode = ((opcode >> 6) & ((1 << 20) - 1));
        if (bcode < (1 << 10))
                bcode <<= 10;

        /*
         * (A short test says that IRIX 5.3 sends SIGTRAP for all break
         * insns, even for break codes that indicate arithmetic failures.
         * Weird ...)
         * But should we continue the brokenness???  --macro
         */
        switch (bcode) {
        case BRK_OVERFLOW << 10:
        case BRK_DIVZERO << 10:
                die_if_kernel("Break instruction in kernel code", regs);
                if (bcode == (BRK_DIVZERO << 10))
                        info.si_code = FPE_INTDIV;
                else
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                info.si_addr = (void __user *) regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                break;
        case BRK_BUG:
                die("Kernel bug detected", regs);
                break;
        default:
                die_if_kernel("Break instruction in kernel code", regs);
                force_sig(SIGTRAP, current);
        }
        return;

out_sigsegv:
        force_sig(SIGSEGV, current);
}
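
/*
 * Editor's note on the heuristic above (not in the original source):
 * a correctly assembled "break 7" yields bcode == 7, while the buggy
 * encoding yields bcode == 7 << 10.  Shifting codes below 1 << 10 up by
 * ten bits normalises both forms, so a single set of BRK_* << 10 case
 * labels matches either encoding.
 */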

asmlinkage void do_tr(struct pt_regs *regs)
{
        unsigned int opcode, tcode = 0;
        siginfo_t info;

        if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
                goto out_sigsegv;

        /* Immediate versions don't provide a code.  */
        if (!(opcode & OPCODE))
                tcode = ((opcode >> 6) & ((1 << 10) - 1));

        /*
         * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
         * insns, even for trap codes that indicate arithmetic failures.
         * Weird ...)
         * But should we continue the brokenness???  --macro
         */
        switch (tcode) {
        case BRK_OVERFLOW:
        case BRK_DIVZERO:
                die_if_kernel("Trap instruction in kernel code", regs);
                if (tcode == BRK_DIVZERO)
                        info.si_code = FPE_INTDIV;
                else
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                info.si_addr = (void __user *) regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                break;
        case BRK_BUG:
                die("Kernel bug detected", regs);
                break;
        default:
                die_if_kernel("Trap instruction in kernel code", regs);
                force_sig(SIGTRAP, current);
        }
        return;

out_sigsegv:
        force_sig(SIGSEGV, current);
}

asmlinkage void do_ri(struct pt_regs *regs)
{
        unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
        unsigned long old_epc = regs->cp0_epc;
        unsigned int opcode = 0;
        int status = -1;

        die_if_kernel("Reserved instruction in kernel code", regs);

        if (unlikely(compute_return_epc(regs) < 0))
                return;

        if (unlikely(get_user(opcode, epc) < 0))
                status = SIGSEGV;

        if (!cpu_has_llsc && status < 0)
                status = simulate_llsc(regs, opcode);

        if (status < 0)
                status = simulate_rdhwr(regs, opcode);

        if (status < 0)
                status = simulate_sync(regs, opcode);

        if (status < 0)
                status = SIGILL;

        if (unlikely(status > 0)) {
                regs->cp0_epc = old_epc;                /* Undo skip-over.  */
                force_sig(status, current);
        }
}

/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
        if (mt_fpemul_threshold > 0 &&
             ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
                /*
                 * If there's no FPU present, or if the application has already
                 * restricted the allowed set to exclude any CPUs with FPUs,
                 * we'll skip the procedure.
                 */
                if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
                        cpumask_t tmask;

                        cpus_and(tmask, current->thread.user_cpus_allowed,
                                 mt_fpu_cpumask);
                        set_cpus_allowed(current, tmask);
                        set_thread_flag(TIF_FPUBOUND);
                }
        }
#endif /* CONFIG_MIPS_MT_FPAFF */
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
        unsigned int __user *epc;
        unsigned long old_epc;
        unsigned int opcode;
        unsigned int cpid;
        int status;

        die_if_kernel("do_cpu invoked from kernel context!", regs);

        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

        switch (cpid) {
        case 0:
                epc = (unsigned int __user *)exception_epc(regs);
                old_epc = regs->cp0_epc;
                opcode = 0;
                status = -1;

                if (unlikely(compute_return_epc(regs) < 0))
                        return;

                if (unlikely(get_user(opcode, epc) < 0))
                        status = SIGSEGV;

                if (!cpu_has_llsc && status < 0)
                        status = simulate_llsc(regs, opcode);

                if (status < 0)
                        status = simulate_rdhwr(regs, opcode);

                if (status < 0)
                        status = SIGILL;

                if (unlikely(status > 0)) {
                        regs->cp0_epc = old_epc;        /* Undo skip-over.  */
                        force_sig(status, current);
                }

                return;

        case 1:
                if (used_math())        /* Using the FPU again.  */
                        own_fpu(1);
                else {                  /* First time FPU user.  */
                        init_fpu();
                        set_used_math();
                }

                if (!raw_cpu_has_fpu) {
                        int sig;
                        sig = fpu_emulator_cop1Handler(regs,
                                                &current->thread.fpu, 0);
                        if (sig)
                                force_sig(sig, current);
                        else
                                mt_ase_fp_affinity();
                }

                return;

        case 2:
        case 3:
                break;
        }

        force_sig(SIGILL, current);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
        force_sig(SIGILL, current);
}

asmlinkage void do_watch(struct pt_regs *regs)
{
        if (board_watchpoint_handler) {
                (*board_watchpoint_handler)(regs);
                return;
        }

        /*
         * We use the watch exception where available to detect stack
         * overflows.
         */
        dump_tlb_all();
        show_regs(regs);
        panic("Caught WATCH exception - probably caused by stack overflow.");
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        int multi_match = regs->cp0_status & ST0_TS;

        show_regs(regs);

        if (multi_match) {
                printk("Index   : %0x\n", read_c0_index());
                printk("Pagemask: %0x\n", read_c0_pagemask());
                printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
                printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
                printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
                printk("\n");
                dump_tlb_all();
        }

        show_code((unsigned int __user *) regs->cp0_epc);

        /*
         * Some chips may have other causes of machine check (e.g. SB1
         * graduation timer)
         */
        panic("Caught Machine Check exception - %scaused by multiple "
              "matching entries in the TLB.",
              (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
        int subcode;

        subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
                        >> VPECONTROL_EXCPT_SHIFT;
        switch (subcode) {
        case 0:
                printk(KERN_DEBUG "Thread Underflow\n");
                break;
        case 1:
                printk(KERN_DEBUG "Thread Overflow\n");
                break;
        case 2:
                printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
                break;
        case 3:
                printk(KERN_DEBUG "Gating Storage Exception\n");
                break;
        case 4:
                printk(KERN_DEBUG "YIELD Scheduler Exception\n");
                break;
        case 5:
                printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
                break;
        default:
                printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
                        subcode);
                break;
        }
        die_if_kernel("MIPS MT Thread exception in kernel", regs);

        force_sig(SIGILL, current);
}


asmlinkage void do_dsp(struct pt_regs *regs)
{
        if (cpu_has_dsp)
                panic("Unexpected DSP exception\n");

        force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
        /*
         * Game over - no way to handle this if it ever occurs.  Most probably
         * caused by a new unknown cpu type or after another deadly
         * hardware/software error.
         */
        show_regs(regs);
        panic("Caught reserved exception %ld - should not happen.",
              (regs->cp0_cause & 0x7f) >> 2);
}

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
        switch (current_cpu_type()) {
        case CPU_24K:
        case CPU_34K:
        case CPU_5KC:
                write_c0_ecc(0x80000000);
                back_to_back_c0_hazard();
                /* Set the PE bit (bit 31) in the c0_errctl register. */
                printk(KERN_INFO "Cache parity protection %sabled\n",
                       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
                break;
        case CPU_20KC:
        case CPU_25KF:
                /* Clear the DE bit (bit 16) in the c0_status register. */
                printk(KERN_INFO "Enable cache parity protection for "
                       "MIPS 20KC/25KF CPUs.\n");
                clear_c0_status(ST0_DE);
                break;
        default:
                break;
        }
}

asmlinkage void cache_parity_error(void)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned int reg_val;

        /* For the moment, report the problem and hang. */
        printk("Cache error exception:\n");
        printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
        reg_val = read_c0_cacheerr();
        printk("c0_cacheerr == %08x\n", reg_val);

        printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
               reg_val & (1<<30) ? "secondary" : "primary",
               reg_val & (1<<31) ? "data" : "insn");
        printk("Error bits: %s%s%s%s%s%s%s\n",
               reg_val & (1<<29) ? "ED " : "",
               reg_val & (1<<28) ? "ET " : "",
               reg_val & (1<<26) ? "EE " : "",
               reg_val & (1<<25) ? "EB " : "",
               reg_val & (1<<24) ? "EI " : "",
               reg_val & (1<<23) ? "E1 " : "",
               reg_val & (1<<22) ? "E0 " : "");
        printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
        if (reg_val & (1<<22))
                printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

        if (reg_val & (1<<23))
                printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

        panic("Can't handle the cache error!");
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned long depc, old_epc;
        unsigned int debug;

        printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
        depc = read_c0_depc();
        debug = read_c0_debug();
        printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
        if (debug & 0x80000000) {
                /*
                 * In branch delay slot.
                 * We cheat a little bit here and use EPC to calculate the
                 * debug return address (DEPC). EPC is restored after the
                 * calculation.
                 */
                old_epc = regs->cp0_epc;
                regs->cp0_epc = depc;
                __compute_return_epc(regs);
                depc = regs->cp0_epc;
                regs->cp0_epc = old_epc;
        } else
                depc += 4;
        write_c0_depc(depc);

#if 0
        printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
        write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 */
NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs)
{
        bust_spinlocks(1);
        printk("NMI taken!!!!\n");
        die("NMI", regs);
}

#define VECTORSPACING 0x100     /* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

/*
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256MB on the Nevada.  Oh well ...
 */
void *set_except_vector(int n, void *addr)
{
        unsigned long handler = (unsigned long) addr;
        unsigned long old_handler = exception_handlers[n];

        exception_handlers[n] = handler;
        if (n == 0 && cpu_has_divec) {
                *(u32 *)(ebase + 0x200) = 0x08000000 |
                                          (0x03ffffff & (handler >> 2));
                flush_icache_range(ebase + 0x200, ebase + 0x204);
        }
        return (void *)old_handler;
}
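
/*
 * Editor's note (not in the original source): 0x08000000 is the MIPS "j"
 * opcode; its 26-bit target field carries bits 27:2 of the destination
 * address, hence the shift right by two.  Bits 31:28 come from the
 * address of the delay slot, which is why handlers must live in the same
 * 256MB segment as the vector, as the comment above says.
 */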

static asmlinkage void do_default_vi(void)
{
        show_regs(get_irq_regs());
        panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
        unsigned long handler;
        unsigned long old_handler = vi_handlers[n];
        int srssets = current_cpu_data.srsets;
        u32 *w;
        unsigned char *b;

        if (!cpu_has_veic && !cpu_has_vint)
                BUG();

        if (addr == NULL) {
                handler = (unsigned long) do_default_vi;
                srs = 0;
        } else
                handler = (unsigned long) addr;
        vi_handlers[n] = (unsigned long) addr;

        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

        if (srs >= srssets)
                panic("Shadow register set %d not supported", srs);

        if (cpu_has_veic) {
                if (board_bind_eic_interrupt)
                        board_bind_eic_interrupt(n, srs);
        } else if (cpu_has_vint) {
                /* SRSMap is only defined if shadow sets are implemented */
                if (srssets > 1)
                        change_c0_srsmap(0xf << n*4, srs << n*4);
        }

        if (srs == 0) {
                /*
                 * If no shadow set is selected then use the default handler
                 * that does normal register saving and a standard interrupt exit
                 */

                extern char except_vec_vi, except_vec_vi_lui;
                extern char except_vec_vi_ori, except_vec_vi_end;
#ifdef CONFIG_MIPS_MT_SMTC
                /*
                 * We need to provide the SMTC vectored interrupt handler
                 * not only with the address of the handler, but with the
                 * Status.IM bit to be masked before going there.
                 */
                extern char except_vec_vi_mori;
                const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
#endif /* CONFIG_MIPS_MT_SMTC */
                const int handler_len = &except_vec_vi_end - &except_vec_vi;
                const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
                const int ori_offset = &except_vec_vi_ori - &except_vec_vi;

                if (handler_len > VECTORSPACING) {
                        /*
                         * Sigh... panicking won't help as the console
                         * is probably not configured :(
                         */
                        panic("VECTORSPACING too small");
                }

                memcpy(b, &except_vec_vi, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
                BUG_ON(n > 7);  /* Vector index %d exceeds SMTC maximum. */

                w = (u32 *)(b + mori_offset);
                *w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
                w = (u32 *)(b + lui_offset);
                *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
                w = (u32 *)(b + ori_offset);
                *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
                flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
        }
        else {
                /*
                 * In other cases jump directly to the interrupt handler.
                 *
                 * It is the handler's responsibility to save registers if
                 * required (e.g. hi/lo) and return from the exception using
                 * "eret".
                 */
                w = (u32 *)b;
                *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
                *w = 0;
                flush_icache_range((unsigned long)b, (unsigned long)(b+8));
        }

        return (void *)old_handler;
}
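
/*
 * Editor's note (not in the original source): in the srs == 0 path the
 * prebuilt stub except_vec_vi is memcpy'd into the vector, and the 16-bit
 * immediates of its lui/ori instruction pair are patched (via lui_offset
 * and ori_offset) with the high and low halfwords of the handler address,
 * the standard MIPS idiom for materialising a 32-bit constant.  Also note
 * that the direct-jump path masks the j target with 0x03fffff, while the
 * j target field is 26 bits wide (0x03ffffff, as used in
 * set_except_vector above); the shorter mask appears to be a latent typo
 * that is harmless only while handlers sit near the base of the segment.
 */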

void *set_vi_handler(int n, vi_handler_t addr)
{
        return set_vi_srs_handler(n, addr, 0);
}

/*
 * This is used by native signal handling
 */
asmlinkage int (*save_fp_context)(struct sigcontext __user *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext __user *sc);

extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);

#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext __user *sc)
{
        return raw_cpu_has_fpu
               ? _save_fp_context(sc)
               : fpu_emulator_save_context(sc);
}

static int smp_restore_fp_context(struct sigcontext __user *sc)
{
        return raw_cpu_has_fpu
               ? _restore_fp_context(sc)
               : fpu_emulator_restore_context(sc);
}
#endif

static inline void signal_init(void)
{
#ifdef CONFIG_SMP
        /* For now just do the cpu_has_fpu check when the functions are invoked */
        save_fp_context = smp_save_fp_context;
        restore_fp_context = smp_restore_fp_context;
#else
        if (cpu_has_fpu) {
                save_fp_context = _save_fp_context;
                restore_fp_context = _restore_fp_context;
        } else {
                save_fp_context = fpu_emulator_save_context;
                restore_fp_context = fpu_emulator_restore_context;
        }
#endif
}

#ifdef CONFIG_MIPS32_COMPAT

/*
 * This is used by 32-bit signal stuff on the 64-bit kernel
 */
asmlinkage int (*save_fp_context32)(struct sigcontext32 __user *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 __user *sc);

extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);

extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);

static inline void signal32_init(void)
{
        if (cpu_has_fpu) {
                save_fp_context32 = _save_fp_context32;
                restore_fp_context32 = _restore_fp_context32;
        } else {
                save_fp_context32 = fpu_emulator_save_context32;
                restore_fp_context32 = fpu_emulator_restore_context32;
        }
}
#endif

extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

void __init per_cpu_trap_init(void)
{
        unsigned int cpu = smp_processor_id();
        unsigned int status_set = ST0_CU0;
#ifdef CONFIG_MIPS_MT_SMTC
        int secondaryTC = 0;
        int bootTC = (cpu == 0);

        /*
         * Only do per_cpu_trap_init() for the first TC of each VPE.
         * Note that this hack assumes that the SMTC init code
         * assigns TCs consecutively and in ascending order.
         */

        if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
            ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
                secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */

        /*
         * Disable coprocessors and select 32-bit or 64-bit addressing
         * and the 16/32 or 32/32 FPR register model.  Reset the BEV
         * flag that some firmware may have left set and the TS bit (for
         * IP27).  Set XX for ISA IV code to work.
         */
#ifdef CONFIG_64BIT
        status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
        if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
                status_set |= ST0_XX;
        if (cpu_has_dsp)
                status_set |= ST0_MX;

        change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
                         status_set);

#ifdef CONFIG_CPU_MIPSR2
        if (cpu_has_mips_r2) {
                unsigned int enable = 0x0000000f;

                if (cpu_has_userlocal)
                        enable |= (1 << 29);

                write_c0_hwrena(enable);
        }
#endif

#ifdef CONFIG_MIPS_MT_SMTC
        if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */

        if (cpu_has_veic || cpu_has_vint) {
                write_c0_ebase(ebase);
                /* Setting vector spacing enables EI/VI mode  */
                change_c0_intctl(0x3e0, VECTORSPACING);
        }
        if (cpu_has_divec) {
                if (cpu_has_mipsmt) {
                        unsigned int vpflags = dvpe();
                        set_c0_cause(CAUSEF_IV);
                        evpe(vpflags);
                } else
                        set_c0_cause(CAUSEF_IV);
        }

        /*
         * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
         *
         *  o read IntCtl.IPTI to determine the timer interrupt
         *  o read IntCtl.IPPCI to determine the performance counter interrupt
         */
        if (cpu_has_mips_r2) {
                cp0_compare_irq = (read_c0_intctl() >> 29) & 7;
                cp0_perfcount_irq = (read_c0_intctl() >> 26) & 7;
                if (cp0_perfcount_irq == cp0_compare_irq)
                        cp0_perfcount_irq = -1;
        } else {
                cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
                cp0_perfcount_irq = -1;
        }

#ifdef CONFIG_MIPS_MT_SMTC
        }
#endif /* CONFIG_MIPS_MT_SMTC */

        cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
        TLBMISS_HANDLER_SETUP();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);

#ifdef CONFIG_MIPS_MT_SMTC
        if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
                cpu_cache_init();
                tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
        } else if (!secondaryTC) {
                /*
                 * First TC in non-boot VPE must do subset of tlb_init()
                 * for MMU control registers.
                 */
                write_c0_pagemask(PM_DEFAULT_MASK);
                write_c0_wired(0);
        }
#endif /* CONFIG_MIPS_MT_SMTC */
}

/* Install CPU exception handler */
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
        memcpy((void *)(ebase + offset), addr, size);
        flush_icache_range(ebase + offset, ebase + offset + size);
}

static char panic_null_cerr[] __initdata =
        "Trying to set NULL cache error exception handler";

/* Install uncached CPU exception handler */
void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_32BIT
        unsigned long uncached_ebase = KSEG1ADDR(ebase);
#endif
#ifdef CONFIG_64BIT
        unsigned long uncached_ebase = TO_UNCAC(ebase);
#endif

        if (!addr)
                panic(panic_null_cerr);

        memcpy((void *)(uncached_ebase + offset), addr, size);
}

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
        rdhwr_noopt = 1;
        return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);

void __init trap_init(void)
{
        extern char except_vec3_generic, except_vec3_r4000;
        extern char except_vec4;
        unsigned long i;

        if (cpu_has_veic || cpu_has_vint)
                ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64);
        else
                ebase = CAC_BASE;

        per_cpu_trap_init();

        /*
         * Copy the generic exception handlers to their final destination.
         * This will be overridden later as suitable for a particular
         * configuration.
         */
        set_handler(0x180, &except_vec3_generic, 0x80);

        /*
         * Setup default vectors
         */
        for (i = 0; i <= 31; i++)
                set_except_vector(i, handle_reserved);

        /*
         * Copy the EJTAG debug exception vector handler code to its final
         * destination.
         */
        if (cpu_has_ejtag && board_ejtag_handler_setup)
                board_ejtag_handler_setup();

        /*
         * Only some CPUs have the watch exceptions.
         */
        if (cpu_has_watch)
                set_except_vector(23, handle_watch);

        /*
         * Initialise interrupt handlers
         */
        if (cpu_has_veic || cpu_has_vint) {
                int nvec = cpu_has_veic ? 64 : 8;
                for (i = 0; i < nvec; i++)
                        set_vi_handler(i, NULL);
        }
        else if (cpu_has_divec)
                set_handler(0x200, &except_vec4, 0x8);

        /*
         * Some CPUs can enable/disable cache parity detection, but they
         * do it in different ways.
         */
        parity_protection_init();

        /*
         * The Data Bus Errors / Instruction Bus Errors are signaled
         * by external hardware.  Therefore these two exceptions
         * may have board specific handlers.
         */
        if (board_be_init)
                board_be_init();

        set_except_vector(0, handle_int);
        set_except_vector(1, handle_tlbm);
        set_except_vector(2, handle_tlbl);
        set_except_vector(3, handle_tlbs);

        set_except_vector(4, handle_adel);
        set_except_vector(5, handle_ades);

        set_except_vector(6, handle_ibe);
        set_except_vector(7, handle_dbe);

        set_except_vector(8, handle_sys);
        set_except_vector(9, handle_bp);
        set_except_vector(10, rdhwr_noopt ? handle_ri :
                          (cpu_has_vtag_icache ?
                           handle_ri_rdhwr_vivt : handle_ri_rdhwr));
        set_except_vector(11, handle_cpu);
        set_except_vector(12, handle_ov);
        set_except_vector(13, handle_tr);

        if (current_cpu_type() == CPU_R6000 ||
            current_cpu_type() == CPU_R6000A) {
                /*
                 * The R6000 is the only R-series CPU that features a machine
                 * check exception (similar to the R4000 cache error) and
                 * unaligned ldc1/sdc1 exception.  The handlers have not been
                 * written yet.  Well, anyway there is no R6000 machine on the
                 * current list of targets for Linux/MIPS.
                 * (Duh, crap, there is someone with a triple R6k machine)
                 */
                //set_except_vector(14, handle_mc);
                //set_except_vector(15, handle_ndc);
        }


        if (board_nmi_handler_setup)
                board_nmi_handler_setup();

        if (cpu_has_fpu && !cpu_has_nofpuex)
                set_except_vector(15, handle_fpe);

        set_except_vector(22, handle_mdmx);

        if (cpu_has_mcheck)
                set_except_vector(24, handle_mcheck);

        if (cpu_has_mipsmt)
                set_except_vector(25, handle_mt);

        set_except_vector(26, handle_dsp);

        if (cpu_has_vce)
                /* Special exception: R4[04]00 uses also the divec space. */
                memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
        else if (cpu_has_4kex)
                memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
        else
                memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);

        signal_init();
#ifdef CONFIG_MIPS32_COMPAT
        signal32_init();
#endif

        flush_icache_range(ebase, ebase + 0x400);
        flush_tlb_handlers();
}