OpenCores
URL https://opencores.org/ocsvn/or1k_old/or1k_old/trunk

Subversion Repositories or1k_old

[/] [or1k_old/] [trunk/] [rc203soc/] [sw/] [uClinux/] [arch/] [i386/] [kernel/] [traps.c] - Blame information for rev 1782

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1623 jcastillo
/*
2
 *  linux/arch/i386/traps.c
3
 *
4
 *  Copyright (C) 1991, 1992  Linus Torvalds
5
 */
6
 
7
/*
8
 * 'Traps.c' handles hardware traps and faults after we have saved some
9
 * state in 'asm.s'. Currently mostly a debugging-aid, will be extended
10
 * to mainly kill the offending process (probably by giving it a signal,
11
 * but possibly by killing it outright if necessary).
12
 */
13
#include <linux/config.h>
14
#include <linux/head.h>
15
#include <linux/sched.h>
16
#include <linux/kernel.h>
17
#include <linux/string.h>
18
#include <linux/errno.h>
19
#include <linux/ptrace.h>
20
#include <linux/config.h>
21
#include <linux/timer.h>
22
#include <linux/mm.h>
23
 
24
#include <asm/system.h>
25
#include <asm/segment.h>
26
#include <asm/io.h>
27
#include <asm/pgtable.h>
28
 
29
/* Entry points implemented in entry.S. */
asmlinkage int system_call(void);
asmlinkage void lcall7(void);

/* Default (empty) LDT descriptor used by tasks without a private LDT. */
struct desc_struct default_ldt = { 0, 0 };
32
 
33
static inline void console_verbose(void)
{
        extern int console_loglevel;

        /*
         * Crank the console log level up to maximum so the register and
         * stack dump that follows is actually shown on the console.
         */
        console_loglevel = 15;
}
38
 
39
/*
 * Generate a trap handler do_<name>() that records the trap number and
 * hardware error code in the task's TSS, posts 'signr' to 'tsk', and
 * calls die_if_kernel() in case the fault came from kernel mode.
 */
#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        tsk->tss.error_code = error_code; \
        tsk->tss.trap_no = trapnr; \
        force_sig(signr, tsk); \
        die_if_kernel(str,regs,error_code); \
}

/*
 * As DO_ERROR, but traps raised while running in virtual-8086 mode are
 * first offered to the vm86 emulation layer; only traps it does not
 * handle fall through to the normal signal path.
 */
#define DO_VM86_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (regs->eflags & VM_MASK) { \
                if (!handle_vm86_trap((struct vm86_regs *) regs, error_code, trapnr)) \
                        return; \
                /* else fall through */ \
        } \
        tsk->tss.error_code = error_code; \
        tsk->tss.trap_no = trapnr; \
        force_sig(signr, tsk); \
        die_if_kernel(str,regs,error_code); \
}
61
 
62
/*
 * Fetch one byte from seg:addr by temporarily loading %fs with 'seg'.
 * The caller's %fs is saved and restored around the access.
 */
#define get_seg_byte(seg,addr) ({ \
register unsigned char __res; \
__asm__("push %%fs;mov %%ax,%%fs;movb %%fs:%2,%%al;pop %%fs" \
        :"=a" (__res):"0" (seg),"m" (*(addr))); \
__res;})

/* As get_seg_byte(), but fetches a 32-bit word from seg:addr. */
#define get_seg_long(seg,addr) ({ \
register unsigned long __res; \
__asm__("push %%fs;mov %%ax,%%fs;movl %%fs:%2,%%eax;pop %%fs" \
        :"=a" (__res):"0" (seg),"m" (*(addr))); \
__res;})

/* Return the current value of the %fs segment register. */
#define _fs() ({ \
register unsigned short __res; \
__asm__("mov %%fs,%%ax":"=a" (__res):); \
__res;})
78
 
79
void page_exception(void);

/* Low-level trap entry stubs, implemented in assembly (entry.S). */
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void double_fault(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void reserved(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);

/* Number of stack words die_if_kernel() dumps in its "Stack:" section. */
int kstack_depth_to_print = 24;

/*
 * These constants are for searching for possible module text
 * segments.  VMALLOC_OFFSET comes from mm/vmalloc.c; MODULE_RANGE is
 * a guess of how much space is likely to be vmalloced.
 */
#define VMALLOC_OFFSET (8*1024*1024)
#define MODULE_RANGE (8*1024*1024)
110
 
111
/*
 * Print a full oops dump (registers, raw stack words, heuristic call
 * trace, code bytes at EIP) and kill the current process with SIGSEGV.
 * Faults that originated in user or vm86 mode simply return: the caller
 * has already posted a signal to the offending task.
 */
/*static*/ void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        int i;
        unsigned long esp;
        unsigned short ss;
        unsigned long *stack, addr, module_start, module_end;
        extern char start_kernel, _etext;

        /* Default: dump the kernel stack as saved at trap entry. */
        esp = (unsigned long) &regs->esp;
        ss = KERNEL_DS;
        /* Nothing fatal about a user-mode or vm86-mode fault. */
        if ((regs->eflags & VM_MASK) || (3 & regs->cs) == 3)
                return;
        /* NOTE(review): unreachable -- user-mode faults already returned
         * above, so this user esp/ss selection never triggers. */
        if (regs->cs & 3) {
                esp = regs->esp;
                ss = regs->ss;
        }
        console_verbose();
        printk("%s: %04lx\n", str, err & 0xffff);
        printk("CPU:    %d\n", smp_processor_id());
        printk("EIP:    %04x:[<%08lx>]\nEFLAGS: %08lx\n", 0xffff & regs->cs,regs->eip,regs->eflags);
        printk("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk("ds: %04x   es: %04x   fs: %04x   gs: %04x   ss: %04x\n",
                regs->ds, regs->es, regs->fs, regs->gs, ss);
        /* Read the task register into i so it can be printed below. */
        store_TR(i);
        if (STACK_MAGIC != *(unsigned long *)current->kernel_stack_page)
                printk("Corrupted stack page\n");
        printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)\nStack: ",
                current->comm, current->pid, 0xffff & i, current->kernel_stack_page);
        /* Dump raw stack words, stopping at a page boundary. */
        stack = (unsigned long *) esp;
        for(i=0; i < kstack_depth_to_print; i++) {
                if (((long) stack & 4095) == 0)
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n       ");
                printk("%08lx ", get_seg_long(ss,stack++));
        }
        printk("\nCall Trace: ");
        stack = (unsigned long *) esp;
        i = 1;
        /* Candidate range where module text may have been vmalloc'ed. */
        module_start = ((high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1));
        module_end = module_start + MODULE_RANGE;
        while (((long) stack & 4095) != 0) {
                addr = get_seg_long(ss, stack++);
                /*
                 * If the address is either in the text segment of the
                 * kernel, or in the region which contains vmalloc'ed
                 * memory, it *may* be the address of a calling
                 * routine; if so, print it so that someone tracing
                 * down the cause of the crash will be able to figure
                 * out the call path that was taken.
                 */
                if (((addr >= (unsigned long) &start_kernel) &&
                     (addr <= (unsigned long) &_etext)) ||
                    ((addr >= module_start) && (addr <= module_end))) {
                        if (i && ((i % 8) == 0))
                                printk("\n       ");
                        printk("[<%08lx>] ", addr);
                        i++;
                }
        }
        /* Dump 20 code bytes starting at the faulting EIP. */
        printk("\nCode: ");
        for(i=0;i<20;i++)
                printk("%02x ",0xff & get_seg_byte(regs->cs,(i+(char *)regs->eip)));
        printk("\n");
        do_exit(SIGSEGV);
}
180
 
181
/*
 * Instantiate the individual trap handlers.  Most act on the current
 * task; the coprocessor segment overrun (trap 9) is charged to
 * last_task_used_math, the real owner of the FPU state.
 */
DO_VM86_ERROR( 0, SIGFPE,  "divide error", divide_error, current)
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3, current)
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow, current)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds, current)
DO_ERROR( 6, SIGILL,  "invalid operand", invalid_op, current)
DO_VM86_ERROR( 7, SIGSEGV, "device not available", device_not_available, current)
DO_ERROR( 8, SIGSEGV, "double fault", double_fault, current)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun, last_task_used_math)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS, current)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present, current)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment, current)
DO_ERROR(17, SIGSEGV, "alignment check", alignment_check, current)
DO_ERROR(18, SIGSEGV, "reserved", reserved, current)

/* divide_error is after ret_from_sys_call in entry.S */
/*
 * These declarations bind the two symbols to their entry.S addresses so
 * do_general_protection() can test whether a faulting EIP falls inside
 * the [ret_from_sys_call, divide_error) code range.
 */
asmlinkage void ret_from_sys_call(void) __asm__("ret_from_sys_call");
asmlinkage void divide_error(void)      __asm__("divide_error");
198
 
199
/*
 * General protection fault handler (trap 13).  vm86-mode faults go to
 * the vm86 emulator.  A kernel-mode fault inside the syscall-return
 * window is quietly turned into SIGSEGV (see the HACK comment); any
 * other kernel-mode fault is an oops.  Finally the current task gets
 * SIGSEGV with the trap recorded in its TSS.
 */
asmlinkage void do_general_protection(struct pt_regs * regs, long error_code)
{
        if (regs->eflags & VM_MASK) {
                handle_vm86_fault((struct vm86_regs *) regs, error_code);
                return;
        }

        /*
         * HACK HACK HACK  :)  Fixing the segment invalid on syscall return
         * barfage for 2.0 has been put into the too-hard basket but having
         * a user producing endless GPFs is unacceptable as well. - Paul G.
         */
        if ((regs->cs & 3) != 3) {
                /* Fault while in kernel mode. */
                if (regs->eip >= (unsigned long)ret_from_sys_call &&
                    regs->eip < (unsigned long)divide_error) {
                        /* Rate-limit the complaint to five messages. */
                        static int moancount = 0;
                        if (moancount < 5) {
                                printk(KERN_INFO "Ignoring GPF attempt from program \"%s\" (pid %d).\n",
                                        current->comm, current->pid);
                                moancount++;
                        }
                        do_exit(SIGSEGV);
                }
                else
                        die_if_kernel("general protection",regs,error_code);
        }
        current->tss.error_code = error_code;
        current->tss.trap_no = 13;
        force_sig(SIGSEGV, current);
}
229
 
230
/*
 * NMI handler.  With CONFIG_SMP_NMI_INVAL, NMIs carry cross-CPU TLB
 * flush requests; otherwise an unexpected NMI (typically a RAM parity
 * or power-management artifact) is reported and execution continues.
 */
asmlinkage void do_nmi(struct pt_regs * regs, long error_code)
{
#ifdef CONFIG_SMP_NMI_INVAL
        smp_flush_tlb_rcv();
#else
#ifndef CONFIG_IGNORE_NMI
        printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
        printk("You probably have a hardware problem with your RAM chips or a\n");
        printk("power saving mode enabled.\n");
#endif  
#endif
}
242
 
243
/*
 * Debug trap handler (trap 1).  vm86 traps are routed to the vm86
 * layer; otherwise the current task gets SIGTRAP.  A kernel-mode trap
 * clears %db7 so the hardware breakpoint does not immediately refire.
 */
asmlinkage void do_debug(struct pt_regs * regs, long error_code)
{
        if (regs->eflags & VM_MASK) {
                handle_vm86_trap((struct vm86_regs *) regs, error_code, 1);
                return;
        }
        force_sig(SIGTRAP, current);
        current->tss.trap_no = 1;
        current->tss.error_code = error_code;
        if ((regs->cs & 3) == 0) {
                /* If this is a kernel mode trap, then reset db7 and allow us to continue */
                __asm__("movl %0,%%db7"
                        : /* no output */
                        : "r" (0));
                return;
        }
        /* Only reached from user mode, where die_if_kernel() is a no-op. */
        die_if_kernel("debug",regs,error_code);
}
261
 
262
/*
 * Note that we play around with the 'TS' bit to hopefully get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
/*
 * Handle an FPU exception: save the faulting task's FPU state into its
 * TSS, mark the FPU as unused, and deliver SIGFPE (trap 16) to the
 * owner of the FPU state (current on SMP, last_task_used_math on UP).
 */
void math_error(void)
{
        struct task_struct * task;

        /* Clear TS so we can touch the FPU without faulting again. */
        clts();
#ifdef __SMP__
        task = current;
#else
        task = last_task_used_math;
        last_task_used_math = NULL;
        if (!task) {
                /* Spurious exception with no FPU owner: just clear it. */
                __asm__("fnclex");
                return;
        }
#endif
        /*
         *      Save the info for the exception handler
         */
        __asm__ __volatile__("fnsave %0":"=m" (task->tss.i387.hard));
        task->flags&=~PF_USEDFPU;
        /* Re-arm TS so the next FPU use traps into math_state_restore(). */
        stts();

        force_sig(SIGFPE, task);
        task->tss.trap_no = 16;
        task->tss.error_code = 0;
}
293
 
294
/*
 * Coprocessor error trap (trap 16).  Suppress the legacy IRQ13 delivery
 * path from now on and hand off to math_error().
 */
asmlinkage void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_irq13 = 1;
        math_error();
}
299
 
300
/*
 * Trap 15: the P6 local-APIC spurious interrupt erratum.  Intentionally
 * ignored; the diagnostic printk is kept but compiled out.
 */
asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs,
                                          long error_code)
{
#if 0
        /* No need to warn about this any longer. */
        printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
308
 
309
/*
 *  'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 */
asmlinkage void math_state_restore(void)
{
        __asm__ __volatile__("clts");           /* Allow maths ops (or we recurse) */

/*
 *      SMP is actually simpler than uniprocessor for once. Because
 *      we can't pull the delayed FPU switching trick Linus does
 *      we simply have to do the restore each context switch and
 *      set the flag. switch_to() will always save the state in
 *      case we swap processors. We also don't use the coprocessor
 *      timer - IRQ 13 mode isn't used with SMP machines (thank god).
 */
#ifndef __SMP__
        /* Lazy FPU switch: save the previous owner's state on handover. */
        if (last_task_used_math == current)
                return;
        if (last_task_used_math)
                __asm__("fnsave %0":"=m" (last_task_used_math->tss.i387));
        else
                __asm__("fnclex");
        last_task_used_math = current;
#endif

        if(current->used_math)
                __asm__("frstor %0": :"m" (current->tss.i387));
        else
        {
                /*
                 *      Our first FPU usage, clean the chip.
                 */
                __asm__("fninit");
                current->used_math = 1;
        }
        current->flags|=PF_USEDFPU;             /* So we fnsave on switch_to() */
}
350
 
351
#ifndef CONFIG_MATH_EMULATION

/*
 * Stub used when the kernel is built without the FPU emulator: a task
 * that executes FPU instructions on a machine with no coprocessor is
 * killed with SIGFPE.
 */
asmlinkage void math_emulate(long arg)
{
  printk("math-emulation not enabled and no coprocessor found.\n");
  printk("killing %s.\n",current->comm);
  force_sig(SIGFPE,current);
  /* Reschedule so the pending signal is acted upon. */
  schedule();
}

#endif /* CONFIG_MATH_EMULATION */
362
 
363
/*
 * Pseudo-descriptor (16-bit limit + 32-bit linear base) in the exact
 * 6-byte format the LIDT instruction expects; filled in and loaded by
 * trap_init_f00f_bug() below.
 */
struct {
        unsigned short limit;
        unsigned long addr __attribute__((packed));
} idt_descriptor;
367
 
368
void trap_init_f00f_bug(void)
369
{
370
        pgd_t * pgd;
371
        pmd_t * pmd;
372
        pte_t * pte;
373
        unsigned long page;
374
        unsigned long idtpage = (unsigned long)idt;
375
        struct desc_struct *alias_idt;
376
 
377
        printk("alias mapping IDT readonly ... ");
378
 
379
                /* just to get free address space */
380
        page = (unsigned long) vmalloc (PAGE_SIZE);
381
 
382
        alias_idt = (void *)(page + (idtpage & ~PAGE_MASK));
383
        idt_descriptor.limit = 256*8-1;
384
        idt_descriptor.addr = VMALLOC_VMADDR(alias_idt);
385
 
386
        /*
387
         * alias map the original idt to the alias page:
388
         */
389
        page = VMALLOC_VMADDR(page);
390
        pgd = pgd_offset(&init_mm, page);
391
        pmd = pmd_offset(pgd, page);
392
        pte = pte_offset(pmd, page);
393
                /* give memory back to the pool, don't need it */
394
        free_page(pte_page(*pte));
395
                /* ... and set the readonly alias */
396
        set_pte(pte, mk_pte(idtpage  & PAGE_MASK, PAGE_KERNEL));
397
        *pte = pte_wrprotect(*pte);
398
        flush_tlb_all();
399
 
400
                /* now we have the mapping ok, we can do LIDT */
401
         __asm__ __volatile__("\tlidt %0": "=m" (idt_descriptor));
402
 
403
        printk(" ... done\n");
404
}
405
 
406
 
407
/*
 * Install all trap/system gates in the IDT and set up the GDT TSS/LDT
 * slots for every task.  Secondary CPUs (smptrap already set) only
 * clear the NT flag and load the LDT, then return.
 */
void trap_init(void)
{
        int i;
        struct desc_struct * p;
        static int smptrap=0;

        if(smptrap)
        {
                /* Secondary CPU: clear NT in EFLAGS and load the LDT. */
                __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
                load_ldt(0);
                return;
        }
        smptrap++;
        /* Probe the BIOS "EISA" signature at physical 0x0FFFD9. */
        if (strncmp((char*)0x0FFFD9, "EISA", 4) == 0)
                EISA_bus = 1;
        /* iBCS2 compatibility entry point (lcall 7,0). */
        set_call_gate(&default_ldt,lcall7);
        set_trap_gate(0,&divide_error);
        set_trap_gate(1,&debug);
        set_trap_gate(2,&nmi);
        set_system_gate(3,&int3);       /* int3-5 can be called from all */
        set_system_gate(4,&overflow);
        set_system_gate(5,&bounds);
        set_trap_gate(6,&invalid_op);
        set_trap_gate(7,&device_not_available);
        set_trap_gate(8,&double_fault);
        set_trap_gate(9,&coprocessor_segment_overrun);
        set_trap_gate(10,&invalid_TSS);
        set_trap_gate(11,&segment_not_present);
        set_trap_gate(12,&stack_segment);
        set_trap_gate(13,&general_protection);
        set_trap_gate(14,&page_fault);
        set_trap_gate(15,&spurious_interrupt_bug);
        set_trap_gate(16,&coprocessor_error);
        set_trap_gate(17,&alignment_check);
        /* Point all remaining vectors up to 48 at the catch-all. */
        for (i=18;i<48;i++)
                set_trap_gate(i,&reserved);
        set_system_gate(0x80,&system_call);
/* set up GDT task & ldt entries */
        p = gdt+FIRST_TSS_ENTRY;
        set_tss_desc(p, &init_task.tss);
        p++;
        set_ldt_desc(p, &default_ldt, 1);
        p++;
        /* Zero the TSS/LDT descriptor pair for every other task slot. */
        for(i=1 ; i<NR_TASKS ; i++) {
                p->a=p->b=0;
                p++;
                p->a=p->b=0;
                p++;
        }
/* Clear NT, so that we won't have troubles with that later on */
        __asm__("pushfl ; andl $0xffffbfff,(%esp) ; popfl");
        load_TR(0);
        load_ldt(0);
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.