/*
 *  arch/s390/kernel/traps.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 *  Derived from "arch/i386/kernel/traps.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>
#include <linux/bug.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>

/* Called from entry.S only */
extern void handle_per_exception(struct pt_regs *regs);

typedef void pgm_check_handler_t(struct pt_regs *, long);
pgm_check_handler_t *pgm_check_table[128];

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_PROCESS_DEBUG
int sysctl_userprocess_debug = 1;
#else
int sysctl_userprocess_debug = 0;
#endif
#endif

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_monitor_call;

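/*
 * Note on the macro below: in the s390 ABI general register %r15 holds the
 * current stack pointer, so "la %0,0(15)" simply copies the value of %r15
 * into the output operand, yielding the address of the current stack frame.
 */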
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack, which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
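/*
 * __show_trace() below follows the back_chain pointer that the s390
 * calling convention saves in every stack frame; a zero back chain ends
 * the chain and is taken as the boundary of an interrupt frame.  The
 * caller address is read from sf->gprs[8] on the assumption that struct
 * stack_frame holds the saved registers %r6-%r15, which makes index 8
 * the saved return register %r14.
 */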
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		low = sp;
		sp = regs->gprs[15];
	}
}

void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	printk("\n");
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i * sizeof (long) % 32) == 0))
			printk("\n       ");
		printk("%p ", (void *)*stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

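/*
 * mask_bits() extracts a PSW bit field and right-justifies it without an
 * explicit shift count: (~bits + 1) is -bits, and (-bits & bits) isolates
 * the lowest set bit of the mask, so dividing by it shifts the masked
 * field down to bit 0.  For example, with a two-bit field mask of 0xc0
 * the divisor is 0x40, i.e. a right shift by six.
 */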
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_BASE_BITS));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

	show_code(regs);
}

/* This is called from fs/proc/array.c */
char *task_show_regs(struct task_struct *task, char *buffer)
{
	struct pt_regs *regs;

	regs = task_pt_regs(task);
	buffer += sprintf(buffer, "task: %p, ksp: %p\n",
			  task, (void *)task->thread.ksp);
	buffer += sprintf(buffer, "User PSW : %p %p\n",
			  (void *) regs->psw.mask, (void *)regs->psw.addr);

	buffer += sprintf(buffer, "User GPRS: " FOURLONG,
			  regs->gprs[0], regs->gprs[1],
			  regs->gprs[2], regs->gprs[3]);
	buffer += sprintf(buffer, "           " FOURLONG,
			  regs->gprs[4], regs->gprs[5],
			  regs->gprs[6], regs->gprs[7]);
	buffer += sprintf(buffer, "           " FOURLONG,
			  regs->gprs[8], regs->gprs[9],
			  regs->gprs[10], regs->gprs[11]);
	buffer += sprintf(buffer, "           " FOURLONG,
			  regs->gprs[12], regs->gprs[13],
			  regs->gprs[14], regs->gprs[15]);
	buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
			  task->thread.acrs[0], task->thread.acrs[1],
			  task->thread.acrs[2], task->thread.acrs[3]);
	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
			  task->thread.acrs[4], task->thread.acrs[5],
			  task->thread.acrs[6], task->thread.acrs[7]);
	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
			  task->thread.acrs[8], task->thread.acrs[9],
			  task->thread.acrs[10], task->thread.acrs[11]);
	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
			  task->thread.acrs[12], task->thread.acrs[13],
			  task->thread.acrs[14], task->thread.acrs[15]);
	return buffer;
}

static DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	oops_enter();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
	print_modules();
	notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}

static void inline
report_user_fault(long interruption_code, struct pt_regs *regs)
{
#if defined(CONFIG_SYSCTL)
	if (!sysctl_userprocess_debug)
		return;
#endif
#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
	printk("User process fault: interruption code 0x%lX\n",
	       interruption_code);
	show_regs(regs);
#endif
}

int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}

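/*
 * do_trap() is the common back end for the program-check handlers below:
 * for faults raised in user mode it records the trap number and delivers
 * the prepared siginfo to the current task; for faults in kernel mode it
 * first tries an exception-table fixup, then report_bug() (so WARN traps
 * just return), and finally falls back to die().
 */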
static void __kprobes inline do_trap(long interruption_code, int signr,
				     char *str, struct pt_regs *regs,
				     siginfo_t *info)
{
	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (notify_die(DIE_TRAP, str, regs, interruption_code,
		       interruption_code, signr) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		struct task_struct *tsk = current;

		tsk->thread.trap_no = interruption_code & 0xffff;
		force_sig_info(signr, info, tsk);
		report_user_fault(interruption_code, regs);
	} else {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else {
			enum bug_trap_type btt;

			btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
			if (btt == BUG_TRAP_TYPE_WARN)
				return;
			die(str, regs, interruption_code);
		}
	}
}

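/*
 * At program-check time the PSW address already points past the failing
 * instruction; subtracting the instruction length code that the hardware
 * stores in the lowcore (pgm_ilc) rewinds to its first byte.
 */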
static inline void __user *get_check_address(struct pt_regs *regs)
{
	return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
}

void __kprobes do_single_step(struct pt_regs *regs)
{
	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
		       SIGTRAP) == NOTIFY_STOP) {
		return;
	}
	if ((current->ptrace & PT_PTRACED) != 0)
		force_sig(SIGTRAP, current);
}

static void default_trap_handler(struct pt_regs * regs, long interruption_code)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		local_irq_enable();
		report_user_fault(interruption_code, regs);
		do_exit(SIGSEGV);
	} else
		die("Unknown program exception", regs, interruption_code);
}

#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
static void name(struct pt_regs * regs, long interruption_code) \
{ \
	siginfo_t info; \
	info.si_signo = signr; \
	info.si_errno = 0; \
	info.si_code = sicode; \
	info.si_addr = siaddr; \
	do_trap(interruption_code, signr, str, regs, &info); \
}

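/*
 * Each invocation below generates one small handler.  The first one, for
 * example, expands to an addressing_exception() function that fills a
 * siginfo_t with SIGILL/ILL_ILLADR and the faulting address from
 * get_check_address(regs), then hands everything to do_trap().
 */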
DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
	      ILL_ILLADR, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
	      FPE_INTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception,
	      FPE_INTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception,
	      FPE_FLTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception,
	      FPE_FLTUND, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception,
	      FPE_FLTRES, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception,
	      FPE_FLTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception,
	      FPE_FLTINV, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
	      ILL_PRVOPC, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
	      ILL_ILLOPN, get_check_address(regs))

static inline void
do_fp_trap(struct pt_regs *regs, void __user *location,
	   int fpc, long interruption_code)
{
	siginfo_t si;

	si.si_signo = SIGFPE;
	si.si_errno = 0;
	si.si_addr = location;
	si.si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si.si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si.si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si.si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si.si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si.si_code = FPE_FLTRES;
	}
	current->thread.ieee_instruction_pointer = (addr_t) location;
	do_trap(interruption_code, SIGFPE,
		"floating point exception", regs, &si);
}

static void illegal_op(struct pt_regs * regs, long interruption_code)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			if (current->ptrace & PT_PTRACED)
				force_sig(SIGTRAP, current);
			else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, fall
		 * through to the SIGILL handling below.
		 */
		if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal == SIGSEGV) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"user address fault", regs, &info);
	} else
#endif
	if (signal) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPC;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"illegal operation", regs, &info);
	}
}

#ifdef CONFIG_MATHEMU
asmlinkage void
specification_exception(struct pt_regs * regs, long interruption_code)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"specification exception", regs, &info);
	}
}
#else
DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
	      ILL_ILLOPN, get_check_address(regs));
#endif

static void data_exception(struct pt_regs * regs, long interruption_code)
{
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"data exception", regs, &info);
	}
}

static void space_switch_exception(struct pt_regs * regs, long int_code)
{
	siginfo_t info;

	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = get_check_address(regs);
	do_trap(int_code, SIGILL, "space switch event", regs, &info);
}

asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}

/* init is done in lowcore.S and head.S */

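/*
 * trap_init() only fills pgm_check_table[]; the low-level program check
 * handler in entry.S is assumed to index this table with the interruption
 * code reported by the hardware in the lowcore and to call the selected
 * handler with the pt_regs pointer and interruption code as arguments.
 */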
void __init trap_init(void)
{
	int i;

	for (i = 0; i < 128; i++)
		pgm_check_table[i] = &default_trap_handler;
	pgm_check_table[1] = &illegal_op;
	pgm_check_table[2] = &privileged_op;
	pgm_check_table[3] = &execute_exception;
	pgm_check_table[4] = &do_protection_exception;
	pgm_check_table[5] = &addressing_exception;
	pgm_check_table[6] = &specification_exception;
	pgm_check_table[7] = &data_exception;
	pgm_check_table[8] = &overflow_exception;
	pgm_check_table[9] = &divide_exception;
	pgm_check_table[0x0A] = &overflow_exception;
	pgm_check_table[0x0B] = &divide_exception;
	pgm_check_table[0x0C] = &hfp_overflow_exception;
	pgm_check_table[0x0D] = &hfp_underflow_exception;
	pgm_check_table[0x0E] = &hfp_significance_exception;
	pgm_check_table[0x0F] = &hfp_divide_exception;
	pgm_check_table[0x10] = &do_dat_exception;
	pgm_check_table[0x11] = &do_dat_exception;
	pgm_check_table[0x12] = &translation_exception;
	pgm_check_table[0x13] = &special_op_exception;
#ifdef CONFIG_64BIT
	pgm_check_table[0x38] = &do_dat_exception;
	pgm_check_table[0x39] = &do_dat_exception;
	pgm_check_table[0x3A] = &do_dat_exception;
	pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_64BIT */
	pgm_check_table[0x15] = &operand_exception;
	pgm_check_table[0x1C] = &space_switch_exception;
	pgm_check_table[0x1D] = &hfp_sqrt_exception;
	pgm_check_table[0x40] = &do_monitor_call;
	pfault_irq_init();
}