path: root/arch/s390/kernel/traps.c
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d  /arch/s390/kernel/traps.c
Linux-2.6.12-rc2  v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it.

Let it rip!
Diffstat (limited to 'arch/s390/kernel/traps.c')
-rw-r--r--  arch/s390/kernel/traps.c  738
1 file changed, 738 insertions, 0 deletions
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
new file mode 100644
index 000000000000..8b90e9528b91
--- /dev/null
+++ b/arch/s390/kernel/traps.c
@@ -0,0 +1,738 @@
/*
 * arch/s390/kernel/traps.c
 *
 * S390 version
 *   Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *   Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *              Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *
 * Derived from "arch/i386/kernel/traps.c"
 *   Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/mathemu.h>
#include <asm/cpcmd.h>
#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>

/* Called from entry.S only */
extern void handle_per_exception(struct pt_regs *regs);

typedef void pgm_check_handler_t(struct pt_regs *, long);
pgm_check_handler_t *pgm_check_table[128];

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_PROCESS_DEBUG
int sysctl_userprocess_debug = 1;
#else
int sysctl_userprocess_debug = 0;
#endif
#endif

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_pseudo_page_fault;
#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code);
static ext_int_info_t ext_int_pfault;
#endif
extern pgm_check_handler_t do_monitor_call;

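/*
 * In the s390 ABI %r15 is the stack pointer; "la %0,0(15)" simply copies
 * its current value into sp.
 */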
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_ARCH_S390X
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_ARCH_S390X */
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_ARCH_S390X */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflown
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
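/*
 * Note: gprs[8] of struct stack_frame below is the saved %r14, i.e. the
 * return address, since the frame's save area holds the callee-saved
 * registers %r6-%r15.
 */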
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
        struct stack_frame *sf;
        struct pt_regs *regs;

        while (1) {
                sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
                printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
                /* Follow the backchain. */
                while (1) {
                        low = sp;
                        sp = sf->back_chain & PSW_ADDR_INSN;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
                        printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
                        print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
                }
                /* Zero backchain detected, check for interrupt frame. */
                sp = (unsigned long) (sf + 1);
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
                printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
                print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
                low = sp;
                sp = regs->gprs[15];
        }
}

void show_trace(struct task_struct *task, unsigned long * stack)
{
        register unsigned long __r15 asm ("15");
        unsigned long sp;

        sp = (unsigned long) stack;
        if (!sp)
                sp = task ? task->thread.ksp : __r15;
        printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
        sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
                          S390_lowcore.panic_stack);
#endif
        sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
                          S390_lowcore.async_stack);
        if (task)
                __show_trace(sp, (unsigned long) task->thread_info,
                             (unsigned long) task->thread_info + THREAD_SIZE);
        else
                __show_trace(sp, S390_lowcore.thread_info,
                             S390_lowcore.thread_info + THREAD_SIZE);
        printk("\n");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        register unsigned long * __r15 asm ("15");
        unsigned long *stack;
        int i;

        /*
         * Debugging aid: "show_stack(NULL, NULL);" prints the
         * back trace for this cpu.
         */

        if (!sp)
                sp = task ? (unsigned long *) task->thread.ksp : __r15;

        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
                        break;
                if (i && ((i * sizeof (long) % 32) == 0))
                        printk("\n ");
                printk("%p ", (void *)*stack++);
        }
        printk("\n");
        show_trace(task, sp);
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        show_stack(0, 0);
}

EXPORT_SYMBOL(dump_stack);
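
/*
 * Debugging aid: dump_stack() can be called from anywhere in the kernel
 * to print the current call chain, e.g.
 *
 *      if (unexpected_condition)
 *              dump_stack();
 */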

void show_registers(struct pt_regs *regs)
{
        mm_segment_t old_fs;
        char *mode;
        int i;

        mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
        printk("%s PSW : %p %p",
               mode, (void *) regs->psw.mask,
               (void *) regs->psw.addr);
        print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
        printk("%s GPRS: " FOURLONG, mode,
               regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
        printk(" " FOURLONG,
               regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
        printk(" " FOURLONG,
               regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
        printk(" " FOURLONG,
               regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

#if 0
        /* FIXME: this isn't needed any more but it changes the ksymoops
         * input. To remove or not to remove ... */
        save_access_regs(regs->acrs);
        printk("%s ACRS: %08x %08x %08x %08x\n", mode,
               regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
        printk(" %08x %08x %08x %08x\n",
               regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
        printk(" %08x %08x %08x %08x\n",
               regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
        printk(" %08x %08x %08x %08x\n",
               regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);
#endif

        /*
         * Print the first 20 bytes of the instruction stream at the
         * time of the fault.
         */
        old_fs = get_fs();
        if (regs->psw.mask & PSW_MASK_PSTATE)
                set_fs(USER_DS);
        else
                set_fs(KERNEL_DS);
        printk("%s Code: ", mode);
        for (i = 0; i < 20; i++) {
                unsigned char c;
                if (__get_user(c, (char __user *)(regs->psw.addr + i))) {
                        printk(" Bad PSW.");
                        break;
                }
                printk("%02x ", c);
        }
        set_fs(old_fs);

        printk("\n");
}

/* This is called from fs/proc/array.c */
char *task_show_regs(struct task_struct *task, char *buffer)
{
        struct pt_regs *regs;

        regs = __KSTK_PTREGS(task);
        buffer += sprintf(buffer, "task: %p, ksp: %p\n",
                          task, (void *)task->thread.ksp);
        buffer += sprintf(buffer, "User PSW : %p %p\n",
                          (void *) regs->psw.mask, (void *)regs->psw.addr);

        buffer += sprintf(buffer, "User GPRS: " FOURLONG,
                          regs->gprs[0], regs->gprs[1],
                          regs->gprs[2], regs->gprs[3]);
        buffer += sprintf(buffer, " " FOURLONG,
                          regs->gprs[4], regs->gprs[5],
                          regs->gprs[6], regs->gprs[7]);
        buffer += sprintf(buffer, " " FOURLONG,
                          regs->gprs[8], regs->gprs[9],
                          regs->gprs[10], regs->gprs[11]);
        buffer += sprintf(buffer, " " FOURLONG,
                          regs->gprs[12], regs->gprs[13],
                          regs->gprs[14], regs->gprs[15]);
        buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
                          task->thread.acrs[0], task->thread.acrs[1],
                          task->thread.acrs[2], task->thread.acrs[3]);
        buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
                          task->thread.acrs[4], task->thread.acrs[5],
                          task->thread.acrs[6], task->thread.acrs[7]);
        buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
                          task->thread.acrs[8], task->thread.acrs[9],
                          task->thread.acrs[10], task->thread.acrs[11]);
        buffer += sprintf(buffer, " %08x %08x %08x %08x\n",
                          task->thread.acrs[12], task->thread.acrs[13],
                          task->thread.acrs[14], task->thread.acrs[15]);
        return buffer;
}

DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
        static int die_counter;

        debug_stop_all();
        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);
        printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
        show_regs(regs);
        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception: panic_on_oops");
        do_exit(SIGSEGV);
}

static void inline
report_user_fault(long interruption_code, struct pt_regs *regs)
{
#if defined(CONFIG_SYSCTL)
        if (!sysctl_userprocess_debug)
                return;
#endif
#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
        printk("User process fault: interruption code 0x%lX\n",
               interruption_code);
        show_regs(regs);
#endif
}

static void inline do_trap(long interruption_code, int signr, char *str,
                           struct pt_regs *regs, siginfo_t *info)
{
        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                struct task_struct *tsk = current;

                tsk->thread.trap_no = interruption_code & 0xffff;
                force_sig_info(signr, info, tsk);
                report_user_fault(interruption_code, regs);
        } else {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
                if (fixup)
                        regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
                else
                        die(str, regs, interruption_code);
        }
}

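/*
 * After a program check the PSW normally points past the instruction that
 * caused it; subtracting the instruction length code (pgm_ilc) gives the
 * address of the faulting instruction itself.
 */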
static inline void *get_check_address(struct pt_regs *regs)
{
        return (void *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
}

void do_single_step(struct pt_regs *regs)
{
        if ((current->ptrace & PT_PTRACED) != 0)
                force_sig(SIGTRAP, current);
}

asmlinkage void
default_trap_handler(struct pt_regs * regs, long interruption_code)
{
        if (regs->psw.mask & PSW_MASK_PSTATE) {
                local_irq_enable();
                do_exit(SIGSEGV);
                report_user_fault(interruption_code, regs);
        } else
                die("Unknown program exception", regs, interruption_code);
}

#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
asmlinkage void name(struct pt_regs * regs, long interruption_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void *)siaddr; \
        do_trap(interruption_code, signr, str, regs, &info); \
}

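/*
 * For illustration, DO_ERROR_INFO(SIGILL, "privileged operation",
 * privileged_op, ILL_PRVOPC, get_check_address(regs)) expands to roughly:
 *
 *      asmlinkage void privileged_op(struct pt_regs *regs,
 *                                    long interruption_code)
 *      {
 *              siginfo_t info;
 *              info.si_signo = SIGILL;
 *              info.si_errno = 0;
 *              info.si_code = ILL_PRVOPC;
 *              info.si_addr = (void *) get_check_address(regs);
 *              do_trap(interruption_code, SIGILL, "privileged operation",
 *                      regs, &info);
 *      }
 */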
DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
              ILL_ILLADR, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
              FPE_INTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception,
              FPE_INTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception,
              FPE_FLTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception,
              FPE_FLTUND, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception,
              FPE_FLTRES, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception,
              FPE_FLTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception,
              FPE_FLTINV, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
              ILL_PRVOPC, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
              ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
              ILL_ILLOPN, get_check_address(regs))

static inline void
do_fp_trap(struct pt_regs *regs, void *location,
           int fpc, long interruption_code)
{
        siginfo_t si;

        si.si_signo = SIGFPE;
        si.si_errno = 0;
        si.si_addr = location;
        si.si_code = 0;
        /* FPC[2] is Data Exception Code */
        if ((fpc & 0x00000300) == 0) {
                /* bits 6 and 7 of DXC are 0 iff IEEE exception */
                if (fpc & 0x8000) /* invalid fp operation */
                        si.si_code = FPE_FLTINV;
                else if (fpc & 0x4000) /* div by 0 */
                        si.si_code = FPE_FLTDIV;
                else if (fpc & 0x2000) /* overflow */
                        si.si_code = FPE_FLTOVF;
                else if (fpc & 0x1000) /* underflow */
                        si.si_code = FPE_FLTUND;
                else if (fpc & 0x0800) /* inexact */
                        si.si_code = FPE_FLTRES;
        }
        current->thread.ieee_instruction_pointer = (addr_t) location;
        do_trap(interruption_code, SIGFPE,
                "floating point exception", regs, &si);
}
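
/*
 * Example: an IEEE divide-by-zero stores a DXC of 0x40 in the FPC, which
 * shows up above as fpc & 0x4000 and is reported as FPE_FLTDIV.
 */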

asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
{
        siginfo_t info;
        __u8 opcode[6];
        __u16 *location;
        int signal = 0;

        location = (__u16 *) get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                get_user(*((__u16 *) opcode), (__u16 __user *) location);
                if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
                        if (current->ptrace & PT_PTRACED)
                                force_sig(SIGTRAP, current);
                        else
                                signal = SIGILL;
#ifdef CONFIG_MATHEMU
                } else if (opcode[0] == 0xb3) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_b3(opcode, regs);
                } else if (opcode[0] == 0xed) {
                        get_user(*((__u32 *) (opcode+2)),
                                 (__u32 *)(location+1));
                        signal = math_emu_ed(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb299) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_srnm(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29c) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_stfpc(opcode, regs);
                } else if (*((__u16 *) opcode) == 0xb29d) {
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_lfpc(opcode, regs);
#endif
                } else
                        signal = SIGILL;
        } else
                signal = SIGILL;

#ifdef CONFIG_MATHEMU
        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal == SIGSEGV) {
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = SEGV_MAPERR;
                info.si_addr = (void *) location;
                do_trap(interruption_code, signal,
                        "user address fault", regs, &info);
        } else
#endif
        if (signal) {
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPC;
                info.si_addr = (void *) location;
                do_trap(interruption_code, signal,
                        "illegal operation", regs, &info);
        }
}


#ifdef CONFIG_MATHEMU
asmlinkage void
specification_exception(struct pt_regs * regs, long interruption_code)
{
        __u8 opcode[6];
        __u16 *location = NULL;
        int signal = 0;

        location = (__u16 *) get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (regs->psw.mask & PSW_MASK_PSTATE) {
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        } else
                signal = SIGILL;

        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal) {
                siginfo_t info;
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPN;
                info.si_addr = location;
                do_trap(interruption_code, signal,
                        "specification exception", regs, &info);
        }
}
#else
DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
              ILL_ILLOPN, get_check_address(regs));
#endif

asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
{
        __u16 *location;
        int signal = 0;

        location = (__u16 *) get_check_address(regs);

        /*
         * We got all needed information from the lowcore and can
         * now safely switch on interrupts.
         */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                local_irq_enable();

        if (MACHINE_HAS_IEEE)
                __asm__ volatile ("stfpc %0\n\t"
                                  : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
        else if (regs->psw.mask & PSW_MASK_PSTATE) {
                __u8 opcode[6];
                get_user(*((__u16 *) opcode), location);
                switch (opcode[0]) {
                case 0x28: /* LDR Rx,Ry */
                        signal = math_emu_ldr(opcode);
                        break;
                case 0x38: /* LER Rx,Ry */
                        signal = math_emu_ler(opcode);
                        break;
                case 0x60: /* STD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_std(opcode, regs);
                        break;
                case 0x68: /* LD R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ld(opcode, regs);
                        break;
                case 0x70: /* STE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_ste(opcode, regs);
                        break;
                case 0x78: /* LE R,D(X,B) */
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_le(opcode, regs);
                        break;
                case 0xb3:
                        get_user(*((__u16 *) (opcode+2)), location+1);
                        signal = math_emu_b3(opcode, regs);
                        break;
                case 0xed:
                        get_user(*((__u32 *) (opcode+2)),
                                 (__u32 *)(location+1));
                        signal = math_emu_ed(opcode, regs);
                        break;
                case 0xb2:
                        if (opcode[1] == 0x99) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_srnm(opcode, regs);
                        } else if (opcode[1] == 0x9c) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_stfpc(opcode, regs);
                        } else if (opcode[1] == 0x9d) {
                                get_user(*((__u16 *) (opcode+2)), location+1);
                                signal = math_emu_lfpc(opcode, regs);
                        } else
                                signal = SIGILL;
                        break;
                default:
                        signal = SIGILL;
                        break;
                }
        }
#endif
        if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
                signal = SIGFPE;
        else
                signal = SIGILL;
        if (signal == SIGFPE)
                do_fp_trap(regs, location,
                           current->thread.fp_regs.fpc, interruption_code);
        else if (signal) {
                siginfo_t info;
                info.si_signo = signal;
                info.si_errno = 0;
                info.si_code = ILL_ILLOPN;
                info.si_addr = location;
                do_trap(interruption_code, signal,
                        "data exception", regs, &info);
        }
}

asmlinkage void space_switch_exception(struct pt_regs * regs, long int_code)
{
        siginfo_t info;

        /* Set user psw back to home space mode. */
        if (regs->psw.mask & PSW_MASK_PSTATE)
                regs->psw.mask |= PSW_ASC_HOME;
        /* Send SIGILL. */
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_PRVOPC;
        info.si_addr = get_check_address(regs);
        do_trap(int_code, SIGILL, "space switch event", regs, &info);
}

asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
{
        die("Kernel stack overflow", regs, 0);
        panic("Corrupt kernel stack, can't continue.");
}


/* init is done in lowcore.S and head.S */

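/*
 * The program check handler in entry.S uses the interruption code to index
 * pgm_check_table, roughly pgm_check_table[code & 0x7f](regs, code), so
 * trap_init() only has to fill in the table.
 */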
void __init trap_init(void)
{
        int i;

        for (i = 0; i < 128; i++)
                pgm_check_table[i] = &default_trap_handler;
        pgm_check_table[1] = &illegal_op;
        pgm_check_table[2] = &privileged_op;
        pgm_check_table[3] = &execute_exception;
        pgm_check_table[4] = &do_protection_exception;
        pgm_check_table[5] = &addressing_exception;
        pgm_check_table[6] = &specification_exception;
        pgm_check_table[7] = &data_exception;
        pgm_check_table[8] = &overflow_exception;
        pgm_check_table[9] = &divide_exception;
        pgm_check_table[0x0A] = &overflow_exception;
        pgm_check_table[0x0B] = &divide_exception;
        pgm_check_table[0x0C] = &hfp_overflow_exception;
        pgm_check_table[0x0D] = &hfp_underflow_exception;
        pgm_check_table[0x0E] = &hfp_significance_exception;
        pgm_check_table[0x0F] = &hfp_divide_exception;
        pgm_check_table[0x10] = &do_dat_exception;
        pgm_check_table[0x11] = &do_dat_exception;
        pgm_check_table[0x12] = &translation_exception;
        pgm_check_table[0x13] = &special_op_exception;
#ifndef CONFIG_ARCH_S390X
        pgm_check_table[0x14] = &do_pseudo_page_fault;
#else /* CONFIG_ARCH_S390X */
        pgm_check_table[0x38] = &do_dat_exception;
        pgm_check_table[0x39] = &do_dat_exception;
        pgm_check_table[0x3A] = &do_dat_exception;
        pgm_check_table[0x3B] = &do_dat_exception;
#endif /* CONFIG_ARCH_S390X */
        pgm_check_table[0x15] = &operand_exception;
        pgm_check_table[0x1C] = &space_switch_exception;
        pgm_check_table[0x1D] = &hfp_sqrt_exception;
        pgm_check_table[0x40] = &do_monitor_call;

        if (MACHINE_IS_VM) {
                /*
                 * First try to get pfault pseudo page faults going.
                 * If this isn't available turn on pagex page faults.
                 */
#ifdef CONFIG_PFAULT
                /* request the 0x2603 external interrupt */
                if (register_early_external_interrupt(0x2603, pfault_interrupt,
                                                      &ext_int_pfault) != 0)
                        panic("Couldn't request external interrupt 0x2603");

                if (pfault_init() == 0)
                        return;

                /* Tough luck, no pfault. */
                unregister_early_external_interrupt(0x2603, pfault_interrupt,
                                                    &ext_int_pfault);
#endif
#ifndef CONFIG_ARCH_S390X
                cpcmd("SET PAGEX ON", NULL, 0);
#endif
        }
738}