author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-29 21:21:35 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-29 21:21:35 -0400
commit     eb05df9e7e793f3134dbb574c7ccc05f7932bc59 (patch)
tree       6373858c571902eece2761fe2b0d9cc84e7a66fe /arch/x86/kernel/traps.c
parent     a591afc01d9e48affbacb365558a31e53c85af45 (diff)
parent     ef334a20d84f52407a8a2afd02ddeaecbef0ad3d (diff)
Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 cleanups from Peter Anvin:
 "The biggest textual change is the cleanup to use symbolic constants
  for x86 trap values.

  The only *functional* change and the reason for the x86/x32 dependency
  is the move of is_ia32_task() into <asm/thread_info.h> so that it can
  be used in other code that needs to understand if a system call comes
  from the compat entry point (and therefore uses i386 system call
  numbers) or not.  One intended user for that is the BPF system call
  filter.  Moving it out of <asm/compat.h> means we can define it
  unconditionally, returning always true on i386."

* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: Move is_ia32_task to asm/thread_info.h from asm/compat.h
  x86: Rename trap_no to trap_nr in thread_struct
  x86: Use enum instead of literals for trap values
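For reference, the symbolic names introduced by the trap-value patch map to the classic x86 vector numbers. The sketch below is reconstructed from the substitutions visible in the diff that follows; it is not the verbatim declaration from <asm/traps.h>, and the comments are editorial:

	enum {
		X86_TRAP_DE = 0,	/*  0: divide error */
		X86_TRAP_DB,		/*  1: debug */
		X86_TRAP_NMI,		/*  2: non-maskable interrupt */
		X86_TRAP_BP,		/*  3: breakpoint (int3) */
		X86_TRAP_OF,		/*  4: overflow */
		X86_TRAP_BR,		/*  5: bound range exceeded */
		X86_TRAP_UD,		/*  6: invalid opcode */
		X86_TRAP_NM,		/*  7: device not available */
		X86_TRAP_DF,		/*  8: double fault */
		X86_TRAP_OLD_MF,	/*  9: coprocessor segment overrun */
		X86_TRAP_TS,		/* 10: invalid TSS */
		X86_TRAP_NP,		/* 11: segment not present */
		X86_TRAP_SS,		/* 12: stack segment fault */
		X86_TRAP_GP,		/* 13: general protection fault */
		X86_TRAP_PF,		/* 14: page fault */
		X86_TRAP_SPURIOUS,	/* 15: spurious interrupt */
		X86_TRAP_MF,		/* 16: x87 floating-point error */
		X86_TRAP_AC,		/* 17: alignment check */
		X86_TRAP_MC,		/* 18: machine check */
		X86_TRAP_XF,		/* 19: SIMD floating-point error */
		X86_TRAP_IRET = 32,	/* 32: IRET exception */
	};

The is_ia32_task() helper mentioned above, once moved to <asm/thread_info.h>, can be defined unconditionally. A minimal sketch of its logic, assuming the usual CONFIG_X86_32 / CONFIG_IA32_EMULATION split (again, not the verbatim kernel source), is:

	static inline bool is_ia32_task(void)
	{
	#ifdef CONFIG_X86_32
		return true;		/* native i386: always the 32-bit ABI */
	#endif
	#ifdef CONFIG_IA32_EMULATION
		if (current_thread_info()->status & TS_COMPAT)
			return true;	/* entered via the compat syscall path */
	#endif
		return false;
	}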
Diffstat (limited to 'arch/x86/kernel/traps.c')
-rw-r--r--  arch/x86/kernel/traps.c | 133
1 file changed, 70 insertions(+), 63 deletions(-)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 860f126ca233..ff9281f16029 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -119,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 		 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
 		 * On nmi (interrupt 2), do_trap should not be called.
 		 */
-		if (trapnr < 6)
+		if (trapnr < X86_TRAP_UD)
 			goto vm86_trap;
 		goto trap_signal;
 	}
@@ -132,7 +132,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 trap_signal:
 #endif
 	/*
-	 * We want error_code and trap_no set for userspace faults and
+	 * We want error_code and trap_nr set for userspace faults and
 	 * kernelspace faults which result in die(), but not
 	 * kernelspace faults which are fixed up. die() gives the
 	 * process no chance to handle the signal and notice the
@@ -141,7 +141,7 @@ trap_signal:
 	 * delivered, faults. See also do_general_protection below.
 	 */
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = trapnr;
+	tsk->thread.trap_nr = trapnr;
 
 #ifdef CONFIG_X86_64
 	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
@@ -164,7 +164,7 @@ trap_signal:
 kernel_trap:
 	if (!fixup_exception(regs)) {
 		tsk->thread.error_code = error_code;
-		tsk->thread.trap_no = trapnr;
+		tsk->thread.trap_nr = trapnr;
 		die(str, regs, error_code);
 	}
 	return;
@@ -203,27 +203,31 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
 	do_trap(trapnr, signr, str, regs, error_code, &info);		\
 }
 
-DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
-DO_ERROR(4, SIGSEGV, "overflow", overflow)
-DO_ERROR(5, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
-DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
-DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
-DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
+		regs->ip)
+DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
+DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
+DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
+		regs->ip)
+DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
+		coprocessor_segment_overrun)
+DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
+DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
 #ifdef CONFIG_X86_32
-DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
 #endif
-DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
+DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
+		BUS_ADRALN, 0)
 
 #ifdef CONFIG_X86_64
 /* Runs on IST stack */
 dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
 {
 	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
-			12, SIGBUS) == NOTIFY_STOP)
+			X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
 		return;
 	preempt_conditional_sti(regs);
-	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
+	do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
 	preempt_conditional_cli(regs);
 }
 
@@ -233,10 +237,10 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 	struct task_struct *tsk = current;
 
 	/* Return not checked because double check cannot be ignored */
-	notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
+	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = 8;
+	tsk->thread.trap_nr = X86_TRAP_DF;
 
 	/*
 	 * This is always a kernel trap and never fixable (and thus must
@@ -264,7 +268,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
 		goto gp_in_kernel;
 
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = 13;
+	tsk->thread.trap_nr = X86_TRAP_GP;
 
 	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
 			printk_ratelimit()) {
@@ -291,9 +295,9 @@ gp_in_kernel:
 		return;
 
 	tsk->thread.error_code = error_code;
-	tsk->thread.trap_no = 13;
-	if (notify_die(DIE_GPF, "general protection fault", regs,
-			error_code, 13, SIGSEGV) == NOTIFY_STOP)
+	tsk->thread.trap_nr = X86_TRAP_GP;
+	if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
+			X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
 		return;
 	die("general protection fault", regs, error_code);
 }
@@ -302,13 +306,13 @@ gp_in_kernel:
 dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
 {
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
-	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-			== NOTIFY_STOP)
+	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
+			SIGTRAP) == NOTIFY_STOP)
 		return;
 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 
-	if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-			== NOTIFY_STOP)
+	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
+			SIGTRAP) == NOTIFY_STOP)
 		return;
 
 	/*
@@ -317,7 +321,7 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
 	 */
 	debug_stack_usage_inc();
 	preempt_conditional_sti(regs);
-	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
+	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
 	preempt_conditional_cli(regs);
 	debug_stack_usage_dec();
 }
@@ -422,8 +426,8 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
 	preempt_conditional_sti(regs);
 
 	if (regs->flags & X86_VM_MASK) {
-		handle_vm86_trap((struct kernel_vm86_regs *) regs,
-				error_code, 1);
+		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
+				X86_TRAP_DB);
 		preempt_conditional_cli(regs);
 		debug_stack_usage_dec();
 		return;
@@ -460,7 +464,8 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
 	struct task_struct *task = current;
 	siginfo_t info;
 	unsigned short err;
-	char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
+	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
+						"simd exception";
 
 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
 		return;
@@ -470,7 +475,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
 	{
 		if (!fixup_exception(regs)) {
 			task->thread.error_code = error_code;
-			task->thread.trap_no = trapnr;
+			task->thread.trap_nr = trapnr;
 			die(str, regs, error_code);
 		}
 		return;
@@ -480,12 +485,12 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
 	 * Save the info for the exception handler and clear the error.
 	 */
 	save_init_fpu(task);
-	task->thread.trap_no = trapnr;
+	task->thread.trap_nr = trapnr;
 	task->thread.error_code = error_code;
 	info.si_signo = SIGFPE;
 	info.si_errno = 0;
 	info.si_addr = (void __user *)regs->ip;
-	if (trapnr == 16) {
+	if (trapnr == X86_TRAP_MF) {
 		unsigned short cwd, swd;
 		/*
 		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
@@ -529,10 +534,11 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
 		info.si_code = FPE_FLTRES;
 	} else {
 		/*
-		 * If we're using IRQ 13, or supposedly even some trap 16
-		 * implementations, it's possible we get a spurious trap...
+		 * If we're using IRQ 13, or supposedly even some trap
+		 * X86_TRAP_MF implementations, it's possible
+		 * we get a spurious trap, which is not an error.
 		 */
-		return;	/* Spurious trap, no error */
+		return;
 	}
 	force_sig_info(SIGFPE, &info, task);
 }
@@ -543,13 +549,13 @@ dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
 	ignore_fpu_irq = 1;
 #endif
 
-	math_error(regs, error_code, 16);
+	math_error(regs, error_code, X86_TRAP_MF);
 }
 
 dotraplinkage void
 do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 {
-	math_error(regs, error_code, 19);
+	math_error(regs, error_code, X86_TRAP_XF);
 }
 
 dotraplinkage void
@@ -643,20 +649,21 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 	info.si_errno = 0;
 	info.si_code = ILL_BADSTK;
 	info.si_addr = NULL;
-	if (notify_die(DIE_TRAP, "iret exception",
-			regs, error_code, 32, SIGILL) == NOTIFY_STOP)
+	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
+			X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
 		return;
-	do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
+	do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
+		&info);
 }
 #endif
 
 /* Set of traps needed for early debugging. */
 void __init early_trap_init(void)
 {
-	set_intr_gate_ist(1, &debug, DEBUG_STACK);
+	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
 	/* int3 can be called from all */
-	set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
-	set_intr_gate(14, &page_fault);
+	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
+	set_intr_gate(X86_TRAP_PF, &page_fault);
 	load_idt(&idt_descr);
 }
 
@@ -672,30 +679,30 @@ void __init trap_init(void)
 	early_iounmap(p, 4);
 #endif
 
-	set_intr_gate(0, &divide_error);
-	set_intr_gate_ist(2, &nmi, NMI_STACK);
+	set_intr_gate(X86_TRAP_DE, &divide_error);
+	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
 	/* int4 can be called from all */
-	set_system_intr_gate(4, &overflow);
-	set_intr_gate(5, &bounds);
-	set_intr_gate(6, &invalid_op);
-	set_intr_gate(7, &device_not_available);
+	set_system_intr_gate(X86_TRAP_OF, &overflow);
+	set_intr_gate(X86_TRAP_BR, &bounds);
+	set_intr_gate(X86_TRAP_UD, &invalid_op);
+	set_intr_gate(X86_TRAP_NM, &device_not_available);
 #ifdef CONFIG_X86_32
-	set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
+	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
 #else
-	set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
+	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
 #endif
-	set_intr_gate(9, &coprocessor_segment_overrun);
-	set_intr_gate(10, &invalid_TSS);
-	set_intr_gate(11, &segment_not_present);
-	set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
-	set_intr_gate(13, &general_protection);
-	set_intr_gate(15, &spurious_interrupt_bug);
-	set_intr_gate(16, &coprocessor_error);
-	set_intr_gate(17, &alignment_check);
+	set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
+	set_intr_gate(X86_TRAP_TS, &invalid_TSS);
+	set_intr_gate(X86_TRAP_NP, &segment_not_present);
+	set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
+	set_intr_gate(X86_TRAP_GP, &general_protection);
+	set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
+	set_intr_gate(X86_TRAP_MF, &coprocessor_error);
+	set_intr_gate(X86_TRAP_AC, &alignment_check);
 #ifdef CONFIG_X86_MCE
-	set_intr_gate_ist(18, &machine_check, MCE_STACK);
+	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
 #endif
-	set_intr_gate(19, &simd_coprocessor_error);
+	set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);
 
 	/* Reserve all the builtin and the syscall vector: */
 	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
@@ -720,7 +727,7 @@ void __init trap_init(void)
 
 #ifdef CONFIG_X86_64
 	memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
-	set_nmi_gate(1, &debug);
-	set_nmi_gate(3, &int3);
+	set_nmi_gate(X86_TRAP_DB, &debug);
+	set_nmi_gate(X86_TRAP_BP, &int3);
 #endif
 }