Diffstat (limited to 'arch/x86/kernel/traps.c')
-rw-r--r--  arch/x86/kernel/traps.c  60
1 file changed, 39 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 4ff5d162ff9f..f4fa991406cd 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -112,7 +112,7 @@ enum ctx_state ist_enter(struct pt_regs *regs)
 {
	enum ctx_state prev_state;
 
-	if (user_mode_vm(regs)) {
+	if (user_mode(regs)) {
		/* Other than that, we're just an exception. */
		prev_state = exception_enter();
	} else {
@@ -146,7 +146,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
	/* Must be before exception_exit. */
	preempt_count_sub(HARDIRQ_OFFSET);
 
-	if (user_mode_vm(regs))
+	if (user_mode(regs))
		return exception_exit(prev_state);
	else
		rcu_nmi_exit();
@@ -158,7 +158,7 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
  *
  * IST exception handlers normally cannot schedule. As a special
  * exception, if the exception interrupted userspace code (i.e.
- * user_mode_vm(regs) would return true) and the exception was not
+ * user_mode(regs) would return true) and the exception was not
  * a double fault, it can be safe to schedule. ist_begin_non_atomic()
  * begins a non-atomic section within an ist_enter()/ist_exit() region.
  * Callers are responsible for enabling interrupts themselves inside
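Taken together with ist_enter()/ist_exit(), the comment above implies a specific call pattern. A minimal kernel-context sketch of how the helpers pair up; do_example_ist is a hypothetical handler invented for illustration, not part of this patch:

dotraplinkage void do_example_ist(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state = ist_enter(regs);

	/* ... atomic handling; no scheduling allowed here ... */

	if (user_mode(regs)) {
		/* Interrupted userspace (and not a double fault), so
		 * scheduling can be made safe. */
		ist_begin_non_atomic(regs);
		local_irq_enable();	/* callers enable IRQs themselves */

		/* ... work that may sleep ... */

		local_irq_disable();
		ist_end_non_atomic();
	}

	ist_exit(regs, prev_state);
}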
@@ -167,15 +167,15 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
  */
 void ist_begin_non_atomic(struct pt_regs *regs)
 {
-	BUG_ON(!user_mode_vm(regs));
+	BUG_ON(!user_mode(regs));
 
	/*
	 * Sanity check: we need to be on the normal thread stack. This
	 * will catch asm bugs and any attempt to use ist_preempt_enable
	 * from double_fault.
	 */
-	BUG_ON(((current_stack_pointer() ^ this_cpu_read_stable(kernel_stack))
-		& ~(THREAD_SIZE - 1)) != 0);
+	BUG_ON((unsigned long)(current_top_of_stack() -
+			       current_stack_pointer()) >= THREAD_SIZE);
 
	preempt_count_sub(HARDIRQ_OFFSET);
 }
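The replacement check relies on unsigned wraparound: if the stack pointer lies within THREAD_SIZE below the top of the thread stack, the difference is small; on any other stack (an IST stack, say) the subtraction wraps to a huge value and the BUG_ON fires. A standalone illustration, with invented addresses and a typical THREAD_SIZE, of why the comparison distinguishes the stacks:

#include <assert.h>
#include <stdio.h>

#define THREAD_SIZE 16384UL	/* illustrative value only */

int main(void)
{
	unsigned long top    = 0xffff880012344000UL;	/* current_top_of_stack() */
	unsigned long sp_ok  = top - 512;		/* on the thread stack */
	unsigned long sp_ist = 0xffff880098760000UL;	/* some other (IST) stack */

	/* On the thread stack the unsigned difference stays below THREAD_SIZE. */
	assert(top - sp_ok < THREAD_SIZE);

	/* Anywhere else (including sp above top) it wraps to a huge value. */
	assert(top - sp_ist >= THREAD_SIZE);

	puts("range check distinguishes the stacks");
	return 0;
}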
@@ -194,8 +194,7 @@ static nokprobe_inline int
 do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
 {
-#ifdef CONFIG_X86_32
-	if (regs->flags & X86_VM_MASK) {
+	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
@@ -207,7 +206,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		}
		return -1;
	}
-#endif
+
	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			tsk->thread.error_code = error_code;
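The #ifdef can go because v8086_mode() already encapsulates it. Its definition in arch/x86/include/asm/ptrace.h is approximately:

static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* no vm86 in long mode */
#endif
}

On 64-bit builds the constant 0 lets the compiler drop the whole branch, which is why the open-coded flag tests in this hunk and in do_general_protection() below can lose their #ifdef wrappers without adding any code to x86-64.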
@@ -384,7 +383,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
		goto exit;
	conditional_sti(regs);
 
-	if (!user_mode_vm(regs))
+	if (!user_mode(regs))
		die("bounds", regs, error_code);
 
	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
@@ -462,13 +461,11 @@ do_general_protection(struct pt_regs *regs, long error_code)
	prev_state = exception_enter();
	conditional_sti(regs);
 
-#ifdef CONFIG_X86_32
-	if (regs->flags & X86_VM_MASK) {
+	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		goto exit;
	}
-#endif
 
	tsk = current;
	if (!user_mode(regs)) {
@@ -587,7 +584,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
	/* Copy the remainder of the stack from the current stack. */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
 
-	BUG_ON(!user_mode_vm(&new_stack->regs));
+	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
 }
 NOKPROBE_SYMBOL(fixup_bad_iret);
@@ -637,7 +634,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
-	if (!dr6 && user_mode_vm(regs))
+	if (!dr6 && user_mode(regs))
		user_icebp = 1;
 
	/* Catch kmemcheck conditions first of all! */
@@ -673,7 +670,7 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);
 
-	if (regs->flags & X86_VM_MASK) {
+	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
				 X86_TRAP_DB);
		preempt_conditional_cli(regs);
@@ -721,7 +718,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
		return;
	conditional_sti(regs);
 
-	if (!user_mode_vm(regs))
+	if (!user_mode(regs))
	{
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
@@ -734,7 +731,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
	/*
	 * Save the info for the exception handler and clear the error.
	 */
-	save_init_fpu(task);
+	unlazy_fpu(task);
	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
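For the user-visible side of this path: math_error() queues a SIGFPE whose si_code encodes the FPU condition. A hypothetical userspace demo, not from the patch (build with -lm at default optimization), that unmasks divide-by-zero and observes the signal this handler sends:

#define _GNU_SOURCE
#include <fenv.h>
#include <signal.h>
#include <unistd.h>

static void on_fpe(int sig, siginfo_t *si, void *uc)
{
	/* FPE_FLTDIV here mirrors the si_code math_error() filled in. */
	static const char msg[] = "caught SIGFPE\n";
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
	_exit(si->si_code == FPE_FLTDIV ? 0 : 1);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = on_fpe;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGFPE, &sa, NULL);

	feenableexcept(FE_DIVBYZERO);	/* unmask the FP exception */

	volatile double zero = 0.0;
	volatile double r = 1.0 / zero;	/* traps -> math_error() -> SIGFPE */
	(void)r;
	return 1;			/* not reached */
}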
@@ -863,7 +860,7 @@ void math_state_restore(void)
	kernel_fpu_disable();
	__thread_fpu_begin(tsk);
	if (unlikely(restore_fpu_checking(tsk))) {
-		drop_init_fpu(tsk);
+		fpu_reset_state(tsk);
		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
	} else {
		tsk->thread.fpu_counter++;
@@ -925,9 +922,21 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
 /* Set of traps needed for early debugging. */
 void __init early_trap_init(void)
 {
-	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
+	/*
+	 * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
+	 * is ready in cpu_init() <-- trap_init(). Before trap_init(),
+	 * CPU runs at ring 0 so it is impossible to hit an invalid
+	 * stack. Using the original stack works well enough at this
+	 * early stage. DEBUG_STACK will be equipped after cpu_init() in
+	 * trap_init().
+	 *
+	 * We don't need to set trace_idt_table like set_intr_gate(),
+	 * since we don't have trace_debug and it will be reset to
+	 * 'debug' in trap_init() by set_intr_gate_ist().
+	 */
+	set_intr_gate_notrace(X86_TRAP_DB, debug);
	/* int3 can be called from all */
-	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
+	set_system_intr_gate(X86_TRAP_BP, &int3);
 #ifdef CONFIG_X86_32
	set_intr_gate(X86_TRAP_PF, page_fault);
 #endif
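The comment boils down to an ordering constraint: gates that dispatch via IST may only be installed once cpu_init() has loaded the TSS. A sketch of the resulting two-phase bring-up; sketch_idt_bringup is an invented name, and the real sequencing lives in init/main.c and in trap_init() below:

void __init sketch_idt_bringup(void)
{
	early_trap_init();	/* phase 1: #DB/#BP gates without IST */
	/* ... early boot; CPU stays in ring 0, thread stack suffices ... */
	trap_init();		/* phase 2: cpu_init() loads the TSS, then the
				 * same gates are re-set with DEBUG_STACK */
}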
@@ -1005,6 +1014,15 @@ void __init trap_init(void)
	 */
	cpu_init();
 
+	/*
+	 * X86_TRAP_DB and X86_TRAP_BP have been set
+	 * in early_trap_init(). However, IST works only after
+	 * cpu_init() loads TSS. See comments in early_trap_init().
+	 */
+	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
+	/* int3 can be called from all */
+	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
+
	x86_init.irqs.trap_init();
 
 #ifdef CONFIG_X86_64