author		Steven Rostedt <srostedt@redhat.com>	2011-12-16 11:43:02 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2011-12-21 15:38:56 -0500
commit		42181186ad4db986fcaa40ca95c6e407e9e79372 (patch)
tree		7fcfbc106d34905932c5b0df0a26887f5ac8b69f /arch
parent		ccd49c2391773ffbf52bb80d75c4a92b16972517 (diff)
x86: Add counter when debug stack is used with interrupts enabled
Mathieu Desnoyers pointed out a case that can cause issues with NMIs
running on the debug stack:

  int3 -> interrupt -> NMI -> int3

Because the interrupt changes the stack, the NMI will not see that
it preempted the debug stack. Looking deeper at this case, interrupts
only happen when the int3 is from userspace or in a location in the
exception table (fixup):

  userspace -> int3 -> interrupt -> NMI -> int3

All other int3s that happen in the kernel should be processed without
ever enabling interrupts, as the do_trap() call will panic the kernel
if it is called to process any other location within the kernel.

Adding a counter around the sections that enable interrupts while using
the debug stack allows the NMI to also check that case. If the NMI sees
that it either interrupted a task using the debug stack or the debug
counter is non-zero, then it will have to change the IDT table to make
the int3 not change stacks (which would corrupt the stack if it did).

Note, I had to move the debug_usage functions out of processor.h and
into debugreg.h because of the static inlined functions to inc and dec
the debug_usage counter. __get_cpu_var() requires smp.h, which includes
processor.h, and would fail to build.

Link: http://lkml.kernel.org/r/1323976535.23971.112.camel@gandalf.stny.rr.com

Reported-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: H. Peter Anvin <hpa@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul Turner <pjt@google.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
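For illustration, a minimal user-space sketch of the counter idea follows
(not kernel code: plain globals stand in for the per-CPU variables, and
debug_stack_addr, DEBUG_STKSZ, and the sample addresses are made-up values).
It shows why the address-range check alone is not enough once interrupts
can switch stacks, and how the counter closes that gap:

#include <stdio.h>

#define DEBUG_STKSZ 4096UL

/* Plain globals stand in for the kernel's per-CPU variables. */
static unsigned long debug_stack_addr = 0x100000UL; /* hypothetical stack top */
static int debug_stack_usage;

static void debug_stack_usage_inc(void) { debug_stack_usage++; }
static void debug_stack_usage_dec(void) { debug_stack_usage--; }

/*
 * Mirrors the patched is_debug_stack(): even if an interrupt moved
 * execution off the debug stack, a non-zero counter still reports
 * the debug stack as in use, so an NMI can avoid corrupting it.
 */
static int is_debug_stack(unsigned long addr)
{
        return debug_stack_usage ||
                (addr <= debug_stack_addr &&
                 addr > (debug_stack_addr - DEBUG_STKSZ));
}

int main(void)
{
        unsigned long on_debug_stack = debug_stack_addr - 64;
        unsigned long on_irq_stack   = 0x200000UL; /* hypothetical */

        printf("%d\n", is_debug_stack(on_debug_stack)); /* 1: address match */
        printf("%d\n", is_debug_stack(on_irq_stack));   /* 0: counter is zero */

        /* int3/do_debug path: mark usage before interrupts may be enabled. */
        debug_stack_usage_inc();
        printf("%d\n", is_debug_stack(on_irq_stack));   /* 1: counter catches it */
        debug_stack_usage_dec();

        return 0;
}

In the actual patch the counter is per-CPU, and the inc/dec happen with
interrupts still disabled, bracketing the preempt_conditional_sti()/
preempt_conditional_cli() window.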
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/debugreg.h  | 22
-rw-r--r--  arch/x86/include/asm/processor.h |  6
-rw-r--r--  arch/x86/kernel/cpu/common.c     |  6
-rw-r--r--  arch/x86/kernel/traps.c          | 14
4 files changed, 40 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 078ad0caefc6..b903d5ea3941 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -101,6 +101,28 @@ extern void aout_dump_debugregs(struct user *dump);
 
 extern void hw_breakpoint_restore(void);
 
+#ifdef CONFIG_X86_64
+DECLARE_PER_CPU(int, debug_stack_usage);
+static inline void debug_stack_usage_inc(void)
+{
+        __get_cpu_var(debug_stack_usage)++;
+}
+static inline void debug_stack_usage_dec(void)
+{
+        __get_cpu_var(debug_stack_usage)--;
+}
+int is_debug_stack(unsigned long addr);
+void debug_stack_set_zero(void);
+void debug_stack_reset(void);
+#else /* !X86_64 */
+static inline int is_debug_stack(unsigned long addr) { return 0; }
+static inline void debug_stack_set_zero(void) { }
+static inline void debug_stack_reset(void) { }
+static inline void debug_stack_usage_inc(void) { }
+static inline void debug_stack_usage_dec(void) { }
+#endif /* X86_64 */
+
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_X86_DEBUGREG_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 4b39d6d7e3a1..b650435ffb53 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -402,9 +402,6 @@ DECLARE_PER_CPU(char *, irq_stack_ptr);
 DECLARE_PER_CPU(unsigned int, irq_count);
 extern unsigned long kernel_eflags;
 extern asmlinkage void ignore_sysret(void);
-int is_debug_stack(unsigned long addr);
-void debug_stack_set_zero(void);
-void debug_stack_reset(void);
 #else /* X86_64 */
 #ifdef CONFIG_CC_STACKPROTECTOR
 /*
@@ -419,9 +416,6 @@ struct stack_canary {
 };
 DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
 #endif
-static inline int is_debug_stack(unsigned long addr) { return 0; }
-static inline void debug_stack_set_zero(void) { }
-static inline void debug_stack_reset(void) { }
 #endif /* X86_64 */
 
 extern unsigned int xstate_size;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index caa404556b9c..266e4649b1da 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1093,11 +1093,13 @@ unsigned long kernel_eflags;
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
 static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
+DEFINE_PER_CPU(int, debug_stack_usage);
 
 int is_debug_stack(unsigned long addr)
 {
-        return addr <= __get_cpu_var(debug_stack_addr) &&
-                addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ);
+        return __get_cpu_var(debug_stack_usage) ||
+                (addr <= __get_cpu_var(debug_stack_addr) &&
+                 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
 }
 
 void debug_stack_set_zero(void)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a93c5cabc36a..0072b38e3ea1 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -316,9 +316,15 @@ dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
                 return;
 #endif
 
+        /*
+         * Let others (NMI) know that the debug stack is in use
+         * as we may switch to the interrupt stack.
+         */
+        debug_stack_usage_inc();
         preempt_conditional_sti(regs);
         do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
         preempt_conditional_cli(regs);
+        debug_stack_usage_dec();
 }
 
 #ifdef CONFIG_X86_64
@@ -411,6 +417,12 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
                                                         SIGTRAP) == NOTIFY_STOP)
                 return;
 
+        /*
+         * Let others (NMI) know that the debug stack is in use
+         * as we may switch to the interrupt stack.
+         */
+        debug_stack_usage_inc();
+
         /* It's safe to allow irq's after DR6 has been saved */
         preempt_conditional_sti(regs);
 
@@ -418,6 +430,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
                 handle_vm86_trap((struct kernel_vm86_regs *) regs,
                                 error_code, 1);
                 preempt_conditional_cli(regs);
+                debug_stack_usage_dec();
                 return;
         }
 
@@ -437,6 +450,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
         if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
                 send_sigtrap(tsk, regs, error_code, si_code);
         preempt_conditional_cli(regs);
+        debug_stack_usage_dec();
 
         return;
 }