path: root/arch/x86/kernel/cpu/common.c
author		Steven Rostedt <srostedt@redhat.com>	2011-12-09 03:02:19 -0500
committer	Steven Rostedt <rostedt@goodmis.org>	2011-12-21 15:38:55 -0500
commit		228bdaa95fb830e08b6acd1afd4d2c55093cabfa (patch)
tree		11d91c3d9f5b576003a07852fcf31eb2ec53bc39 /arch/x86/kernel/cpu/common.c
parent		3f3c8b8c4b2a34776c3470142a7c8baafcda6eb0 (diff)
x86: Keep current stack in NMI breakpoints
We want to allow NMI handlers to have breakpoints, to be able to remove stop_machine from ftrace, kprobes and jump_labels. But if an NMI interrupts breakpoint processing that is already in progress, and then triggers a breakpoint itself, it will switch to the breakpoint stack and corrupt the data on it for the breakpoint processing that it interrupted.

Instead, have the NMI check whether it interrupted breakpoint processing by checking whether the stack currently in use is a breakpoint stack. If it is, load a special IDT that changes the IST for the debug exception to keep the same stack in kernel context. When the NMI is done, it puts the original IDT back.

This way, if the NMI does trigger a breakpoint, it will keep using the same stack and not stomp on the breakpoint data for the breakpoint it interrupted.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
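The hunks below only add the spare IDT descriptor and the stack-check helpers; the NMI-side wiring lands elsewhere in this series. As a rough, self-contained sketch of the pattern the message describes (struct pt_regs, unlikely(), and the stubbed helpers here are stand-ins for the kernel's versions, purely for illustration):

/*
 * Self-contained sketch of the NMI entry-path pattern described above.
 * Not kernel code: the types and helpers below are stubs standing in
 * for the real ones so the sketch compiles on its own.
 */
#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

struct pt_regs { unsigned long sp; };	/* only the field this sketch needs */

/* Stubs standing in for the helpers this patch adds to common.c. */
static int is_debug_stack(unsigned long addr) { (void)addr; return 1; }
static void debug_stack_set_zero(void) { puts("IDT -> nmi_idt_table (debug IST = 0)"); }
static void debug_stack_reset(void)    { puts("IDT -> idt_table (debug IST restored)"); }

static void nmi_sketch(struct pt_regs *regs)
{
	/*
	 * If regs->sp lies inside the per-CPU debug stack, this NMI
	 * interrupted breakpoint processing: switch to the special IDT
	 * whose debug-exception IST is zero, so a breakpoint taken from
	 * NMI context stays on the current stack instead of clobbering
	 * the interrupted breakpoint's stack frame.
	 */
	if (unlikely(is_debug_stack(regs->sp)))
		debug_stack_set_zero();

	/* ... handle the NMI; breakpoints hit here reuse the current stack ... */

	/* Put the normal IDT (and its debug IST) back on the way out. */
	if (unlikely(is_debug_stack(regs->sp)))
		debug_stack_reset();
}

int main(void)
{
	struct pt_regs regs = { .sp = 0xffff880000010f00UL };	/* hypothetical */
	nmi_sketch(&regs);
	return 0;
}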
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r--	arch/x86/kernel/cpu/common.c	22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index aa003b13a831..caa404556b9c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1026,6 +1026,8 @@ __setup("clearcpuid=", setup_disablecpuid);
 
 #ifdef CONFIG_X86_64
 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
+				    (unsigned long) nmi_idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
 		     irq_stack_union) __aligned(PAGE_SIZE);
@@ -1090,6 +1092,24 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
+static DEFINE_PER_CPU(unsigned long, debug_stack_addr);
+
+int is_debug_stack(unsigned long addr)
+{
+	return addr <= __get_cpu_var(debug_stack_addr) &&
+		addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ);
+}
+
+void debug_stack_set_zero(void)
+{
+	load_idt((const struct desc_ptr *)&nmi_idt_descr);
+}
+
+void debug_stack_reset(void)
+{
+	load_idt((const struct desc_ptr *)&idt_descr);
+}
+
 #else /* CONFIG_X86_64 */
 
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
@@ -1208,6 +1228,8 @@ void __cpuinit cpu_init(void)
 		estacks += exception_stack_sizes[v];
 		oist->ist[v] = t->x86_tss.ist[v] =
 				(unsigned long)estacks;
+		if (v == DEBUG_STACK-1)
+			per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks;
 	}
 }
 
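A note on the helpers just added: debug_stack_addr records the top of the per-CPU debug stack, and x86 stacks grow down, so is_debug_stack() tests membership in the half-open range (top - DEBUG_STKSZ, top]. A standalone sketch of that check follows, with a made-up top address and an assumed 8KB DEBUG_STKSZ (the kernel derives the real size from a page order):

#include <stdio.h>

#define DEBUG_STKSZ 8192UL	/* assumed size, for illustration only */

/* Hypothetical top-of-stack address, standing in for the per-CPU variable. */
static unsigned long debug_stack_addr = 0xffff880000012000UL;

static int is_debug_stack(unsigned long addr)
{
	/* Stacks grow down: valid addresses are (top - DEBUG_STKSZ, top]. */
	return addr <= debug_stack_addr &&
	       addr > (debug_stack_addr - DEBUG_STKSZ);
}

int main(void)
{
	printf("%d\n", is_debug_stack(debug_stack_addr));		/* 1: at the top */
	printf("%d\n", is_debug_stack(debug_stack_addr - 100));		/* 1: inside the stack */
	printf("%d\n", is_debug_stack(debug_stack_addr - DEBUG_STKSZ));	/* 0: one past the bottom */
	return 0;
}

The swap to nmi_idt_table works because an IST index of zero in the debug gate makes the CPU fall back to legacy stack-switching rules, and a trap taken from kernel mode then stays on the current stack, which is exactly what keeps NMI-context breakpoints off the debug stack they would otherwise corrupt.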