author     Andy Lutomirski <luto@amacapital.net>  2014-11-19 20:59:41 -0500
committer  Andy Lutomirski <luto@amacapital.net>  2015-01-02 13:22:46 -0500
commit     bced35b65aefe53a6f77a9ed0ce1aea86e9d65a2 (patch)
tree       9a07fcff24c833c92146cfccfa4f7ab580913518
parent     83653c16da91112236292871b820cb8b367220e3 (diff)
x86, traps: Add ist_begin_non_atomic and ist_end_non_atomic
In some IST handlers, if the interrupt came from user mode, we can
safely enable preemption.  Add helpers to do it safely.

This is intended to be used by the memory failure code in
do_machine_check.

Acked-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
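For orientation, here is a minimal sketch of how a caller such as
do_machine_check might use the new helpers, following the contract in the
kerneldoc below (the caller enables and disables interrupts itself inside the
section and calls ist_end_non_atomic() before ist_exit()).  The surrounding
machine-check details are illustrative assumptions, not part of this patch:

/* Hypothetical caller sketch -- not part of this patch. */
static void example_mce_user_path(struct pt_regs *regs)
{
	enum ctx_state prev_state = ist_enter(regs);

	/* ... atomic handling of the machine check ... */

	if (user_mode_vm(regs)) {
		/* The fault came from user mode, so scheduling is safe. */
		ist_begin_non_atomic(regs);
		local_irq_enable();

		/* e.g. call memory_failure() or send SIGBUS to current */

		local_irq_disable();
		ist_end_non_atomic();
	}

	ist_exit(regs, prev_state);
}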
 arch/x86/include/asm/traps.h |  2 ++
 arch/x86/kernel/traps.c      | 38 ++++++++++++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 3cf525ec762d..4e49d7dff78e 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -113,6 +113,8 @@ asmlinkage void mce_threshold_interrupt(void);
 
 extern enum ctx_state ist_enter(struct pt_regs *regs);
 extern void ist_exit(struct pt_regs *regs, enum ctx_state prev_state);
+extern void ist_begin_non_atomic(struct pt_regs *regs);
+extern void ist_end_non_atomic(void);
 
 /* Interrupts/Exceptions */
 enum {
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b3a9d24dba25..7176f84f95a4 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -141,6 +141,44 @@ void ist_exit(struct pt_regs *regs, enum ctx_state prev_state)
 	rcu_nmi_exit();
 }
 
+/**
+ * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
+ * @regs:	regs passed to the IST exception handler
+ *
+ * IST exception handlers normally cannot schedule.  As a special
+ * exception, if the exception interrupted userspace code (i.e.
+ * user_mode_vm(regs) would return true) and the exception was not
+ * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
+ * begins a non-atomic section within an ist_enter()/ist_exit() region.
+ * Callers are responsible for enabling interrupts themselves inside
+ * the non-atomic section, and callers must call ist_end_non_atomic()
+ * before ist_exit().
+ */
+void ist_begin_non_atomic(struct pt_regs *regs)
+{
+	BUG_ON(!user_mode_vm(regs));
+
+	/*
+	 * Sanity check: we need to be on the normal thread stack.  This
+	 * will catch asm bugs and any attempt to use ist_preempt_enable
+	 * from double_fault.
+	 */
+	BUG_ON(((current_stack_pointer() ^ this_cpu_read_stable(kernel_stack))
+		& ~(THREAD_SIZE - 1)) != 0);
+
+	preempt_count_sub(HARDIRQ_OFFSET);
+}
+
+/**
+ * ist_end_non_atomic() - end a non-atomic section in an IST exception
+ *
+ * Ends a non-atomic section started with ist_begin_non_atomic().
+ */
+void ist_end_non_atomic(void)
+{
+	preempt_count_add(HARDIRQ_OFFSET);
+}
+
 static nokprobe_inline int
 do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
 		  struct pt_regs *regs, long error_code)
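A side note on the stack sanity check above: because THREAD_SIZE is a power of
two, two addresses lie on the same thread stack exactly when they agree in
every bit above the THREAD_SIZE alignment, which is what the XOR-and-mask test
in ist_begin_non_atomic() verifies.  A standalone sketch of the same test (the
names and constants here are illustrative, not kernel APIs):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 16384UL	/* a power of two, e.g. 16 KB on x86_64 */

/* True iff both addresses fall within the same THREAD_SIZE-aligned region. */
static bool same_stack_region(uintptr_t a, uintptr_t b)
{
	return ((a ^ b) & ~(THREAD_SIZE - 1)) == 0;
}

int main(void)
{
	uintptr_t base = 0x12340000UL;	/* illustrative THREAD_SIZE-aligned base */

	/* Same region: prints 1. */
	printf("%d\n", same_stack_region(base + 0x100, base + 0x3f00));
	/* One THREAD_SIZE further up: prints 0. */
	printf("%d\n", same_stack_region(base + 0x100, base + THREAD_SIZE + 0x100));
	return 0;
}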