aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched/core.c
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2013-02-24 06:59:30 -0500
committerFrederic Weisbecker <fweisbec@gmail.com>2013-03-07 11:10:21 -0500
commitb22366cd54c6fe05db426f20adb10f461c19ec06 (patch)
tree3eb42a6ae0c6b25c27d946e6ffa4787f82ac952f /kernel/sched/core.c
parent6c1e0256fad84a843d915414e4b5973b7443d48d (diff)
context_tracking: Restore preempted context state after preempt_schedule_irq()
From the context tracking POV, preempt_schedule_irq() behaves pretty much
like an exception: it can be called at any time and schedule another task.

But currently it doesn't restore the context tracking state of the
preempted code on preempt_schedule_irq() return. As a result, if
preempt_schedule_irq() is called in the tiny frame between user_enter()
and the actual return to userspace, we resume userspace with the wrong
context tracking state.

Fix this by using exception_enter/exit(), which are a perfect fit for
this kind of issue.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Mats Liljegren <mats.liljegren@enea.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Namhyung Kim <namhyung.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--kernel/sched/core.c6
1 files changed, 5 insertions, 1 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7f12624a393c..af7a8c84b797 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3082,11 +3082,13 @@ EXPORT_SYMBOL(preempt_schedule);
 asmlinkage void __sched preempt_schedule_irq(void)
 {
 	struct thread_info *ti = current_thread_info();
+	enum ctx_state prev_state;
 
 	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 
-	user_exit();
+	prev_state = exception_enter();
+
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		local_irq_enable();
@@ -3100,6 +3102,8 @@ asmlinkage void __sched preempt_schedule_irq(void)
 		 */
 		barrier();
 	} while (need_resched());
+
+	exception_exit(prev_state);
 }
 
 #endif /* CONFIG_PREEMPT */