author     Peter Zijlstra <peterz@infradead.org>    2015-09-28 11:57:39 -0400
committer  Ingo Molnar <mingo@kernel.org>           2015-10-06 11:08:17 -0400
commit     1dc0fffc48af94513e621f95dff730ed4f7317ec
tree       602dbd67f0565830ea99196d71e7f47b17d849e3
parent     3d8f74dd4ca1da8a1a464bbafcf679e40c2fc10f
sched/core: Robustify preemption leak checks
When we warn about a preempt_count leak, reset the preempt_count to
the known good value so that the problem does not ripple forward.

This is most important on x86, which has a per-CPU preempt_count that
is not saved/restored (after this series). So if you schedule with an
invalid (!= 2*PREEMPT_DISABLE_OFFSET) preempt_count, the next task is
messed up too.

Enforcing this invariant limits the borkage to just the one task.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Frederic Weisbecker <fweisbec@gmail.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 kernel/exit.c       | 4 +++-
 kernel/sched/core.c | 4 +++-
 2 files changed, 6 insertions(+), 2 deletions(-)
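The pattern both hunks below apply is: when the counter-balance
invariant is found violated, warn once, then force the counter back to
its known good value so that the next user of the same state starts
clean. For illustration, here is a minimal self-contained user-space C
sketch of that idea; fake_preempt_count, check_and_fix and KNOWN_GOOD
are illustrative stand-ins, not kernel APIs.

#include <stdio.h>

#define KNOWN_GOOD 0                    /* stand-in for PREEMPT_ENABLED */

static int fake_preempt_count;          /* stand-in for the per-CPU preempt_count */

/* Warn about a leak, then restore the invariant so it cannot ripple forward. */
static void check_and_fix(const char *task)
{
        if (fake_preempt_count != KNOWN_GOOD) {
                fprintf(stderr, "note: %s exited with preempt_count %d\n",
                        task, fake_preempt_count);
                fake_preempt_count = KNOWN_GOOD;  /* limit the damage to this task */
        }
}

int main(void)
{
        fake_preempt_count = 3;         /* simulate a task leaking the count */
        check_and_fix("task-a");        /* warns, then repairs the counter */
        check_and_fix("task-b");        /* silent: the reset kept task-b clean */
        return 0;
}

Without the reset, "task-b" would inherit "task-a"'s stale count,
which is exactly the ripple the commit message describes for a per-CPU
preempt_count.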
diff --git a/kernel/exit.c b/kernel/exit.c
index ea95ee1b5ef7..443677c8efe6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -706,10 +706,12 @@ void do_exit(long code)
 	smp_mb();
 	raw_spin_unlock_wait(&tsk->pi_lock);
 
-	if (unlikely(in_atomic()))
+	if (unlikely(in_atomic())) {
 		pr_info("note: %s[%d] exited with preempt_count %d\n",
 			current->comm, task_pid_nr(current),
 			preempt_count());
+		preempt_count_set(PREEMPT_ENABLED);
+	}
 
 	/* sync mm's RSS info before statistics gathering */
 	if (tsk->mm)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6344d82a84f6..d6989f85c641 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2968,8 +2968,10 @@ static inline void schedule_debug(struct task_struct *prev)
 	BUG_ON(unlikely(task_stack_end_corrupted(prev)));
 #endif
 
-	if (unlikely(in_atomic_preempt_off()))
+	if (unlikely(in_atomic_preempt_off())) {
 		__schedule_bug(prev);
+		preempt_count_set(PREEMPT_DISABLED);
+	}
 	rcu_sleep_check();
 
 	profile_hit(SCHED_PROFILING, __builtin_return_address(0));