path: root/kernel/sched/core.c
author	Frederic Weisbecker <fweisbec@gmail.com>	2015-01-27 19:24:09 -0500
committer	Ingo Molnar <mingo@kernel.org>	2015-02-04 01:52:30 -0500
commit	bfd9b2b5f80e7289fdd50210afe4d9ca5952a865 (patch)
tree	dd65ba421f2c4e7081b6797bb6c87c97ad2c6c9c /kernel/sched/core.c
parent	9659e1eeee28f7025b6545934d644d19e9c6e603 (diff)
sched: Pull resched loop to __schedule() callers
__schedule() disables preemption during its job and re-enables it afterward without doing a preemption check, to avoid recursion.

But if an event that requires rescheduling happens after the context switch, we need to check again whether a task of higher priority needs the CPU. A preempt irq can raise such a situation. To handle that, __schedule() loops on need_resched().

But the preempt_schedule_*() functions, which call __schedule(), also loop on need_resched() to handle missed preempt irqs. Hence we end up with the same loop happening twice.

Let's simplify that by making the need_resched() loop the responsibility of all __schedule() callers.

There is a risk that the outer loop now handles reschedules that used to be handled by the inner loop, with the added overhead of caller details (inc/dec of PREEMPT_ACTIVE, irq save/restore), but assuming those inner rescheduling loops weren't too frequent, this shouldn't matter. Especially since the whole preemption path now loses one loop in any case.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/1422404652-29067-2-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
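For context, the caller-side loop that this change relies on already exists in preempt_schedule(); the following is a simplified sketch of that pattern, based on kernel/sched/core.c around this kernel version (not part of this patch, details abbreviated):

	asmlinkage __visible void __sched notrace preempt_schedule(void)
	{
		/* Do not preempt if preemption is off or irqs are disabled. */
		if (likely(!preemptible()))
			return;

		do {
			__preempt_count_add(PREEMPT_ACTIVE);	/* guard against recursive preemption */
			__schedule();				/* after this patch, returns without looping */
			__preempt_count_sub(PREEMPT_ACTIVE);

			/*
			 * Check again in case an interrupt set TIF_NEED_RESCHED
			 * while preemption was disabled in __schedule().
			 */
			barrier();
		} while (need_resched());
	}

With the inner need_resched() loop removed from __schedule(), this outer do/while is the only loop left on the preemption path.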
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5091fd4feed8..b269e8a2a516 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2765,6 +2765,10 @@ again:
  *          - explicit schedule() call
  *          - return from syscall or exception to user-space
  *          - return from interrupt-handler to user-space
+ *
+ * WARNING: all callers must re-check need_resched() afterward and reschedule
+ * accordingly in case an event triggered the need for rescheduling (such as
+ * an interrupt waking up a task) while preemption was disabled in __schedule().
  */
 static void __sched __schedule(void)
 {
@@ -2773,7 +2777,6 @@ static void __sched __schedule(void)
 	struct rq *rq;
 	int cpu;
 
-need_resched:
 	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
@@ -2840,8 +2843,6 @@ need_resched:
 	post_schedule(rq);
 
 	sched_preempt_enable_no_resched();
-	if (need_resched())
-		goto need_resched;
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -2861,7 +2862,9 @@ asmlinkage __visible void __sched schedule(void)
 	struct task_struct *tsk = current;
 
 	sched_submit_work(tsk);
-	__schedule();
+	do {
+		__schedule();
+	} while (need_resched());
 }
 EXPORT_SYMBOL(schedule);
 
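Pieced together from the final hunk above, the resulting schedule() now re-invokes __schedule() until need_resched() is clear:

	asmlinkage __visible void __sched schedule(void)
	{
		struct task_struct *tsk = current;

		sched_submit_work(tsk);
		do {
			__schedule();
		} while (need_resched());
	}
	EXPORT_SYMBOL(schedule);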