path: root/kernel/sched.c
author     Bjoern B. Brandenburg <bbb@cs.unc.edu>  2010-11-10 12:10:49 -0500
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>  2010-11-11 17:57:44 -0500
commit     fb3df2ec261d8cd6bcb8206d9d985355214d7767 (patch)
tree       c41f8818ad4f1b699afbe292d131c1073b4d7c6e /kernel/sched.c
parent     516b6601bb5f71035e8859735a25dea0da4a0211 (diff)
Implement proper remote preemption support
To date, Litmus has just hooked into the smp_send_reschedule() IPI handler and marked tasks as having to reschedule to implement remote preemptions. This was never particularly clean, but so far we got away with it. However, changes in the underlying Linux code, and peculiarities of the ARM port (interrupts enabled before the context switch), break this naive approach.

This patch introduces new state-machine-based remote preemption support. By examining the local state before calling set_tsk_need_resched(), we avoid confusing the underlying Linux scheduler. Further, this patch avoids sending unnecessary IPIs.
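For readers unfamiliar with the mechanism, the following user-space C sketch models the kind of state machine described above. It is only an illustration under assumptions: the state names, the hook names (request_remote_preemption, entered_schedule, task_picked, validate_switch), and the use of C11 atomics are invented for this sketch and do not reproduce the actual LITMUS^RT implementation. It demonstrates the idea stated in the message: a remote CPU inspects the target CPU's published scheduling state before deciding whether an IPI is needed, and the local CPU validates its task pick before committing the context switch.

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative per-CPU scheduling states (names are assumptions). */
enum sched_state {
	TASK_SCHEDULED,    /* task running undisturbed; a preemption needs an IPI */
	SHOULD_SCHEDULE,   /* a reschedule was requested and an IPI is in flight  */
	WILL_SCHEDULE,     /* CPU is entering schedule() anyway; no IPI required  */
	TASK_PICKED,       /* schedule() chose a task; choice not yet committed   */
	PICKED_WRONG_TASK  /* a request raced with the pick; must pick again      */
};

/* One state word per CPU; a single CPU is modeled here for brevity. */
static _Atomic int cpu_state = TASK_SCHEDULED;

/* Remote side: request a preemption; returns true iff an IPI must be sent. */
static bool request_remote_preemption(void)
{
	int old = atomic_load(&cpu_state);

	for (;;) {
		switch (old) {
		case TASK_SCHEDULED:
			/* CPU is busy running a task: mark it and send an IPI. */
			if (atomic_compare_exchange_weak(&cpu_state, &old,
							 SHOULD_SCHEDULE))
				return true;
			break;	/* lost a race; 'old' was refreshed, retry */
		case TASK_PICKED:
			/* A pick is in flight: invalidate it, no IPI needed. */
			if (atomic_compare_exchange_weak(&cpu_state, &old,
							 PICKED_WRONG_TASK))
				return false;
			break;
		default:
			/* SHOULD_SCHEDULE, WILL_SCHEDULE, PICKED_WRONG_TASK:
			 * the CPU will reschedule anyway; skip the IPI. */
			return false;
		}
	}
}

/* Local side, on entry to schedule() (cf. sched_state_entered_schedule()). */
static void entered_schedule(void)
{
	atomic_store(&cpu_state, WILL_SCHEDULE);
}

/* Local side, once a task has been chosen by the scheduling logic. */
static void task_picked(void)
{
	atomic_store(&cpu_state, TASK_PICKED);
}

/* Local side, before committing the switch (cf. sched_state_validate_switch()).
 * Returns true if a request raced with the pick and schedule() must loop. */
static bool validate_switch(void)
{
	int expected = TASK_PICKED;

	if (atomic_compare_exchange_strong(&cpu_state, &expected,
					   TASK_SCHEDULED))
		return false;	/* pick committed; the switch is valid */

	/* Something changed while picking: restart the selection. */
	atomic_store(&cpu_state, WILL_SCHEDULE);
	return true;
}

In the hunks below, sched_state_entered_schedule() marks the point where the CPU announces it is about to reschedule, and sched_state_validate_switch() is the final check that loops back to need_resched_nonpreemptible if a remote preemption request arrived while the next task was being selected.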
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c   3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a9dd6f96c731..60fbae0c747c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3794,6 +3794,7 @@ asmlinkage void __sched schedule(void)
 
 need_resched:
 	preempt_disable();
+	sched_state_entered_schedule();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch(cpu);
@@ -3872,7 +3873,7 @@ need_resched_nonpreemptible:
 
 	post_schedule(rq);
 
-	if (unlikely(reacquire_kernel_lock(prev)))
+	if (sched_state_validate_switch() || unlikely(reacquire_kernel_lock(prev)))
 		goto need_resched_nonpreemptible;
 
 	preempt_enable_no_resched();