author		Bjoern B. Brandenburg <bbb@cs.unc.edu>	2010-11-10 12:10:49 -0500
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2010-11-11 17:57:44 -0500
commit		fb3df2ec261d8cd6bcb8206d9d985355214d7767 (patch)
tree		c41f8818ad4f1b699afbe292d131c1073b4d7c6e /litmus/sched_pfair.c
parent		516b6601bb5f71035e8859735a25dea0da4a0211 (diff)
Implement proper remote preemption support
To date, Litmus has just hooked into the smp_send_reschedule() IPI handler and marked tasks as having to reschedule to implement remote preemptions. This was never particularly clean, but so far we got away with it. However, changes in the underlying Linux kernel, and peculiarities of the ARM code (interrupts enabled before context switch), break this naive approach. This patch introduces new state-machine-based remote preemption support. By examining the local state before calling set_tsk_need_resched(), we avoid confusing the underlying Linux scheduler. Further, this patch avoids sending unnecessary IPIs.
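The diff below shows only the call sites; the state machine itself sits behind the new litmus/preempt.h header, added elsewhere in this patch (the view here is limited to sched_pfair.c). As a minimal, hypothetical sketch of the idea described above -- the state names, per-CPU variable, and helper name are illustrative assumptions, not the actual LITMUS^RT definitions -- a reschedule request could be guarded like this:

/* Illustrative sketch only: the real state machine lives behind
 * litmus/preempt.h, which this diff does not show.  The enum values,
 * per-CPU variable, and helper name below are assumptions. */
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

enum sched_state {
	TASK_SCHEDULED,  /* nothing pending; current task runs undisturbed */
	SHOULD_SCHEDULE, /* a preemption was requested, not yet acted upon */
};

static DEFINE_PER_CPU(atomic_t, resched_state);

/* Request a reschedule on 'cpu'.  The cmpxchg ensures only the first
 * request per scheduling decision sets need_resched or sends an IPI;
 * if the CPU is already in SHOULD_SCHEDULE, the redundant IPI is
 * skipped. */
static void litmus_reschedule_sketch(int cpu)
{
	atomic_t *state = &per_cpu(resched_state, cpu);

	if (atomic_cmpxchg(state, TASK_SCHEDULED, SHOULD_SCHEDULE)
	    == TASK_SCHEDULED) {
		if (cpu == smp_processor_id())
			set_tsk_need_resched(current);
		else
			smp_send_reschedule(cpu);
	}
}

Under a scheme like this, callers such as check_preempt() no longer distinguish local from remote CPUs themselves, which is exactly what the second hunk below exploits.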
Diffstat (limited to 'litmus/sched_pfair.c')
litmus/sched_pfair.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index ea77d3295290..c7d5cf7aa2b3 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -16,6 +16,7 @@
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
+#include <litmus/preempt.h>
 #include <litmus/rt_domain.h>
 #include <litmus/sched_plugin.h>
 #include <litmus/sched_trace.h>
@@ -241,11 +242,7 @@ static void check_preempt(struct task_struct* t)
 		PTRACE_TASK(t, "linked_on:%d, scheduled_on:%d\n",
 			    tsk_rt(t)->linked_on, tsk_rt(t)->scheduled_on);
 		/* preempt */
-		if (cpu == smp_processor_id())
-			set_tsk_need_resched(current);
-		else {
-			smp_send_reschedule(cpu);
-		}
+		litmus_reschedule(cpu);
 	}
 }
 
@@ -545,7 +542,7 @@ static void pfair_tick(struct task_struct* t)
 
 	if (state->local != current
 	    && (is_realtime(current) || is_present(state->local)))
-		set_tsk_need_resched(current);
+		litmus_reschedule_local();
 }
 
 static int safe_to_schedule(struct task_struct* t, int cpu)
@@ -577,7 +574,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 		if (next)
 			tsk_rt(next)->scheduled_on = state->cpu;
 	}
-
+	sched_state_task_picked();
 	raw_spin_unlock(&pfair_lock);
 
 	if (next)
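The last hunk places sched_state_task_picked() before pfair_lock is released, so the decision is announced while the lock is still held and a racing litmus_reschedule() can tell whether its request arrived before or after the pick. A hypothetical plugin callback following the same ordering -- the selection logic here is a stand-in, not pfair's actual policy:

/* Hypothetical schedule() callback illustrating the ordering the last
 * hunk establishes: pick, announce via sched_state_task_picked(),
 * then unlock. */
static struct task_struct* example_schedule(struct task_struct *prev)
{
	struct task_struct *next;

	raw_spin_lock(&pfair_lock);

	/* Placeholder selection: keep running prev if it is still a
	 * real-time task; a real plugin consults its own state here. */
	next = is_realtime(prev) ? prev : NULL;

	/* Decision made; update the preemption state machine before
	 * dropping the lock. */
	sched_state_task_picked();

	raw_spin_unlock(&pfair_lock);
	return next;
}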