author    Bjoern Brandenburg <bbb@mpi-sws.org>  2012-08-14 14:38:55 -0400
committer Bjoern Brandenburg <bbb@mpi-sws.org>  2012-08-14 14:42:08 -0400
commit    c00613f1fad38acec00ef2c009ae4e73110084ac (patch)
tree      9eae54e50e73eac4ee20c20f170ff1567d73e69f /litmus/sched_pfp.c
parent    b53c479a0f44b8990ce106622412a3bf54809944 (diff)
Fix wake_up() preemptions under P-FP and PSN-EDF
Due to a mistake in the past, PSN-EDF was missing a check for preemptions when a task resumes; P-FP inherited the same omission by virtue of copy & paste. This patch makes sure that a preemption is triggered whenever a higher-priority task is added to the ready queue.
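Conceptually, the fix funnels both the arrival path (pfp_task_new()) and the resume path (pfp_task_wake_up()) through one helper that compares the head of the per-CPU ready queue against the currently scheduled task. A minimal annotated sketch of that helper, mirroring the first hunk below (fp_prio_peek() and fp_higher_prio() come from the LITMUS^RT fixed-priority plumbing; that fp_higher_prio() returns false for a NULL candidate, i.e. an empty queue, is an assumption not visible in this diff):

	/* Trigger a preemption iff the head of the ready queue has
	 * higher priority than the currently scheduled task. Both
	 * call sites below invoke this inside the critical section
	 * protecting pfp->slock, so the queue and pfp->scheduled
	 * are stable here. */
	static void pfp_preempt_check(pfp_domain_t *pfp)
	{
		/* fp_prio_peek() yields the highest-priority queued task,
		 * or NULL if the ready queue is empty (assumed: NULL never
		 * compares as higher priority, so no spurious preemption). */
		if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled))
			preempt(pfp); /* force a reschedule on this CPU */
	}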
Diffstat (limited to 'litmus/sched_pfp.c')
 litmus/sched_pfp.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index 62be699629b1..b1d5b4326a0e 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -95,6 +95,12 @@ static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 	raw_spin_unlock_irqrestore(&pfp->slock, flags);
 }
 
+static void pfp_preempt_check(pfp_domain_t *pfp)
+{
+	if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled))
+		preempt(pfp);
+}
+
 static void pfp_domain_init(pfp_domain_t* pfp,
 			    int cpu)
 {
@@ -291,7 +297,7 @@ static void pfp_task_new(struct task_struct * t, int on_rq, int running)
 	} else {
 		requeue(t, pfp);
 		/* maybe we have to reschedule */
-		preempt(pfp);
+		pfp_preempt_check(pfp);
 	}
 	raw_spin_unlock_irqrestore(&pfp->slock, flags);
 }
@@ -337,8 +343,10 @@ static void pfp_task_wake_up(struct task_struct *task)
 	 * and won. Also, don't requeue if it is still queued, which can
 	 * happen under the DPCP due wake-ups racing with migrations.
 	 */
-	if (pfp->scheduled != task)
+	if (pfp->scheduled != task) {
 		requeue(task, pfp);
+		pfp_preempt_check(pfp);
+	}
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&pfp->slock, flags);
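Note the asymmetry the two call sites fix: pfp_task_new() previously preempted unconditionally, so the new check there only avoids a spurious reschedule when the arriving task does not win, whereas pfp_task_wake_up() previously performed no check at all after requeuing, which is the missing wake-up preemption this commit addresses.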