author    Bjoern Brandenburg <bbb@mpi-sws.org>  2012-08-14 14:38:55 -0400
committer Bjoern Brandenburg <bbb@mpi-sws.org>  2012-08-14 14:42:08 -0400
commit    c00613f1fad38acec00ef2c009ae4e73110084ac (patch)
tree      9eae54e50e73eac4ee20c20f170ff1567d73e69f
parent    b53c479a0f44b8990ce106622412a3bf54809944 (diff)
Fix wake_up() preemptions under P-FP and PSN-EDF
Due to a past mistake, PSN-EDF was missing a check for preemptions
when a task resumes; P-FP inherited the same omission via copy &
paste. This patch ensures that a preemption is triggered whenever a
higher-priority task is added to the ready queue.
-rw-r--r--  litmus/sched_pfp.c      | 12
-rw-r--r--  litmus/sched_psn_edf.c  | 21
2 files changed, 24 insertions(+), 9 deletions(-)
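
For illustration, here is a minimal user-space sketch of the check this
patch adds. All types and helpers below are simplified stand-ins, not the
LITMUS^RT API; a real plugin performs this comparison under the partition's
domain lock and preempts via the kernel's scheduling machinery.

#include <stdio.h>

/* Simplified stand-in types (hypothetical, for illustration only). */
struct task {
        const char *name;
        int prio;                  /* lower value = higher priority */
};

struct domain {
        struct task *ready_head;   /* highest-priority queued task, or NULL */
        struct task *scheduled;    /* currently running task, or NULL */
};

/* Stand-in for preempt(): in the kernel this would flag a reschedule
 * (e.g., set TIF_NEED_RESCHED) on the partition's CPU. */
static void preempt(struct domain *dom)
{
        printf("preempt: %s should displace %s\n",
               dom->ready_head->name,
               dom->scheduled ? dom->scheduled->name : "(idle)");
}

/* The pattern the patch introduces: after requeuing a resumed task,
 * preempt only if the head of the ready queue now beats the currently
 * scheduled task. */
static void preempt_check(struct domain *dom)
{
        if (dom->ready_head &&
            (!dom->scheduled || dom->ready_head->prio < dom->scheduled->prio))
                preempt(dom);
}

int main(void)
{
        struct task low  = { "low",  10 };
        struct task high = { "high",  1 };
        struct domain dom = { .ready_head = NULL, .scheduled = &low };

        /* Before the fix: wake_up() requeued the resumed task but never
         * checked for a preemption, so "low" kept running. */
        dom.ready_head = &high;    /* higher-priority task resumes */
        preempt_check(&dom);       /* after the fix: the preemption fires */
        return 0;
}

The P-FP hunks below express exactly this comparison with fp_higher_prio()
against the head of the partition's ready queue.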
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index 62be699629b1..b1d5b4326a0e 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -95,6 +95,12 @@ static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks)
         raw_spin_unlock_irqrestore(&pfp->slock, flags);
 }
 
+static void pfp_preempt_check(pfp_domain_t *pfp)
+{
+        if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled))
+                preempt(pfp);
+}
+
 static void pfp_domain_init(pfp_domain_t* pfp,
                             int cpu)
 {
@@ -291,7 +297,7 @@ static void pfp_task_new(struct task_struct * t, int on_rq, int running)
         } else {
                 requeue(t, pfp);
                 /* maybe we have to reschedule */
-                preempt(pfp);
+                pfp_preempt_check(pfp);
         }
         raw_spin_unlock_irqrestore(&pfp->slock, flags);
 }
@@ -337,8 +343,10 @@ static void pfp_task_wake_up(struct task_struct *task)
          * and won. Also, don't requeue if it is still queued, which can
          * happen under the DPCP due wake-ups racing with migrations.
          */
-        if (pfp->scheduled != task)
+        if (pfp->scheduled != task) {
                 requeue(task, pfp);
+                pfp_preempt_check(pfp);
+        }
 
 out_unlock:
         raw_spin_unlock_irqrestore(&pfp->slock, flags);
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index b0c8126bd44a..8933e15605ae 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -133,6 +133,15 @@ static void unboost_priority(struct task_struct* t)
 
 #endif
 
+static int psnedf_preempt_check(psnedf_domain_t *pedf)
+{
+        if (edf_preemption_needed(&pedf->domain, pedf->scheduled)) {
+                preempt(pedf);
+                return 1;
+        } else
+                return 0;
+}
+
 /* This check is trivial in partioned systems as we only have to consider
  * the CPU of the partition.
  */
@@ -143,11 +152,7 @@ static int psnedf_check_resched(rt_domain_t *edf)
         /* because this is a callback from rt_domain_t we already hold
          * the necessary lock for the ready queue
          */
-        if (edf_preemption_needed(edf, pedf->scheduled)) {
-                preempt(pedf);
-                return 1;
-        } else
-                return 0;
+        return psnedf_preempt_check(pedf);
 }
 
 static void job_completion(struct task_struct* t, int forced)
@@ -299,7 +304,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running)
         } else {
                 requeue(t, edf);
                 /* maybe we have to reschedule */
-                preempt(pedf);
+                psnedf_preempt_check(pedf);
         }
         raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
@@ -335,8 +340,10 @@ static void psnedf_task_wake_up(struct task_struct *task)
          * de-scheduling the task, i.e., wake_up() raced with schedule()
          * and won.
          */
-        if (pedf->scheduled != task)
+        if (pedf->scheduled != task) {
                 requeue(task, edf);
+                psnedf_preempt_check(pedf);
+        }
 
         raw_spin_unlock_irqrestore(&pedf->slock, flags);
         TRACE_TASK(task, "wake up done\n");
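
PSN-EDF applies the same pattern, except that "higher priority" means an
earlier absolute deadline and the helper returns whether it preempted, so
the rt_domain_t check_resched callback can reuse the result. A minimal
sketch under the same caveat (simplified stand-ins, not the LITMUS^RT API):

#include <stdbool.h>

struct edf_task { unsigned long long deadline; };

struct edf_domain {
        struct edf_task *ready_head;  /* earliest-deadline queued job, or NULL */
        struct edf_task *scheduled;   /* currently running job, or NULL */
};

/* Stand-in for edf_preemption_needed(): earlier deadline wins;
 * an idle CPU should always pick up a queued job. */
static bool edf_needs_preempt(const struct edf_domain *dom)
{
        if (!dom->ready_head)
                return false;
        if (!dom->scheduled)
                return true;
        return dom->ready_head->deadline < dom->scheduled->deadline;
}

/* Mirrors the shape of psnedf_preempt_check(): report whether a
 * preemption was triggered so callers can return the result. */
static int edf_preempt_check(struct edf_domain *dom)
{
        if (edf_needs_preempt(dom)) {
                /* kernel code would call preempt(pedf) here */
                return 1;
        }
        return 0;
}

int main(void)
{
        struct edf_task early = { 100 }, late = { 500 };
        struct edf_domain dom = { .ready_head = &early, .scheduled = &late };
        return edf_preempt_check(&dom) ? 0 : 1;  /* earlier deadline preempts */
}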