aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBjoern Brandenburg <bbb@mpi-sws.org>2012-09-04 08:42:22 -0400
committerBjoern Brandenburg <bbb@mpi-sws.org>2012-10-18 16:14:07 -0400
commita5797a7ff1bec0600d78120b269adfe565432fc8 (patch)
tree7aaf47ca6b2c4e901a50e32cd838bb058bbd8f42
parent3ede287e25b12fa4019d786ef084d5c88d4adaa9 (diff)
P-FP: simplify boost_priority()
boost_priority() is only applied to already-scheduled tasks. Remove the (untested and unneeded) case handling unscheduled tasks, which was likely not correct anyway.
-rw-r--r--litmus/sched_pfp.c15
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index c425e15c8777..97581f689508 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -465,17 +465,10 @@ static void boost_priority(struct task_struct* t, lt_t priority_point)
 	/* tie-break by protocol-specific priority point */
 	tsk_rt(t)->boost_start_time = priority_point;
 
-	if (pfp->scheduled != t) {
-		/* holder may be queued: first stop queue changes */
-		raw_spin_lock(&pfp->domain.release_lock);
-		if (is_queued(t) &&
-		    /* If it is queued, then we need to re-order. */
-		    bheap_decrease(fp_ready_order, tsk_rt(t)->heap_node) &&
-		    /* If we bubbled to the top, then we need to check for preemptions. */
-		    fp_preemption_needed(&pfp->ready_queue, pfp->scheduled))
-			preempt(pfp);
-		raw_spin_unlock(&pfp->domain.release_lock);
-	} /* else: nothing to do since the job is not queued while scheduled */
+	/* Priority boosting currently only takes effect for already-scheduled
+	 * tasks. This is sufficient since priority boosting only kicks in as
+	 * part of lock acquisitions. */
+	BUG_ON(pfp->scheduled != t);
 
 	raw_spin_unlock_irqrestore(&pfp->slock, flags);
 }