author     Bjoern B. Brandenburg <bbb@cs.unc.edu>  2009-10-05 14:10:44 -0400
committer  Bjoern B. Brandenburg <bbb@cs.unc.edu>  2009-10-05 14:10:44 -0400
commit     4ad2bcf6f6fff31ace097f7616b002df445110ec (patch)
tree       d7dda7c8cdfcabf1e92ac7044f0e08b3b7cc455c /litmus/sched_gsn_edf.c
parent     f8ed8c406d70b9a040807a01de5296d8832cafd7 (diff)
This fixes a regression introduced by the RTSS'09
improvements.
Just calling unlink() on any task is not safe: it might
reside in a release heap, yet unlink() was being called
whenever a new blocker increased a lock holder's priority.
To avoid crashing in this case, the FMLP code was changed
to be more careful when propagating an inherited priority.
The code now distinguishes three scenarios and uses
heap_decrease() to update the position of the lock holder's
heap node. This fixes a simple test case.
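
In outline, the new helper distinguishes the three scenarios as follows
(a condensed sketch only; locking and tracing are omitted, and the full
version appears in the diff below):

/* Condensed sketch of update_queue_position(); caller holds gsnedf_lock. */
static void update_queue_position(struct task_struct *holder)
{
	if (tsk_rt(holder)->linked_on != NO_CPU) {
		/* 1) holder is scheduled on a CPU: re-order the CPU heap by
		 *    deleting and re-inserting its node (cpu_heap is ordered
		 *    in the reverse direction, so heap_decrease() does not
		 *    apply). */
	} else if (is_queued(holder)) {
		/* 2) holder sits in the ready queue or in a release heap:
		 *    heap_decrease() moves its heap node up in place, which
		 *    is safe in either kind of heap. */
	} else {
		/* 3) neither linked nor queued: another code path (e.g., a
		 *    timer interrupt handler) is moving the task and will
		 *    enqueue it with the correct, inherited priority. */
	}
}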
Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r--  litmus/sched_gsn_edf.c  75
1 file changed, 72 insertions(+), 3 deletions(-)
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index cbde657ce7..53538e2eb2 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -600,6 +600,72 @@ static void gsnedf_task_exit(struct task_struct * t)
 }
 
 #ifdef CONFIG_FMLP
+
+/* Update the queue position of a task that got its priority boosted via
+ * priority inheritance. */
+static void update_queue_position(struct task_struct *holder)
+{
+	/* We don't know whether holder is in the ready queue. It should be,
+	 * but on a budget overrun it may already be in a release queue.
+	 * Hence, calling unlink() is not possible since it assumes that the
+	 * task is not in a release queue. However, we can safely check
+	 * whether sem->holder is currently in a queue or scheduled after
+	 * locking both the release and the ready queue lock. */
+
+	/* Assumption: caller holds gsnedf_lock */
+
+	int check_preempt = 0;
+
+	if (tsk_rt(holder)->linked_on != NO_CPU) {
+		TRACE_TASK(holder, "%s: linked on %d\n",
+			   __FUNCTION__, tsk_rt(holder)->linked_on);
+		/* Holder is scheduled; need to re-order CPUs.
+		 * We can't use heap_decrease() here since
+		 * the cpu_heap is ordered in reverse direction, so
+		 * it is actually an increase. */
+		heap_delete(cpu_lower_prio, &gsnedf_cpu_heap,
+			    gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
+		heap_insert(cpu_lower_prio, &gsnedf_cpu_heap,
+			    gsnedf_cpus[tsk_rt(holder)->linked_on]->hn);
+	} else {
+		/* holder may be queued: first stop queue changes */
+		spin_lock(&gsnedf.release_lock);
+		if (is_queued(holder)) {
+			TRACE_TASK(holder, "%s: is queued\n",
+				   __FUNCTION__);
+			/* We need to update the position
+			 * of holder in some heap. Note that this
+			 * may be a release heap. */
+			check_preempt =
+				!heap_decrease(edf_ready_order,
+					       tsk_rt(holder)->heap_node);
+		} else {
+			/* Nothing to do: if it is not queued and not linked
+			 * then it is currently being moved by other code
+			 * (e.g., a timer interrupt handler) that will use the
+			 * correct priority when enqueuing the task. */
+			TRACE_TASK(holder, "%s: is NOT queued => Done.\n",
+				   __FUNCTION__);
+		}
+		spin_unlock(&gsnedf.release_lock);
+
+		/* If holder was enqueued in a release heap, then the following
+		 * preemption check is pointless, but we can't easily detect
+		 * that case. If you want to fix this, then consider that
+		 * simply adding a state flag requires O(n) time to update when
+		 * releasing n tasks, which conflicts with the goal to have
+		 * O(log n) merges. */
+		if (check_preempt) {
+			/* heap_decrease() hit the top level of the heap: make
+			 * sure preemption checks get the right task, not the
+			 * potentially stale cache. */
+			heap_uncache_min(edf_ready_order,
+					 &gsnedf.ready_queue);
+			check_for_preemptions();
+		}
+	}
+}
+
 static long gsnedf_pi_block(struct pi_semaphore *sem,
 			    struct task_struct *new_waiter)
 {
@@ -613,16 +679,19 @@ static long gsnedf_pi_block(struct pi_semaphore *sem,
 	BUG_ON(!new_waiter);
 
 	if (edf_higher_prio(new_waiter, sem->hp.task)) {
-		TRACE_TASK(new_waiter, " boosts priority\n");
+		TRACE_TASK(new_waiter, " boosts priority via %p\n", sem);
 		/* called with IRQs disabled */
 		spin_lock(&gsnedf_lock);
 		/* store new highest-priority task */
 		sem->hp.task = new_waiter;
 		if (sem->holder) {
+			TRACE_TASK(sem->holder,
+				   " holds %p and will inherit from %s/%d\n",
+				   sem,
+				   new_waiter->comm, new_waiter->pid);
 			/* let holder inherit */
 			sem->holder->rt_param.inh_task = new_waiter;
-			unlink(sem->holder);
-			gsnedf_job_arrival(sem->holder);
+			update_queue_position(sem->holder);
 		}
 		spin_unlock(&gsnedf_lock);
 	}
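
One design point worth spelling out: when heap_decrease() propagates the
holder's node all the way to the top of the ready heap, the heap's cached
minimum may be stale, which is why the patch calls heap_uncache_min()
before check_for_preemptions(). The toy program below (a standalone
illustration with made-up names such as toy_pq and peek_min, not the
LITMUS^RT heap API) demonstrates the hazard of a cached minimum that is
not invalidated after an in-place key decrease:

/* Toy illustration (not the LITMUS^RT heap): a priority queue that
 * caches its minimum must invalidate that cache when an in-place key
 * decrease may have produced a new minimum -- otherwise a subsequent
 * peek returns a stale element. */
#include <stdio.h>

#define N 4

struct toy_pq {
	int key[N];
	int cached_min;	/* index of cached minimum, -1 if invalid */
};

static int find_min(const struct toy_pq *q)
{
	int i, best = 0;
	for (i = 1; i < N; i++)
		if (q->key[i] < q->key[best])
			best = i;
	return best;
}

static int peek_min(struct toy_pq *q)
{
	if (q->cached_min < 0)
		q->cached_min = find_min(q);	/* recompute on demand */
	return q->cached_min;
}

int main(void)
{
	struct toy_pq q = { .key = {10, 20, 30, 40}, .cached_min = -1 };

	printf("min before boost:  task %d\n", peek_min(&q));	/* task 0 */

	/* "Priority inheritance": task 3's key drops below the cached min. */
	q.key[3] = 5;

	/* Without invalidation, the cache is stale: */
	printf("stale cached min:  task %d\n", q.cached_min);	/* still 0 */

	/* Analogue of heap_uncache_min(): drop the cache so the next peek
	 * recomputes and a preemption check would see task 3. */
	q.cached_min = -1;
	printf("min after uncache: task %d\n", peek_min(&q));	/* task 3 */
	return 0;
}

In the patch, heap_uncache_min() plays the role of this cache
invalidation, ensuring the subsequent preemption check compares against
the true highest-priority ready task.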