Diffstat (limited to 'litmus/sched_gsn_edf.c')
-rw-r--r--  litmus/sched_gsn_edf.c  22
1 file changed, 13 insertions, 9 deletions
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 6ed504f4750e..677a932e08be 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -645,7 +645,7 @@ static long gsnedf_admit_task(struct task_struct* tsk)
 #include <litmus/fdso.h>
 
 /* called with IRQs off */
-static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
+static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
 	int linked_on;
 	int check_preempt = 0;
@@ -653,7 +653,7 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 	raw_spin_lock(&gsnedf_lock);
 
 	TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
-	tsk_rt(t)->inh_task = prio_inh;
+	tsk_rt(t)->eff_prio = prio_inh;
 
 	linked_on = tsk_rt(t)->linked_on;
 
@@ -712,7 +712,7 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
 }
 
 /* called with IRQs off */
-static void clear_priority_inheritance(struct task_struct* t)
+static void decrease_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
 {
 	raw_spin_lock(&gsnedf_lock);
 
@@ -720,8 +720,12 @@ static void clear_priority_inheritance(struct task_struct* t)
 	 * resource. Thus we can make the following assumption.*/
 	BUG_ON(tsk_rt(t)->scheduled_on == NO_CPU);
 
-	TRACE_TASK(t, "priority restored\n");
-	tsk_rt(t)->inh_task = NULL;
+	if(prio_inh)
+		TRACE_TASK(t, "inherited priority decreased to %s/%d\n", prio_inh->comm, prio_inh->pid);
+	else
+		TRACE_TASK(t, "base priority restored.\n");
+
+	tsk_rt(t)->eff_prio = prio_inh;
 
 	/* Check if rescheduling is necessary. We can't use heap_decrease()
 	 * since the priority was effectively lowered. */
@@ -797,7 +801,7 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
 		if (edf_higher_prio(t, sem->hp_waiter)) {
 			sem->hp_waiter = t;
 			if (edf_higher_prio(t, sem->owner))
-				set_priority_inheritance(sem->owner, sem->hp_waiter);
+				increase_priority_inheritance(sem->owner, sem->hp_waiter);
 		}
 
 		TS_LOCK_SUSPEND;
@@ -865,7 +869,7 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
 			/* Well, if next is not the highest-priority waiter,
 			 * then it ought to inherit the highest-priority
 			 * waiter's priority. */
-			set_priority_inheritance(next, sem->hp_waiter);
+			increase_priority_inheritance(next, sem->hp_waiter);
 		}
 
 		/* wake up next */
@@ -875,8 +879,8 @@ int gsnedf_fmlp_unlock(struct litmus_lock* l)
 		sem->owner = NULL;
 
 	/* we lose the benefit of priority inheritance (if any) */
-	if (tsk_rt(t)->inh_task)
-		decrease_priority_inheritance(t, NULL);
+	if (tsk_rt(t)->eff_prio)
+		decrease_priority_inheritance(t, NULL);
 
 out:
 	spin_unlock_irqrestore(&sem->wait.lock, flags);
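
Taken together, the hunks above split the old set/clear pair into increase_priority_inheritance()/decrease_priority_inheritance(), with a NULL prio_inh argument to the latter restoring the base priority. The following is a minimal standalone sketch of that calling convention only, not the LITMUS^RT code itself: struct model_task, eff_deadline(), and the model_* helper names are hypothetical stand-ins for task_struct/tsk_rt() state, and deadlines stand in for EDF priority (earlier deadline == higher priority).

/* Standalone model of the increase/decrease priority-inheritance pair.
 * All types and helpers here are simplified stand-ins, not kernel APIs. */
#include <stdio.h>
#include <stddef.h>

struct model_task {
	const char *comm;            /* task name                          */
	unsigned long deadline;      /* base priority: earlier == higher   */
	struct model_task *eff_prio; /* task whose priority is inherited   */
};

/* Effective deadline: the inherited task's deadline, if any. */
static unsigned long eff_deadline(struct model_task *t)
{
	return t->eff_prio ? t->eff_prio->deadline : t->deadline;
}

/* Analogue of increase_priority_inheritance(): t inherits from prio_inh. */
static void model_increase_prio_inh(struct model_task *t, struct model_task *prio_inh)
{
	t->eff_prio = prio_inh;
	printf("%s inherits priority from %s\n", t->comm, prio_inh->comm);
}

/* Analogue of decrease_priority_inheritance(): prio_inh == NULL restores
 * the base priority; otherwise inheritance drops to a lower-priority donor. */
static void model_decrease_prio_inh(struct model_task *t, struct model_task *prio_inh)
{
	t->eff_prio = prio_inh;
	if (prio_inh)
		printf("%s: inherited priority decreased to %s\n",
		       t->comm, prio_inh->comm);
	else
		printf("%s: base priority restored\n", t->comm);
}

int main(void)
{
	struct model_task owner  = { "owner",  100, NULL };
	struct model_task waiter = { "waiter",  10, NULL };

	/* Lock path: a higher-priority waiter blocks on the owner's lock. */
	if (eff_deadline(&waiter) < eff_deadline(&owner))
		model_increase_prio_inh(&owner, &waiter);
	printf("owner effective deadline: %lu\n", eff_deadline(&owner));

	/* Unlock path: the owner releases the lock and gives up inheritance. */
	if (owner.eff_prio)
		model_decrease_prio_inh(&owner, NULL);
	printf("owner effective deadline: %lu\n", eff_deadline(&owner));

	return 0;
}

In this model, the unlock path mirrors the gsnedf_fmlp_unlock() hunk: inheritance is dropped by passing NULL rather than by a dedicated clear function, which also leaves room for stepping down to an intermediate donor when one exists.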