author		Bryan Ward <bcw@cs.unc.edu>	2012-12-19 11:36:26 -0500
committer	Bryan Ward <bcw@cs.unc.edu>	2013-04-16 14:39:29 -0400
commit		89fd339cb393a0b4f6d8923d7468cf87dcd85de8 (patch)
tree		dc5a490f85e2e770948f38f2f9ebd4cb183c5ed2
parent		836dabfa1ffb64b76541ebc3ca37d2b327a7c8e4 (diff)

Bug fixes.
-rw-r--r--	litmus/sched_psn_edf.c	87
1 file changed, 55 insertions, 32 deletions
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index c76c0412f80..9da32a773db 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -78,31 +78,31 @@ static void preempt(psnedf_domain_t *pedf)
 
 static void boost_priority(struct task_struct* t)
 {
 	unsigned long flags;
 	psnedf_domain_t* pedf = task_pedf(t);
 	lt_t now;
 
 	raw_spin_lock_irqsave(&pedf->slock, flags);
 	now = litmus_clock();
 
 	TRACE_TASK(t, "priority boosted at %llu\n", now);
 
 	tsk_rt(t)->priority_boosted = 1;
 	tsk_rt(t)->boost_start_time = now;
 
 	if (pedf->scheduled != t) {
 		/* holder may be queued: first stop queue changes */
 		raw_spin_lock(&pedf->domain.release_lock);
 		if (is_queued(t) &&
 		    /* If it is queued, then we need to re-order. */
 		    bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node) &&
 		    /* If we bubbled to the top, then we need to check for preemptions. */
 		    edf_preemption_needed(&pedf->domain, pedf->scheduled))
 			preempt(pedf);
 		raw_spin_unlock(&pedf->domain.release_lock);
 	} /* else: nothing to do since the job is not queued while scheduled */
 
 	raw_spin_unlock_irqrestore(&pedf->slock, flags);
 }
 
 static void unboost_priority(struct task_struct* t)
@@ -580,7 +580,7 @@ int psnedf_dgl_lock(struct litmus_lock* l)
 	return l->ops->dynamic_group_lock(l, dgl_from_lock(l)->dgl_resources);
 }
 
-/* for compatibility, assume unlock releasess the whole group. */
+/* for compatibility, assume unlock releases the whole group. */
 int psnedf_dgl_unlock(struct litmus_lock* l)
 {
 	return l->ops->dynamic_group_unlock(l, dgl_from_lock(l)->dgl_resources);
@@ -686,12 +686,24 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resourc
 	return 0;
 }
 
+inline int num_boosted(struct dgl_semaphore *sem)
+{
+	int ret = 0;
+	int i;
+	for(i = 0; i < NR_CPUS; i++){
+		ret += sem->boosted[i];
+	}
+	return ret;
+}
+
 int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t resources)
 {
-	struct task_struct *t = current, *next;
+	struct task_struct *t = current, *tsk;
 	struct dgl_semaphore *sem = dgl_from_lock(l);
 	unsigned long flags;
 	int err = 0;
+	resource_mask_t logically_locked;
+	struct list_head *pos, *tmp;
 
 	TRACE("Trying to unlock a DGL\n");
 
@@ -707,6 +719,8 @@ int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t resou
 		TRACE("Trying to lock unowned resources: %d\t%d\n", resources, t->resources);
 		err = -EINVAL;
 		goto out;
+	} else {
+		sem->locked -= resources;
 	}
 
 	// if the job released all of the resources it owned, then unboost.
@@ -717,21 +731,30 @@ int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t resou
 		t->resources = t->resources & ~resources;
 	}
 
-	next = __waitqueue_peek_first(&sem->wait);
-	while( next && ~(next->resources & sem->locked) == -1 &&
-			!sem->boosted[task_cpu(next)] ){
+	logically_locked = sem->locked;
+
+	list_for_each_safe(pos, tmp, &sem->wait.task_list) {
+		tsk = (struct task_struct*) list_entry(pos, wait_queue_t,
+						       task_list)->private;
 
-		//next should not change, it should just be removed
-		//from the head of the queue.
-		next = __waitqueue_remove_first(&sem->wait);
 
-		boost_priority(next);
+		if ( (logically_locked == -1) && (num_boosted(sem) == NR_CPUS) ){
+			break;
+		}
+
+		// the resources requested are unlocked
+		if( !(tsk->resources & logically_locked) && !sem->boosted[task_cpu(tsk)]) {
 
-		sem->locked = sem->locked | next->resources;
+			list_del_init(pos);
 
-		wake_up_process(next);
+			sem->locked = sem->locked | tsk->resources;
+			sem->boosted[task_cpu(t)] = true;
+
+			boost_priority(tsk);
+			wake_up_process(tsk);
+		}
 
-	next = __waitqueue_peek_first(&sem->wait);
+		logically_locked = logically_locked | tsk->resources;
 	}
 
 out:
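
A note on what the rewritten wake-up path is doing, with a sketch to make the technique concrete. The old code woke waiters strictly from the head of the queue and stopped at the first one that could not proceed (its test `~(next->resources & sem->locked) == -1` is just an indirect way of writing `(next->resources & sem->locked) == 0`). The new code scans the entire FIFO wait queue once, carrying a running mask `logically_locked` of resources that are either held or already claimed by an earlier waiter; a waiter is dequeued, boosted, and woken only if its whole request is free under that mask and its partition has no boosted task yet, and the scan bails out early once every resource is spoken for and all NR_CPUS partitions have a boosted task. The user-space model below is a minimal sketch of that scan, not LITMUS^RT code: struct waiter, wake_eligible, the boosted[] array, and the four-CPU example are all invented for illustration.

/* Minimal user-space model of the new unlock scan (illustrative names,
 * not kernel code).  Resources are bits in a mask; waiters sit in FIFO
 * order; at most one waiter per CPU may be boosted at a time. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4
typedef uint32_t resource_mask_t;

struct waiter {
	resource_mask_t resources;  /* requested resource bitmask */
	int cpu;                    /* partition this waiter runs on */
	bool woken;
};

static bool boosted[NR_CPUS];   /* stands in for sem->boosted[] */

static int num_boosted(void)
{
	int i, ret = 0;
	for (i = 0; i < NR_CPUS; i++)
		ret += boosted[i];
	return ret;
}

/* One pass over the queue after an unlock updated "locked". */
static void wake_eligible(struct waiter *queue, int n, resource_mask_t locked)
{
	resource_mask_t logically_locked = locked;
	int i;

	for (i = 0; i < n; i++) {
		struct waiter *w = &queue[i];

		/* Early exit: every resource is spoken for and every
		 * partition already has a boosted task. */
		if (logically_locked == (resource_mask_t) -1 &&
		    num_boosted() == NR_CPUS)
			break;

		/* Whole request free and the waiter's CPU not boosted:
		 * dequeue, boost, and wake (list_del_init, boost_priority,
		 * and wake_up_process in the kernel version). */
		if (!(w->resources & logically_locked) && !boosted[w->cpu]) {
			boosted[w->cpu] = true;
			w->woken = true;
		}

		/* Later waiters may not jump ahead of this request. */
		logically_locked |= w->resources;
	}
}

int main(void)
{
	/* Waiters 0 and 1 contend for resource bit 0; waiter 2 wants bit 1. */
	struct waiter q[] = {
		{ .resources = 0x1, .cpu = 0 },
		{ .resources = 0x1, .cpu = 1 },
		{ .resources = 0x2, .cpu = 2 },
	};
	wake_eligible(q, 3, 0x0);
	printf("woken: %d %d %d\n", q[0].woken, q[1].woken, q[2].woken);
	/* Prints "woken: 1 0 1": waiter 1 must stay behind waiter 0. */
	return 0;
}

One detail the sketch deliberately normalizes: the hunk above sets sem->boosted[task_cpu(t)] (indexed by the unlocking task t) when waking tsk, while the eligibility test reads sem->boosted[task_cpu(tsk)] (the waiter's CPU). The model indexes by the waiter's CPU on both sides, which is what the accompanying check suggests is intended.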