about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Bryan Ward <bcw@cs.unc.edu> 2013-04-16 14:22:45 -0400
committer: Bryan Ward <bcw@cs.unc.edu> 2013-04-16 14:40:24 -0400
commit: d6ba4603c299657235ce49bbfb9542d3e4511c26 (patch)
tree: 5422ac51c75c88b7e6b83f65e07c51d8ae3c8a72
parent: 89fd339cb393a0b4f6d8923d7468cf87dcd85de8 (diff)
Fixed a boosting bug.
-rw-r--r--  include/litmus/debug_trace.h | 18
-rw-r--r--  litmus/locking.c             |  2
-rw-r--r--  litmus/sched_psn_edf.c       | 67
3 files changed, 66 insertions(+), 21 deletions(-)
diff --git a/include/litmus/debug_trace.h b/include/litmus/debug_trace.h
index 1266ac6a760..928b1dfd1db 100644
--- a/include/litmus/debug_trace.h
+++ b/include/litmus/debug_trace.h
@@ -17,10 +17,22 @@ extern atomic_t __log_seq_no;
17#define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \ 17#define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \
18 raw_smp_processor_id(), \ 18 raw_smp_processor_id(), \
19 __FUNCTION__, __FILE__, __LINE__ 19 __FUNCTION__, __FILE__, __LINE__
20#define STRACE(fmt, args...) \
21 sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt, \
22 TRACE_ARGS, ## args)
23#define STRACE2(fmt, args...) \
24 sched_trace_log_message("%d P%d [%s@%s:%d]: " fmt, \
25 TRACE_ARGS, ## args)
20#else 26#else
21#define TRACE_PREFIX "%d P%d: " 27#define TRACE_PREFIX "%d P%d: "
22#define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \ 28#define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \
23 raw_smp_processor_id() 29 raw_smp_processor_id()
30#define STRACE(fmt, args...) \
31 sched_trace_log_message("%d P%d : " fmt, \
32 TRACE_ARGS, ## args)
33#define STRACE2(fmt, args...) \
34 sched_trace_log_message("%d P%d : " fmt, \
35 TRACE_ARGS, ## args)
24#endif 36#endif
25 37
26#define TRACE(fmt, args...) \ 38#define TRACE(fmt, args...) \
@@ -34,7 +46,13 @@ extern atomic_t __log_seq_no;
34 t ? (t)->rt_param.job_params.job_no : 0, \ 46 t ? (t)->rt_param.job_params.job_no : 0, \
35 ##args) 47 ##args)
36 48
49#define STRACE_TASK(t, fmt, args...) \
50 STRACE("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \
51 (t)->rt_param.job_params.job_no, ##args)
52
37#define TRACE_CUR(fmt, args...) \ 53#define TRACE_CUR(fmt, args...) \
38 TRACE_TASK(current, fmt, ## args) 54 TRACE_TASK(current, fmt, ## args)
39 55
56
57
40#endif 58#endif
diff --git a/litmus/locking.c b/litmus/locking.c
index 65fd51f2c80..665eef30fe0 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -69,7 +69,6 @@ asmlinkage long sys_dynamic_group_lock(resource_mask_t lock_ods)
69 if (entry && is_lock(entry)) { 69 if (entry && is_lock(entry)) {
70 l = get_lock(entry); 70 l = get_lock(entry);
71 if (l->type == DGL_SEM){ 71 if (l->type == DGL_SEM){
72 TRACE_CUR("attempts to lock %d\n", lock_ods);
73 err = l->ops->dynamic_group_lock(l, lock_ods); 72 err = l->ops->dynamic_group_lock(l, lock_ods);
74 } 73 }
75 } 74 }
@@ -91,7 +90,6 @@ asmlinkage long sys_dynamic_group_unlock(resource_mask_t lock_ods)
91 if (entry && is_lock(entry)) { 90 if (entry && is_lock(entry)) {
92 l = get_lock(entry); 91 l = get_lock(entry);
93 if (l->type == DGL_SEM){ 92 if (l->type == DGL_SEM){
94 TRACE_CUR("attempts to unlock all resources in 0x%p\n",l);
95 err = l->ops->dynamic_group_unlock(l, lock_ods); 93 err = l->ops->dynamic_group_unlock(l, lock_ods);
96 } else{ 94 } else{
97 TRACE_CUR("Wrong Type: %d\n", l->type); 95 TRACE_CUR("Wrong Type: %d\n", l->type);
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 9da32a773db..451433c9d54 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -59,7 +59,7 @@ static void psnedf_domain_init(psnedf_domain_t* pedf,
59static void requeue(struct task_struct* t, rt_domain_t *edf) 59static void requeue(struct task_struct* t, rt_domain_t *edf)
60{ 60{
61 if (t->state != TASK_RUNNING) 61 if (t->state != TASK_RUNNING)
62 TRACE_TASK(t, "requeue: !TASK_RUNNING\n"); 62 STRACE_TASK(t, "requeue: !TASK_RUNNING\n");
63 63
64 tsk_rt(t)->completed = 0; 64 tsk_rt(t)->completed = 0;
65 if (is_early_releasing(t) || is_released(t, litmus_clock())) 65 if (is_early_releasing(t) || is_released(t, litmus_clock()))
@@ -85,7 +85,7 @@ static void boost_priority(struct task_struct* t)
85 raw_spin_lock_irqsave(&pedf->slock, flags); 85 raw_spin_lock_irqsave(&pedf->slock, flags);
86 now = litmus_clock(); 86 now = litmus_clock();
87 87
88 TRACE_TASK(t, "priority boosted at %llu\n", now); 88 STRACE_TASK(t, "priority boosted at %llu\n", now);
89 89
90 tsk_rt(t)->priority_boosted = 1; 90 tsk_rt(t)->priority_boosted = 1;
91 tsk_rt(t)->boost_start_time = now; 91 tsk_rt(t)->boost_start_time = now;
@@ -117,7 +117,7 @@ static void unboost_priority(struct task_struct* t)
117 /* assumption: this only happens when the job is scheduled */ 117 /* assumption: this only happens when the job is scheduled */
118 BUG_ON(pedf->scheduled != t); 118 BUG_ON(pedf->scheduled != t);
119 119
120 TRACE_TASK(t, "priority restored at %llu\n", now); 120 STRACE_TASK(t, "priority restored at %llu\n", now);
121 121
122 /* priority boosted jobs must be scheduled */ 122 /* priority boosted jobs must be scheduled */
123 BUG_ON(pedf->scheduled != t); 123 BUG_ON(pedf->scheduled != t);
@@ -159,7 +159,7 @@ static int psnedf_check_resched(rt_domain_t *edf)
159static void job_completion(struct task_struct* t, int forced) 159static void job_completion(struct task_struct* t, int forced)
160{ 160{
161 sched_trace_task_completion(t,forced); 161 sched_trace_task_completion(t,forced);
162 TRACE_TASK(t, "job_completion().\n"); 162 STRACE_TASK(t, "job_completion().\n");
163 163
164 tsk_rt(t)->completed = 1; 164 tsk_rt(t)->completed = 1;
165 prepare_for_next_period(t); 165 prepare_for_next_period(t);
@@ -199,6 +199,12 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
199 int out_of_time, sleep, preempt, 199 int out_of_time, sleep, preempt,
200 np, exists, blocks, resched; 200 np, exists, blocks, resched;
201 201
202 if (prev && is_realtime(prev)){
203 TRACE_TASK(prev, "Rescheduling\n");
204 }else{
205 TRACE("Rescheduling\n");
206 }
207
202 raw_spin_lock(&pedf->slock); 208 raw_spin_lock(&pedf->slock);
203 209
204 /* sanity checking 210 /* sanity checking
@@ -266,10 +272,10 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
266 next = prev; 272 next = prev;
267 273
268 if (next) { 274 if (next) {
269 TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); 275 STRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
270 tsk_rt(next)->completed = 0; 276 set_rt_flags(next, RT_F_RUNNING);
271 } else { 277 } else {
272 TRACE("becoming idle at %llu\n", litmus_clock()); 278 STRACE("becoming idle at %llu\n", litmus_clock());
273 } 279 }
274 280
275 pedf->scheduled = next; 281 pedf->scheduled = next;
@@ -347,7 +353,7 @@ static void psnedf_task_wake_up(struct task_struct *task)
347 } 353 }
348 354
349 raw_spin_unlock_irqrestore(&pedf->slock, flags); 355 raw_spin_unlock_irqrestore(&pedf->slock, flags);
350 TRACE_TASK(task, "wake up done\n"); 356 STRACE_TASK(task, "wake up done\n");
351} 357}
352 358
353static void psnedf_task_block(struct task_struct *t) 359static void psnedf_task_block(struct task_struct *t)
@@ -641,7 +647,7 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resourc
641 wait_queue_t wait; 647 wait_queue_t wait;
642 unsigned long flags; 648 unsigned long flags;
643 649
644 TRACE("Trying to lock a DGL\n"); 650 TRACE_CUR("Trying to lock a DGL\n");
645 651
646 if (!is_realtime(t)) 652 if (!is_realtime(t))
647 return -EPERM; 653 return -EPERM;
@@ -656,6 +662,9 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resourc
656 // if sem->locked & resources == 0, then all resources are available, 662 // if sem->locked & resources == 0, then all resources are available,
657 // otherwise we must suspend. 663 // otherwise we must suspend.
658 if (sem->locked & resources){ 664 if (sem->locked & resources){
665
666 STRACE("Resources locked, suspending\n");
667
659 init_waitqueue_entry(&wait, t); 668 init_waitqueue_entry(&wait, t);
660 669
661 set_task_state(t, TASK_UNINTERRUPTIBLE); 670 set_task_state(t, TASK_UNINTERRUPTIBLE);
@@ -670,6 +679,9 @@ int psnedf_dgl_dynamic_group_lock(struct litmus_lock* l, resource_mask_t resourc
670 679
671 TS_LOCK_RESUME; 680 TS_LOCK_RESUME;
672 } else { 681 } else {
682
683 STRACE("Acquired a resource\n");
684
673 sem->locked = sem->locked | resources; 685 sem->locked = sem->locked | resources;
674 686
675 // if a job requests a resource, then it was scheduled, and therefore 687 // if a job requests a resource, then it was scheduled, and therefore
@@ -705,18 +717,21 @@ int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t resou
705 resource_mask_t logically_locked; 717 resource_mask_t logically_locked;
706 struct list_head *pos, *tmp; 718 struct list_head *pos, *tmp;
707 719
708 TRACE("Trying to unlock a DGL\n"); 720 TRACE_CUR("Trying to unlock a DGL\n");
721
722 //Unlocking but priority is not boosted
723 BUG_ON(!sem->boosted[task_cpu(t)]);
709 724
710 spin_lock_irqsave(&sem->wait.lock, flags); 725 spin_lock_irqsave(&sem->wait.lock, flags);
711 726
712 // ~resources | t->resources checks that t owns the resources being released 727 // ~resources | t->resources checks that t owns the resources being released
713 // note that a job can release a subset of the resources it has acquired. 728 // note that a job can release a subset of the resources it has acquired.
714 if ( !is_mask_valid(l, resources)){ 729 if ( !is_mask_valid(l, resources)){
715 TRACE("Invalid mask %d\n", resources); 730 STRACE("Invalid mask %d\n", resources);
716 err = -EINVAL; 731 err = -EINVAL;
717 goto out; 732 goto out;
718 } else if ( (~resources | t->resources) != -1){ 733 } else if ( (~resources | t->resources) != -1){
719 TRACE("Trying to lock unowned resources: %d\t%d\n", resources, t->resources); 734 STRACE("Trying to lock unowned resources: %d\t%d\n", resources, t->resources);
720 err = -EINVAL; 735 err = -EINVAL;
721 goto out; 736 goto out;
722 } else { 737 } else {
@@ -725,9 +740,12 @@ int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t resou
725 740
726 // if the job released all of the resources it owned, then unboost. 741 // if the job released all of the resources it owned, then unboost.
727 if (resources == t->resources){ 742 if (resources == t->resources){
743 STRACE("Released all resources\n");
728 unboost_priority(t); 744 unboost_priority(t);
745 sem->boosted[task_cpu(t)] = false;
729 } else { 746 } else {
730 // update t->resources to reflect the resources currently owned. 747 // update t->resources to reflect the resources currently owned.
748 STRACE("Unlocked a subset of locked resources\n");
731 t->resources = t->resources & ~resources; 749 t->resources = t->resources & ~resources;
732 } 750 }
733 751
@@ -736,21 +754,30 @@ int psnedf_dgl_dynamic_group_unlock(struct litmus_lock* l, resource_mask_t resou
736 list_for_each_safe(pos, tmp, &sem->wait.task_list) { 754 list_for_each_safe(pos, tmp, &sem->wait.task_list) {
737 tsk = (struct task_struct*) list_entry(pos, wait_queue_t, 755 tsk = (struct task_struct*) list_entry(pos, wait_queue_t,
738 task_list)->private; 756 task_list)->private;
757 STRACE_TASK(tsk, "Evaluating\n");
739 758
740 759 if ( (logically_locked == -1) || (num_boosted(sem) == NR_CPUS) ){
741 if ( (logically_locked == -1) && (num_boosted(sem) == NR_CPUS) ){ 760 STRACE_TASK(tsk, "All procs boosted, or all resources locked\n");
742 break; 761 break;
743 } 762 }
744 763
745 // the resources requested are unlocked 764 STRACE_TASK(tsk, "Logically locked: %o\n", logically_locked);
765 STRACE_TASK(tsk, "tsk->resources: %o\n", tsk->resources);
766 STRACE_TASK(tsk, "!(tsk->resources & logically_locked): %o\n", !(tsk->resources & logically_locked));
767 STRACE_TASK(tsk, "!sem->boosted: %d\n", !sem->boosted[task_cpu(tsk)]);
768
769 // the resources requested are unlocked, tsk acquires its resources
746 if( !(tsk->resources & logically_locked) && !sem->boosted[task_cpu(tsk)]) { 770 if( !(tsk->resources & logically_locked) && !sem->boosted[task_cpu(tsk)]) {
747 771
772 STRACE_TASK(tsk, "Acquired a resource\n");
773
748 list_del_init(pos); 774 list_del_init(pos);
749 775
750 sem->locked = sem->locked | tsk->resources; 776 sem->locked = sem->locked | tsk->resources;
751 sem->boosted[task_cpu(t)] = true;
752 777
778 sem->boosted[task_cpu(tsk)] = true;
753 boost_priority(tsk); 779 boost_priority(tsk);
780
754 wake_up_process(tsk); 781 wake_up_process(tsk);
755 } 782 }
756 783
@@ -824,6 +851,7 @@ static long psnedf_allocate_lock(struct litmus_lock **lock, int type,
824 void* __user config) 851 void* __user config)
825{ 852{
826 int err = -ENXIO; 853 int err = -ENXIO;
854 int config_num;
827 struct srp_semaphore* srp; 855 struct srp_semaphore* srp;
828 struct od_table_entry* entry; 856 struct od_table_entry* entry;
829 857
@@ -854,8 +882,9 @@ static long psnedf_allocate_lock(struct litmus_lock **lock, int type,
854 * and that a config > 0 means point this resource to the existing DGL 882 * and that a config > 0 means point this resource to the existing DGL
855 * for the resource in entry number config. 883 * for the resource in entry number config.
856 */ 884 */
857 TRACE("config: %d\n", *(int*)(config)); 885 config_num = *(int*)(config);
858 if (*(int*)(config) < 0){ 886 TRACE("config: %d\n", config_num);
887 if (config_num < 0){
859 *lock = psnedf_new_dgl(); 888 *lock = psnedf_new_dgl();
860 if (*lock) 889 if (*lock)
861 err = 0; 890 err = 0;
@@ -863,7 +892,7 @@ static long psnedf_allocate_lock(struct litmus_lock **lock, int type,
863 err = -ENOMEM; 892 err = -ENOMEM;
864 /* In this case, we are adding a resource to an existing lock */ 893 /* In this case, we are adding a resource to an existing lock */
865 } else { 894 } else {
866 entry = get_entry_for_od(*(int*)(config)); 895 entry = get_entry_for_od(config_num);
867 if (entry && entry->obj && entry->obj->type == DGL_SEM){ 896 if (entry && entry->obj && entry->obj->type == DGL_SEM){
868 *lock = (struct litmus_lock*) entry->obj->obj; 897 *lock = (struct litmus_lock*) entry->obj->obj;
869 err = 0; 898 err = 0;