diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2013-01-24 17:34:45 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2013-01-24 17:41:41 -0500 |
commit | e20223fcfd6ad9274e9e1aab11a73eaa72c7a4f5 (patch) | |
tree | d8f56c8d1d21ecec20c7d4a0fb6e63c415eaf815 | |
parent | 821a500d34ae6b2487a2ebecc6ab276fdfde0680 (diff) |
Fix inheritance propagation for klmirqd & aux tasks.
Bug: Inheritance not propagated to klmirqd and aux
tasks when the task these threads inherit from
has a change in its own priority.
(Also removed per-task NV interrupt tracking since
we cannot identify exact interrupt ownership under
GPUSync.)
-rw-r--r-- | include/litmus/rt_param.h | 2 | ||||
-rw-r--r-- | include/litmus/sched_trace.h | 3 | ||||
-rw-r--r-- | litmus/aux_tasks.c | 14 | ||||
-rw-r--r-- | litmus/litmus.c | 3 | ||||
-rw-r--r-- | litmus/nvidia_info.c | 54 | ||||
-rw-r--r-- | litmus/sched_cedf.c | 100 | ||||
-rw-r--r-- | litmus/sched_gsn_edf.c | 4 | ||||
-rw-r--r-- | litmus/sched_task_trace.c | 3 |
8 files changed, 133 insertions, 50 deletions
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index dd1ef076a4b2..49b2b45396e4 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
@@ -303,8 +303,6 @@ struct rt_param { | |||
303 | #endif | 303 | #endif |
304 | 304 | ||
305 | #ifdef CONFIG_LITMUS_NVIDIA | 305 | #ifdef CONFIG_LITMUS_NVIDIA |
306 | /* number of top-half interrupts handled on behalf of current job */ | ||
307 | atomic_t nv_int_count; | ||
308 | long unsigned int held_gpus; // bitmap of held GPUs. | 306 | long unsigned int held_gpus; // bitmap of held GPUs. |
309 | struct binheap_node gpu_owner_node; // just one GPU for now... | 307 | struct binheap_node gpu_owner_node; // just one GPU for now... |
310 | unsigned int hide_from_gpu:1; | 308 | unsigned int hide_from_gpu:1; |
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h index 5fbd7f37b26d..2598cdf6088e 100644 --- a/include/litmus/sched_trace.h +++ b/include/litmus/sched_trace.h | |||
@@ -56,8 +56,7 @@ struct st_completion_data { /* A job completed. */ | |||
56 | * next task automatically; set to 0 otherwise. | 56 | * next task automatically; set to 0 otherwise. |
57 | */ | 57 | */ |
58 | u8 __uflags:7; | 58 | u8 __uflags:7; |
59 | u16 nv_int_count; | 59 | u8 __unused[7]; |
60 | u8 __unused[5]; | ||
61 | } __attribute__((packed)); | 60 | } __attribute__((packed)); |
62 | 61 | ||
63 | struct st_block_data { /* A task blocks. */ | 62 | struct st_block_data { /* A task blocks. */ |
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c index 07907e22bc09..5aa9f7634fbf 100644 --- a/litmus/aux_tasks.c +++ b/litmus/aux_tasks.c | |||
@@ -133,6 +133,8 @@ int aux_task_owner_increase_priority(struct task_struct *t) | |||
133 | struct task_struct *hp = NULL; | 133 | struct task_struct *hp = NULL; |
134 | struct task_struct *hp_eff = NULL; | 134 | struct task_struct *hp_eff = NULL; |
135 | 135 | ||
136 | int increase_aux = 0; | ||
137 | |||
136 | BUG_ON(!is_realtime(t)); | 138 | BUG_ON(!is_realtime(t)); |
137 | BUG_ON(!tsk_rt(t)->has_aux_tasks); | 139 | BUG_ON(!tsk_rt(t)->has_aux_tasks); |
138 | 140 | ||
@@ -155,12 +157,19 @@ int aux_task_owner_increase_priority(struct task_struct *t) | |||
155 | if (hp != t) { /* our position in the heap may have changed. hp is already at the root. */ | 157 | if (hp != t) { /* our position in the heap may have changed. hp is already at the root. */ |
156 | binheap_decrease(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); | 158 | binheap_decrease(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners); |
157 | } | 159 | } |
160 | else { | ||
161 | /* unconditionally propagate - t already has the updated eff and is at the root, | ||
162 | so we can't detect a change in inheritance, but we know that priority has | ||
163 | indeed increased/changed. */ | ||
164 | increase_aux = 1; | ||
165 | } | ||
158 | 166 | ||
159 | hp = container_of( | 167 | hp = container_of( |
160 | binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | 168 | binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), |
161 | struct task_struct, rt_param); | 169 | struct task_struct, rt_param); |
162 | 170 | ||
163 | if (effective_priority(hp) != hp_eff) { /* the eff. prio. of hp has changed */ | 171 | /* check if the eff. prio. of hp has changed */ |
172 | if (increase_aux || (effective_priority(hp) != hp_eff)) { | ||
164 | hp_eff = effective_priority(hp); | 173 | hp_eff = effective_priority(hp); |
165 | TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); | 174 | TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); |
166 | retval = aux_tasks_increase_priority(leader, hp_eff); | 175 | retval = aux_tasks_increase_priority(leader, hp_eff); |
@@ -207,7 +216,8 @@ int aux_task_owner_decrease_priority(struct task_struct *t) | |||
207 | container_of( | 216 | container_of( |
208 | binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | 217 | binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), |
209 | struct task_struct, rt_param); | 218 | struct task_struct, rt_param); |
210 | if (effective_priority(new_hp) != hp_eff) { /* eff prio. of hp has changed */ | 219 | /* if the new_hp is still t, or if the effective priority has changed */ |
220 | if ((new_hp == t) || (effective_priority(new_hp) != hp_eff)) { | ||
211 | hp_eff = effective_priority(new_hp); | 221 | hp_eff = effective_priority(new_hp); |
212 | TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); | 222 | TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid); |
213 | retval = aux_tasks_decrease_priority(leader, hp_eff); | 223 | retval = aux_tasks_decrease_priority(leader, hp_eff); |
diff --git a/litmus/litmus.c b/litmus/litmus.c index 2911e7ec7029..35bc70455425 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -493,9 +493,6 @@ long __litmus_admit_task(struct task_struct* tsk) | |||
493 | bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); | 493 | bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); |
494 | } | 494 | } |
495 | 495 | ||
496 | #ifdef CONFIG_LITMUS_NVIDIA | ||
497 | atomic_set(&tsk_rt(tsk)->nv_int_count, 0); | ||
498 | #endif | ||
499 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) | 496 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) |
500 | init_gpu_affinity_state(tsk); | 497 | init_gpu_affinity_state(tsk); |
501 | #endif | 498 | #endif |
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c index dda863009fee..ab62ca1b5b11 100644 --- a/litmus/nvidia_info.c +++ b/litmus/nvidia_info.c | |||
@@ -553,7 +553,10 @@ static int gpu_klmirqd_increase_priority(struct task_struct *klmirqd, struct tas | |||
553 | { | 553 | { |
554 | int retval = 0; | 554 | int retval = 0; |
555 | 555 | ||
556 | TRACE_CUR("Increasing priority of nv klmirqd: %s/%d.\n", klmirqd->comm, klmirqd->pid); | 556 | TRACE_CUR("Increasing priority of %s/%d to %s/%d.\n", |
557 | klmirqd->comm, klmirqd->pid, | ||
558 | (hp) ? hp->comm : "nil", | ||
559 | (hp) ? hp->pid : -1); | ||
557 | 560 | ||
558 | /* the klmirqd thread should never attempt to hold a litmus-level real-time | 561 | /* the klmirqd thread should never attempt to hold a litmus-level real-time |
559 | * so nested support is not required */ | 562 | * so nested support is not required */ |
@@ -566,7 +569,10 @@ static int gpu_klmirqd_decrease_priority(struct task_struct *klmirqd, struct tas | |||
566 | { | 569 | { |
567 | int retval = 0; | 570 | int retval = 0; |
568 | 571 | ||
569 | TRACE_CUR("Decreasing priority of nv klmirqd: %s/%d.\n", klmirqd->comm, klmirqd->pid); | 572 | TRACE_CUR("Decreasing priority of %s/%d to %s/%d.\n", |
573 | klmirqd->comm, klmirqd->pid, | ||
574 | (hp) ? hp->comm : "nil", | ||
575 | (hp) ? hp->pid : -1); | ||
570 | 576 | ||
571 | /* the klmirqd thread should never attempt to hold a litmus-level real-time | 577 | /* the klmirqd thread should never attempt to hold a litmus-level real-time |
572 | * so nested support is not required */ | 578 | * so nested support is not required */ |
@@ -617,9 +623,12 @@ long enable_gpu_owner(struct task_struct *t) | |||
617 | 623 | ||
618 | if (hp == t) { | 624 | if (hp == t) { |
619 | /* we're the new hp */ | 625 | /* we're the new hp */ |
620 | TRACE_CUR("%s/%d is new hp on GPU %d.\n", t->comm, t->pid, gpu); | 626 | TRACE_CUR("%s/%d (eff_prio = %s/%d) is new hp on GPU %d.\n", |
627 | t->comm, t->pid, | ||
628 | effective_priority(t)->comm, effective_priority(t)->pid, | ||
629 | gpu); | ||
621 | 630 | ||
622 | retval = gpu_klmirqd_increase_priority(reg->thread, (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); | 631 | retval = gpu_klmirqd_increase_priority(reg->thread, effective_priority(t)); |
623 | } | 632 | } |
624 | #endif | 633 | #endif |
625 | 634 | ||
@@ -671,13 +680,15 @@ long disable_gpu_owner(struct task_struct *t) | |||
671 | } | 680 | } |
672 | 681 | ||
673 | if (hp == t && new_hp != t) { | 682 | if (hp == t && new_hp != t) { |
674 | struct task_struct *to_inh = NULL; | 683 | struct task_struct *to_inh = (new_hp) ? effective_priority(new_hp) : NULL; |
675 | 684 | ||
676 | TRACE_CUR("%s/%d is no longer hp on GPU %d.\n", t->comm, t->pid, gpu); | 685 | TRACE_CUR("%s/%d is no longer hp on GPU %d; new hp = %s/%d (eff_prio = %s/%d).\n", |
677 | 686 | t->comm, t->pid, | |
678 | if (new_hp) { | 687 | gpu, |
679 | to_inh = (tsk_rt(new_hp)->inh_task) ? tsk_rt(new_hp)->inh_task : new_hp; | 688 | (new_hp) ? new_hp->comm : "nil", |
680 | } | 689 | (new_hp) ? new_hp->pid : -1, |
690 | (to_inh) ? to_inh->comm : "nil", | ||
691 | (to_inh) ? to_inh->pid : -1); | ||
681 | 692 | ||
682 | retval = gpu_klmirqd_decrease_priority(reg->thread, to_inh); | 693 | retval = gpu_klmirqd_decrease_priority(reg->thread, to_inh); |
683 | } | 694 | } |
@@ -707,6 +718,10 @@ int gpu_owner_increase_priority(struct task_struct *t) | |||
707 | struct task_struct *hp = NULL; | 718 | struct task_struct *hp = NULL; |
708 | struct task_struct *hp_eff = NULL; | 719 | struct task_struct *hp_eff = NULL; |
709 | 720 | ||
721 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
722 | int increase_klmirqd = 0; | ||
723 | #endif | ||
724 | |||
710 | BUG_ON(!is_realtime(t)); | 725 | BUG_ON(!is_realtime(t)); |
711 | BUG_ON(!tsk_rt(t)->held_gpus); | 726 | BUG_ON(!tsk_rt(t)->held_gpus); |
712 | 727 | ||
@@ -728,14 +743,24 @@ int gpu_owner_increase_priority(struct task_struct *t) | |||
728 | if (hp != t) { /* our position in the heap may have changed. hp is already at the root. */ | 743 | if (hp != t) { /* our position in the heap may have changed. hp is already at the root. */ |
729 | binheap_decrease(&tsk_rt(t)->gpu_owner_node, ®->owners); | 744 | binheap_decrease(&tsk_rt(t)->gpu_owner_node, ®->owners); |
730 | } | 745 | } |
731 | |||
732 | #ifdef CONFIG_LITMUS_SOFTIRQD | 746 | #ifdef CONFIG_LITMUS_SOFTIRQD |
747 | else { | ||
748 | /* unconditionally propagate - t already has the updated eff and is at the root, | ||
749 | so we can't detect a change in inheritance, but we know that priority has | ||
750 | indeed increased/changed. */ | ||
751 | increase_klmirqd = 1; | ||
752 | } | ||
753 | |||
733 | hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | 754 | hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), |
734 | struct task_struct, rt_param); | 755 | struct task_struct, rt_param); |
735 | 756 | ||
736 | if (effective_priority(hp) != hp_eff) { /* the eff. prio. of hp has changed */ | 757 | /* check if the eff. prio. of hp has changed */ |
758 | if (increase_klmirqd || (effective_priority(hp) != hp_eff)) { | ||
737 | hp_eff = effective_priority(hp); | 759 | hp_eff = effective_priority(hp); |
738 | TRACE_CUR("%s/%d is new hp on GPU %d.\n", t->comm, t->pid, gpu); | 760 | TRACE_CUR("%s/%d (eff_prio = %s/%d) is new hp on GPU %d.\n", |
761 | t->comm, t->pid, | ||
762 | hp_eff->comm, hp_eff->pid, | ||
763 | gpu); | ||
739 | 764 | ||
740 | retval = gpu_klmirqd_increase_priority(reg->thread, hp_eff); | 765 | retval = gpu_klmirqd_increase_priority(reg->thread, hp_eff); |
741 | } | 766 | } |
@@ -781,7 +806,8 @@ int gpu_owner_decrease_priority(struct task_struct *t) | |||
781 | struct task_struct *new_hp = | 806 | struct task_struct *new_hp = |
782 | container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | 807 | container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), |
783 | struct task_struct, rt_param); | 808 | struct task_struct, rt_param); |
784 | if (effective_priority(new_hp) != hp_eff) { /* eff prio. of hp has changed */ | 809 | /* if the new_hp is still t, or if the effective priority has changed */ |
810 | if ((new_hp == t) || (effective_priority(new_hp) != hp_eff)) { | ||
785 | hp_eff = effective_priority(new_hp); | 811 | hp_eff = effective_priority(new_hp); |
786 | TRACE_CUR("%s/%d is no longer hp on GPU %d.\n", t->comm, t->pid, gpu); | 812 | TRACE_CUR("%s/%d is no longer hp on GPU %d.\n", t->comm, t->pid, gpu); |
787 | retval = gpu_klmirqd_decrease_priority(reg->thread, hp_eff); | 813 | retval = gpu_klmirqd_decrease_priority(reg->thread, hp_eff); |
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index dd64211a1402..b3281e40df52 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c | |||
@@ -415,12 +415,19 @@ static noinline void job_completion(struct task_struct *t, int forced) | |||
415 | 415 | ||
416 | sched_trace_task_completion(t, forced); | 416 | sched_trace_task_completion(t, forced); |
417 | 417 | ||
418 | #ifdef CONFIG_LITMUS_NVIDIA | ||
419 | atomic_set(&tsk_rt(t)->nv_int_count, 0); | ||
420 | #endif | ||
421 | |||
422 | TRACE_TASK(t, "job_completion().\n"); | 418 | TRACE_TASK(t, "job_completion().\n"); |
423 | 419 | ||
420 | #ifdef CONFIG_LITMUS_LOCKING | ||
421 | if (!is_persistent(t) && tsk_rt(t)->inh_task) { | ||
422 | /* job completing while inheriting a priority */ | ||
423 | TRACE_TASK(t, | ||
424 | "WARNING: Completing job while still inheriting a " | ||
425 | "priority (%s/%d)!\n", | ||
426 | tsk_rt(t)->inh_task->comm, | ||
427 | tsk_rt(t)->inh_task->pid); | ||
428 | } | ||
429 | #endif | ||
430 | |||
424 | /* set flags */ | 431 | /* set flags */ |
425 | tsk_rt(t)->completed = 1; | 432 | tsk_rt(t)->completed = 1; |
426 | /* prepare for next period */ | 433 | /* prepare for next period */ |
@@ -1027,7 +1034,7 @@ static void cedf_task_wake_up(struct task_struct *task) | |||
1027 | release_at(task, now); | 1034 | release_at(task, now); |
1028 | sched_trace_task_release(task); | 1035 | sched_trace_task_release(task); |
1029 | } | 1036 | } |
1030 | else if (task->rt.time_slice) { | 1037 | else { |
1031 | /* periodic task model. don't force job to end. | 1038 | /* periodic task model. don't force job to end. |
1032 | * rely on user to say when jobs complete or when budget expires. */ | 1039 | * rely on user to say when jobs complete or when budget expires. */ |
1033 | tsk_rt(task)->completed = 0; | 1040 | tsk_rt(task)->completed = 0; |
@@ -1156,6 +1163,27 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1156 | int check_preempt = 0; | 1163 | int check_preempt = 0; |
1157 | cedf_domain_t* cluster; | 1164 | cedf_domain_t* cluster; |
1158 | 1165 | ||
1166 | if (prio_inh && (effective_priority(prio_inh) != prio_inh)) { | ||
1167 | TRACE_TASK(t, "Inheriting from %s/%d instead of the eff_prio = %s/%d!\n", | ||
1168 | prio_inh->comm, prio_inh->pid, | ||
1169 | effective_priority(prio_inh)->comm, | ||
1170 | effective_priority(prio_inh)->pid); | ||
1171 | #ifndef CONFIG_LITMUS_NESTED_LOCKING | ||
1172 | /* Tasks should only inherit the base priority of a task. | ||
1173 | If 't' inherits a priority, then tsk_rt(t)->inh_task should | ||
1174 | be passed to this function instead. This includes transitive | ||
1175 | inheritance relations (tsk_rt(tsk_rt(...)->inh_task)->inh_task). */ | ||
1176 | BUG(); | ||
1177 | #else | ||
1178 | /* Not a bug with nested locking since inheritance propagation is | ||
1179 | not atomic. */ | ||
1180 | |||
1181 | /* TODO: Is the following 'helping' short-cut safe? | ||
1182 | prio_inh = effective_priority(prio_inh); | ||
1183 | */ | ||
1184 | #endif | ||
1185 | } | ||
1186 | |||
1159 | if (prio_inh && prio_inh == effective_priority(t)) { | 1187 | if (prio_inh && prio_inh == effective_priority(t)) { |
1160 | /* relationship already established. */ | 1188 | /* relationship already established. */ |
1161 | TRACE_TASK(t, "already has effective priority of %s/%d\n", | 1189 | TRACE_TASK(t, "already has effective priority of %s/%d\n", |
@@ -1211,6 +1239,20 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1211 | } | 1239 | } |
1212 | raw_spin_unlock(&cluster->domain.release_lock); | 1240 | raw_spin_unlock(&cluster->domain.release_lock); |
1213 | 1241 | ||
1242 | #ifdef CONFIG_REALTIME_AUX_TASKS | ||
1243 | /* propagate to aux tasks */ | ||
1244 | if (tsk_rt(t)->has_aux_tasks) { | ||
1245 | aux_task_owner_increase_priority(t); | ||
1246 | } | ||
1247 | #endif | ||
1248 | |||
1249 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1250 | /* propagate to gpu klmirqd */ | ||
1251 | if (tsk_rt(t)->held_gpus) { | ||
1252 | gpu_owner_increase_priority(t); | ||
1253 | } | ||
1254 | #endif | ||
1255 | |||
1214 | /* If holder was enqueued in a release heap, then the following | 1256 | /* If holder was enqueued in a release heap, then the following |
1215 | * preemption check is pointless, but we can't easily detect | 1257 | * preemption check is pointless, but we can't easily detect |
1216 | * that case. If you want to fix this, then consider that | 1258 | * that case. If you want to fix this, then consider that |
@@ -1225,20 +1267,6 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1225 | &cluster->domain.ready_queue); | 1267 | &cluster->domain.ready_queue); |
1226 | check_for_preemptions(cluster); | 1268 | check_for_preemptions(cluster); |
1227 | } | 1269 | } |
1228 | |||
1229 | #ifdef CONFIG_REALTIME_AUX_TASKS | ||
1230 | /* propagate to aux tasks */ | ||
1231 | if (tsk_rt(t)->has_aux_tasks) { | ||
1232 | aux_task_owner_increase_priority(t); | ||
1233 | } | ||
1234 | #endif | ||
1235 | |||
1236 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1237 | /* propagate to gpu klmirqd */ | ||
1238 | if (tsk_rt(t)->held_gpus) { | ||
1239 | gpu_owner_increase_priority(t); | ||
1240 | } | ||
1241 | #endif | ||
1242 | } | 1270 | } |
1243 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1271 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1244 | } | 1272 | } |
@@ -1266,6 +1294,8 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
1266 | 1294 | ||
1267 | raw_spin_lock(&cluster->cluster_lock); | 1295 | raw_spin_lock(&cluster->cluster_lock); |
1268 | 1296 | ||
1297 | TRACE_TASK(t, "to inherit from %s/%d\n", prio_inh->comm, prio_inh->pid); | ||
1298 | |||
1269 | __increase_priority_inheritance(t, prio_inh); | 1299 | __increase_priority_inheritance(t, prio_inh); |
1270 | 1300 | ||
1271 | raw_spin_unlock(&cluster->cluster_lock); | 1301 | raw_spin_unlock(&cluster->cluster_lock); |
@@ -1288,11 +1318,32 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
1288 | { | 1318 | { |
1289 | int success = 1; | 1319 | int success = 1; |
1290 | 1320 | ||
1321 | if (prio_inh && (effective_priority(prio_inh) != prio_inh)) { | ||
1322 | TRACE_TASK(t, "Inheriting from %s/%d instead of the eff_prio = %s/%d!\n", | ||
1323 | prio_inh->comm, prio_inh->pid, | ||
1324 | effective_priority(prio_inh)->comm, | ||
1325 | effective_priority(prio_inh)->pid); | ||
1326 | #ifndef CONFIG_LITMUS_NESTED_LOCKING | ||
1327 | /* Tasks should only inherit the base priority of a task. | ||
1328 | If 't' inherits a priority, then tsk_rt(t)->inh_task should | ||
1329 | be passed to this function instead. This includes transitive | ||
1330 | inheritance relations (tsk_rt(tsk_rt(...)->inh_task)->inh_task). */ | ||
1331 | BUG(); | ||
1332 | #else | ||
1333 | /* Not a bug with nested locking since inheritance propagation is | ||
1334 | not atomic. */ | ||
1335 | |||
1336 | /* TODO: Is the following 'helping' short-cut safe? | ||
1337 | prio_inh = effective_priority(prio_inh); | ||
1338 | */ | ||
1339 | #endif | ||
1340 | } | ||
1341 | |||
1291 | if (prio_inh == tsk_rt(t)->inh_task) { | 1342 | if (prio_inh == tsk_rt(t)->inh_task) { |
1292 | /* relationship already established. */ | 1343 | /* relationship already established. */ |
1293 | TRACE_TASK(t, "already inherits priority from %s/%d\n", | 1344 | TRACE_TASK(t, "already inherits priority from %s/%d\n", |
1294 | (prio_inh) ? prio_inh->comm : "(nil)", | 1345 | (prio_inh) ? prio_inh->comm : "(nil)", |
1295 | (prio_inh) ? prio_inh->pid : 0); | 1346 | (prio_inh) ? prio_inh->pid : -1); |
1296 | goto out; | 1347 | goto out; |
1297 | } | 1348 | } |
1298 | 1349 | ||
@@ -1372,6 +1423,11 @@ static void decrease_priority_inheritance(struct task_struct* t, | |||
1372 | cedf_domain_t* cluster = task_cpu_cluster(t); | 1423 | cedf_domain_t* cluster = task_cpu_cluster(t); |
1373 | 1424 | ||
1374 | raw_spin_lock(&cluster->cluster_lock); | 1425 | raw_spin_lock(&cluster->cluster_lock); |
1426 | |||
1427 | TRACE_TASK(t, "to inherit from %s/%d (decrease)\n", | ||
1428 | (prio_inh) ? prio_inh->comm : "nil", | ||
1429 | (prio_inh) ? prio_inh->pid : -1); | ||
1430 | |||
1375 | __decrease_priority_inheritance(t, prio_inh); | 1431 | __decrease_priority_inheritance(t, prio_inh); |
1376 | 1432 | ||
1377 | raw_spin_unlock(&cluster->cluster_lock); | 1433 | raw_spin_unlock(&cluster->cluster_lock); |
@@ -1407,6 +1463,10 @@ static void nested_increase_priority_inheritance(struct task_struct* t, | |||
1407 | increase_priority_inheritance(t, prio_inh); // increase our prio. | 1463 | increase_priority_inheritance(t, prio_inh); // increase our prio. |
1408 | } | 1464 | } |
1409 | 1465 | ||
1466 | /* note: cluster lock is not held continuously during propagation, so there | ||
1467 | may be momentary inconsistencies while nested priority propagation 'chases' | ||
1468 | other updates. */ | ||
1469 | |||
1410 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. | 1470 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); // unlock the t's heap. |
1411 | 1471 | ||
1412 | 1472 | ||
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index b3309ee2561e..8a8a6f1c306b 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -405,10 +405,6 @@ static noinline void job_completion(struct task_struct *t, int forced) | |||
405 | 405 | ||
406 | sched_trace_task_completion(t, forced); | 406 | sched_trace_task_completion(t, forced); |
407 | 407 | ||
408 | #ifdef CONFIG_LITMUS_NVIDIA | ||
409 | atomic_set(&tsk_rt(t)->nv_int_count, 0); | ||
410 | #endif | ||
411 | |||
412 | TRACE_TASK(t, "job_completion().\n"); | 408 | TRACE_TASK(t, "job_completion().\n"); |
413 | 409 | ||
414 | /* set flags */ | 410 | /* set flags */ |
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index b14b0100e09c..1693d70d0911 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c | |||
@@ -191,9 +191,6 @@ feather_callback void do_sched_trace_task_completion(unsigned long id, | |||
191 | if (rec) { | 191 | if (rec) { |
192 | rec->data.completion.when = now(); | 192 | rec->data.completion.when = now(); |
193 | rec->data.completion.forced = forced; | 193 | rec->data.completion.forced = forced; |
194 | #ifdef LITMUS_NVIDIA | ||
195 | rec->data.completion.nv_int_count = (u16)atomic_read(&tsk_rt(t)->nv_int_count); | ||
196 | #endif | ||
197 | put_record(rec); | 194 | put_record(rec); |
198 | } | 195 | } |
199 | } | 196 | } |