author:    Glenn Elliott <gelliott@cs.unc.edu>  2012-09-17 19:31:04 -0400
committer: Glenn Elliott <gelliott@cs.unc.edu>  2012-09-17 19:31:04 -0400
commit:    ba54b1096870fba6e3bbb99aafc713e76b747353
tree:      26727a7f89ca520392fec0b559f09500cb9934ac
parent:    ef0974bf8e768b38d728f3bba147332ddea1e11b
Fixed three bugs with aux threads and nested locks

Fixes three bugs with aux threads and nested locks:
1) The list of aux threads could become corrupted.
   -- List modifications are now made while holding the scheduler lock.
2) Fixed a bad EDF comparison ordering that could lead to scheduling
   thrashing in an infinite loop.
3) Prevent aux threads from inheriting a priority from a task that is
   blocked on a real-time litmus lock. (Since the aux threads cannot
   possibly hold these locks, we don't have to worry about inheritance
   in that case.)
-rw-r--r--  include/litmus/locking.h  |  3
-rw-r--r--  include/litmus/rt_param.h |  1
-rw-r--r--  litmus/aux_tasks.c        | 58
-rw-r--r--  litmus/edf_common.c       | 10
-rw-r--r--  litmus/ikglp_lock.c       |  2
-rw-r--r--  litmus/kfmlp_lock.c       |  2
-rw-r--r--  litmus/locking.c          | 29
-rw-r--r--  litmus/rsm_lock.c         |  2
-rw-r--r--  litmus/sched_cedf.c       | 17
-rw-r--r--  litmus/sched_gsn_edf.c    | 19
10 files changed, 98 insertions(+), 45 deletions(-)
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 296bbf6f7af0..4a5f198a0407 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -156,5 +156,8 @@ struct litmus_lock_ops {
 #endif
 
 
+void suspend_for_lock(void);
+
+
 #endif
 
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 8b9e14c461dc..44f85a366574 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -288,6 +288,7 @@ struct rt_param {
 #ifdef CONFIG_REALTIME_AUX_TASKS
 	unsigned int is_aux_task:1;
 	unsigned int has_aux_tasks:1;
+	unsigned int hide_from_aux_tasks:1;
 
 	struct list_head aux_task_node;
 	struct binheap_node aux_task_owner_node;
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c
index b0617accdf7f..efda7dc0bd76 100644
--- a/litmus/aux_tasks.c
+++ b/litmus/aux_tasks.c
@@ -55,7 +55,7 @@ int exit_aux_task(struct task_struct *t)
 	tsk_rt(t)->is_aux_task = 0;
 
 	if (tsk_rt(t)->inh_task) {
-		litmus->decrease_prio(t, NULL);
+		litmus->__decrease_prio(t, NULL);
 	}
 
 	return retval;
@@ -114,31 +114,37 @@ int aux_task_owner_increase_priority(struct task_struct *t)
 	int retval = 0;
 	struct task_struct *leader;
 	struct task_struct *hp = NULL;
+	struct task_struct *hp_eff = NULL;
 
-	BUG_ON(!tsk_rt(t)->has_aux_tasks);
 	BUG_ON(!is_realtime(t));
-	BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node));
+	BUG_ON(!tsk_rt(t)->has_aux_tasks);
 
 	leader = t->group_leader;
 
+	if (!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) {
+		WARN_ON(!is_running(t));
+		TRACE_CUR("aux tasks may not inherit from %s/%d in group %s/%d\n",
+				t->comm, t->pid, leader->comm, leader->pid);
+		goto out;
+	}
+
 	TRACE_CUR("task %s/%d in group %s/%d increasing priority.\n", t->comm, t->pid, leader->comm, leader->pid);
 
 	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
			struct task_struct, rt_param);
+	hp_eff = effective_priority(hp);
 
-	if (hp == t) {
-		goto out; // already hp, nothing to do.
+	if (hp != t) { /* our position in the heap may have changed. hp is already at the root. */
+		binheap_decrease(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners);
 	}
 
-	binheap_decrease(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners);
-
 	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
			struct task_struct, rt_param);
 
-	if (hp == t) {
+	if (effective_priority(hp) != hp_eff) { /* the eff. prio. of hp has changed */
+		hp_eff = effective_priority(hp);
 		TRACE_CUR("%s/%d is new hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);
-		retval = aux_tasks_increase_priority(leader,
-				(tsk_rt(hp)->inh_task) ? tsk_rt(hp)->inh_task : hp);
+		retval = aux_tasks_increase_priority(leader, hp_eff);
 	}
 
 out:
@@ -150,30 +156,41 @@ int aux_task_owner_decrease_priority(struct task_struct *t)
 	int retval = 0;
 	struct task_struct *leader;
 	struct task_struct *hp = NULL;
-	struct task_struct *new_hp = NULL;
+	struct task_struct *hp_eff = NULL;
 
-	BUG_ON(!tsk_rt(t)->has_aux_tasks);
 	BUG_ON(!is_realtime(t));
-	BUG_ON(!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node));
+	BUG_ON(!tsk_rt(t)->has_aux_tasks);
 
 	leader = t->group_leader;
 
+	if (!binheap_is_in_heap(&tsk_rt(t)->aux_task_owner_node)) {
+		WARN_ON(!is_running(t));
+		TRACE_CUR("aux tasks may not inherit from %s/%d in group %s/%d\n",
+				t->comm, t->pid, leader->comm, leader->pid);
+		goto out;
+	}
+
 	TRACE_CUR("task %s/%d in group %s/%d decresing priority.\n", t->comm, t->pid, leader->comm, leader->pid);
 
 	hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
			struct task_struct, rt_param);
+	hp_eff = effective_priority(hp);
 	binheap_delete(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners);
 	binheap_add(&tsk_rt(t)->aux_task_owner_node, &tsk_aux(leader)->aux_task_owners,
			struct rt_param, aux_task_owner_node);
-	new_hp = container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
-			struct task_struct, rt_param);
 
-	if (hp == t && new_hp != t) {
-		TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);
-		retval = aux_tasks_decrease_priority(leader,
-				(tsk_rt(new_hp)->inh_task) ? tsk_rt(new_hp)->inh_task : new_hp);
+	if (hp == t) { /* t was originally the hp */
+		struct task_struct *new_hp =
+			container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node),
+					struct task_struct, rt_param);
+		if (effective_priority(new_hp) != hp_eff) { /* eff prio. of hp has changed */
+			hp_eff = effective_priority(new_hp);
+			TRACE_CUR("%s/%d is no longer hp in group %s/%d.\n", t->comm, t->pid, leader->comm, leader->pid);
+			retval = aux_tasks_decrease_priority(leader, hp_eff);
+		}
 	}
 
+out:
 	return retval;
 }
 
@@ -302,8 +319,7 @@ static long __do_enable_aux_tasks(void)
 				tsk_rt(t)->task_params.period);
 
 		/* inspect heap_node to see if it is an rt task */
-		if (tsk_rt(t)->task_params.period == 0) { //||
-			// tsk_rt(t)->task_params.period == MAGIC_AUX_TASK_PERIOD) {
+		if (tsk_rt(t)->task_params.period == 0) {
 			if (!tsk_rt(t)->is_aux_task) {
 				TRACE_CUR("AUX task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid);
 				/* hasn't been aux_tasks_increase_priorityted into rt. make it a aux. */
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 7e0d3a5d0c4d..f4881452373d 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -237,6 +237,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 			second_task->rt_param.is_proxy_thread) {
 			return 1;
 		}
+		else if (first_task->rt_param.is_proxy_thread == second_task->rt_param.is_proxy_thread) {
 #endif
 
 #ifdef CONFIG_REALTIME_AUX_TASKS
@@ -244,6 +245,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 		if (tsk_rt(first)->is_aux_task < tsk_rt(second)->is_aux_task) {
 			return 1;
 		}
+		else if (tsk_rt(first)->is_aux_task == tsk_rt(second)->is_aux_task) {
 #endif
 
 		/* Something could be wrong if you get this far. */
@@ -281,6 +283,14 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 					return 1;
 				}
 			}
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+		}
+#endif
+#ifdef CONFIG_LITMUS_SOFTIRQD
+		}
+#endif
+
 		}
 	}
 }
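
Editorial note on the edf_common.c hunks above: before this change, the
proxy-thread and aux-task checks could fall through to the later tie-breaks
even when the two tasks differed on those flags, so edf_higher_prio() could
give inconsistent answers for the two orderings of the same pair. The added
else-if branches make each check a proper tie-break level that only falls
through on equality. A hypothetical, simplified illustration of the pattern
(not the LITMUS code):

    /* Hypothetical sketch, not the LITMUS implementation: each criterion
     * either decides the comparison outright or, on a tie, defers to the
     * next criterion (here, the EDF deadline). */
    static int higher_prio(int a_is_aux, int b_is_aux,
                           long a_deadline, long b_deadline)
    {
    	if (a_is_aux < b_is_aux)
    		return 1;                       /* a beats b on this flag */
    	else if (a_is_aux == b_is_aux)
    		return a_deadline < b_deadline; /* tie: fall through to EDF */
    	return 0;                               /* b beats a on this flag */
    }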
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index 83b708ab85cb..bd7bfc0f48ac 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -963,7 +963,7 @@ int ikglp_lock(struct litmus_lock* l)
 
 		TS_LOCK_SUSPEND;
 
-		schedule();
+		suspend_for_lock();
 
 		TS_LOCK_RESUME;
 
diff --git a/litmus/kfmlp_lock.c b/litmus/kfmlp_lock.c
index bff857ed8d4e..ab472330095d 100644
--- a/litmus/kfmlp_lock.c
+++ b/litmus/kfmlp_lock.c
@@ -267,7 +267,7 @@ int kfmlp_lock(struct litmus_lock* l)
		 * when we wake up; we are guaranteed to have the lock since
		 * there is only one wake up per release (or steal).
		 */
-		schedule();
+		suspend_for_lock();
 
 
		if(my_queue->owner == t) {
diff --git a/litmus/locking.c b/litmus/locking.c
index 12a23eb715cc..16c936ba8139 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -540,6 +540,35 @@ out:
 	return passed;
 }
 
+
+void suspend_for_lock(void)
+{
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	unsigned int restore = 0;
+	struct task_struct *t = current;
+	unsigned int hide;
+
+	if (tsk_rt(t)->has_aux_tasks) {
+		/* hide from aux tasks so they can't inherit our priority when we block
+		 * for a litmus lock. inheritance is already going to a litmus lock
+		 * holder. */
+		hide = tsk_rt(t)->hide_from_aux_tasks;
+		restore = 1;
+		tsk_rt(t)->hide_from_aux_tasks = 1;
+	}
+#endif
+
+	schedule();
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	if (restore) {
+		/* restore our state */
+		tsk_rt(t)->hide_from_aux_tasks = hide;
+	}
+#endif
+}
+
+
 #else // CONFIG_LITMUS_LOCKING
 
 struct fdso_ops generic_lock_ops = {};
diff --git a/litmus/rsm_lock.c b/litmus/rsm_lock.c
index 75ed87c5ed48..3dfd8ae9d221 100644
--- a/litmus/rsm_lock.c
+++ b/litmus/rsm_lock.c
@@ -289,7 +289,7 @@ int rsm_mutex_lock(struct litmus_lock* l)
		 * there is only one wake up per release.
		 */
 
-		schedule();
+		suspend_for_lock();
 
		TS_LOCK_RESUME;
 
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index f5c9807090a1..6746d4d6033e 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -1032,7 +1032,7 @@ static void cedf_task_wake_up(struct task_struct *task)
 #endif
 
 #ifdef CONFIG_REALTIME_AUX_TASKS
-	if (tsk_rt(task)->has_aux_tasks) {
+	if (tsk_rt(task)->has_aux_tasks && !tsk_rt(task)->hide_from_aux_tasks) {
 		TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid);
 		disable_aux_task_owner(task);
 	}
@@ -1057,7 +1057,7 @@ static void cedf_task_block(struct task_struct *t)
 	unlink(t);
 
 #ifdef CONFIG_REALTIME_AUX_TASKS
-	if (tsk_rt(t)->has_aux_tasks) {
+	if (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks) {
 
 		TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid);
 		enable_aux_task_owner(t);
@@ -1079,19 +1079,16 @@ static void cedf_task_exit(struct task_struct * t)
 	cedf_change_prio_pai_tasklet(t, NULL);
 #endif
 
-#ifdef CONFIG_REALTIME_AUX_TASKS
-	if (tsk_rt(t)->is_aux_task) {
-		exit_aux_task(t); /* cannot be called with gsnedf_lock held */
-	}
-#endif
-
 	/* unlink if necessary */
 	raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
 
 #ifdef CONFIG_REALTIME_AUX_TASKS
 	/* make sure we clean up on our way out */
-	if(tsk_rt(t)->has_aux_tasks) {
-		disable_aux_task_owner(t); /* must be called witl gsnedf_lock held */
+	if (unlikely(tsk_rt(t)->is_aux_task)) {
+		exit_aux_task(t);
+	}
+	else if(tsk_rt(t)->has_aux_tasks) {
+		disable_aux_task_owner(t);
 	}
 #endif
 
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index ed9b4697a5a2..04b189e54b03 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -1050,7 +1050,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
 #endif
 
 #ifdef CONFIG_REALTIME_AUX_TASKS
-	if (tsk_rt(task)->has_aux_tasks) {
+	if (tsk_rt(task)->has_aux_tasks && !tsk_rt(task)->hide_from_aux_tasks) {
 		TRACE_CUR("%s/%d is ready so aux tasks may not inherit.\n", task->comm, task->pid);
 		disable_aux_task_owner(task);
 	}
@@ -1072,7 +1072,7 @@ static void gsnedf_task_block(struct task_struct *t)
 	unlink(t);
 
 #ifdef CONFIG_REALTIME_AUX_TASKS
-	if (tsk_rt(t)->has_aux_tasks) {
+	if (tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->hide_from_aux_tasks) {
 
 		TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid);
 		enable_aux_task_owner(t);
@@ -1093,19 +1093,16 @@ static void gsnedf_task_exit(struct task_struct * t)
 	gsnedf_change_prio_pai_tasklet(t, NULL);
 #endif
 
-#ifdef CONFIG_REALTIME_AUX_TASKS
-	if (tsk_rt(t)->is_aux_task) {
-		exit_aux_task(t); /* cannot be called with gsnedf_lock held */
-	}
-#endif
-
 	/* unlink if necessary */
 	raw_spin_lock_irqsave(&gsnedf_lock, flags);
 
 #ifdef CONFIG_REALTIME_AUX_TASKS
 	/* make sure we clean up on our way out */
-	if(tsk_rt(t)->has_aux_tasks) {
-		disable_aux_task_owner(t); /* must be called witl gsnedf_lock held */
+	if (unlikely(tsk_rt(t)->is_aux_task)) {
+		exit_aux_task(t);
+	}
+	else if(tsk_rt(t)->has_aux_tasks) {
+		disable_aux_task_owner(t);
 	}
 #endif
 
@@ -1664,7 +1661,7 @@ int gsnedf_fmlp_lock(struct litmus_lock* l)
	 * there is only one wake up per release.
	 */
 
-	schedule();
+	suspend_for_lock();
 
 	TS_LOCK_RESUME;
 