Diffstat (limited to 'litmus/prioq_lock.c')
-rw-r--r--  litmus/prioq_lock.c | 493
1 file changed, 378 insertions, 115 deletions
diff --git a/litmus/prioq_lock.c b/litmus/prioq_lock.c
index 0091e4c1901e..ff6419ba1a13 100644
--- a/litmus/prioq_lock.c
+++ b/litmus/prioq_lock.c
@@ -12,10 +12,10 @@
12 | #include <litmus/gpu_affinity.h> | 12 | #include <litmus/gpu_affinity.h> |
13 | #endif | 13 | #endif |
14 | 14 | ||
15 | static void __attribute__((unused)) | 15 | void __attribute__((unused)) |
16 | __dump_lock_info(struct prioq_mutex *mutex) | 16 | __dump_prioq_lock_info(struct prioq_mutex *mutex) |
17 | { | 17 | { |
18 | #ifdef CONFIG_SCHED_DEBUG_TRACE | 18 | #ifdef CONFIG_SCHED_DEBUG_TRACE |
19 | TRACE_CUR("%s (mutex: %p):\n", mutex->litmus_lock.name, mutex); | 19 | TRACE_CUR("%s (mutex: %p):\n", mutex->litmus_lock.name, mutex); |
20 | TRACE_CUR("owner: %s/%d (inh: %s/%d)\n", | 20 | TRACE_CUR("owner: %s/%d (inh: %s/%d)\n", |
21 | (mutex->owner) ? | 21 | (mutex->owner) ? |
@@ -48,12 +48,12 @@ __dump_lock_info(struct prioq_mutex *mutex)
48 | int enabled = 1; | 48 | int enabled = 1; |
49 | #endif | 49 | #endif |
50 | q = list_entry(pos, wait_queue_t, task_list); | 50 | q = list_entry(pos, wait_queue_t, task_list); |
51 | 51 | ||
52 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 52 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
53 | if(q->func == dgl_wake_up) { | 53 | if(q->func == dgl_wake_up) { |
54 | dgl_wait = (dgl_wait_state_t*) q->private; | 54 | dgl_wait = (dgl_wait_state_t*) q->private; |
55 | blocked_task = dgl_wait->task; | 55 | blocked_task = dgl_wait->task; |
56 | 56 | ||
57 | if(tsk_rt(blocked_task)->blocked_lock != &mutex->litmus_lock) | 57 | if(tsk_rt(blocked_task)->blocked_lock != &mutex->litmus_lock) |
58 | enabled = 0; | 58 | enabled = 0; |
59 | } | 59 | } |
@@ -92,14 +92,14 @@ static void __add_wait_queue_sorted(wait_queue_head_t *q, wait_queue_t *add_node
92 | struct task_struct *queued_task; | 92 | struct task_struct *queued_task; |
93 | struct task_struct *add_task; | 93 | struct task_struct *add_task; |
94 | struct list_head *pos; | 94 | struct list_head *pos; |
95 | 95 | ||
96 | if (list_empty(pq)) { | 96 | if (list_empty(pq)) { |
97 | list_add_tail(&add_node->task_list, pq); | 97 | list_add_tail(&add_node->task_list, pq); |
98 | return; | 98 | return; |
99 | } | 99 | } |
100 | 100 | ||
101 | add_task = get_queued_task(add_node); | 101 | add_task = get_queued_task(add_node); |
102 | 102 | ||
103 | /* less priority than tail? if so, go to tail */ | 103 | /* less priority than tail? if so, go to tail */ |
104 | q_node = list_entry(pq->prev, wait_queue_t, task_list); | 104 | q_node = list_entry(pq->prev, wait_queue_t, task_list); |
105 | queued_task = get_queued_task(q_node); | 105 | queued_task = get_queued_task(q_node); |
@@ -107,7 +107,7 @@ static void __add_wait_queue_sorted(wait_queue_head_t *q, wait_queue_t *add_node
107 | list_add_tail(&add_node->task_list, pq); | 107 | list_add_tail(&add_node->task_list, pq); |
108 | return; | 108 | return; |
109 | } | 109 | } |
110 | 110 | ||
111 | /* belongs at head or between nodes */ | 111 | /* belongs at head or between nodes */ |
112 | list_for_each(pos, pq) { | 112 | list_for_each(pos, pq) { |
113 | q_node = list_entry(pos, wait_queue_t, task_list); | 113 | q_node = list_entry(pos, wait_queue_t, task_list); |
@@ -117,8 +117,9 @@ static void __add_wait_queue_sorted(wait_queue_head_t *q, wait_queue_t *add_node
117 | return; | 117 | return; |
118 | } | 118 | } |
119 | } | 119 | } |
120 | 120 | ||
121 | BUG(); | 121 | WARN_ON(1); |
122 | list_add_tail(&add_node->task_list, pq); | ||
122 | } | 123 | } |
123 | 124 | ||
124 | static inline void __add_wait_queue_sorted_exclusive(wait_queue_head_t *q, wait_queue_t *wait) | 125 | static inline void __add_wait_queue_sorted_exclusive(wait_queue_head_t *q, wait_queue_t *wait) |
@@ -132,7 +133,7 @@ static void __prioq_increase_pos(struct prioq_mutex *mutex, struct task_struct *
132 | wait_queue_t *q; | 133 | wait_queue_t *q; |
133 | struct list_head *pos; | 134 | struct list_head *pos; |
134 | struct task_struct *queued; | 135 | struct task_struct *queued; |
135 | 136 | ||
136 | /* TODO: Make this efficient instead of remove/add */ | 137 | /* TODO: Make this efficient instead of remove/add */ |
137 | list_for_each(pos, &mutex->wait.task_list) { | 138 | list_for_each(pos, &mutex->wait.task_list) { |
138 | q = list_entry(pos, wait_queue_t, task_list); | 139 | q = list_entry(pos, wait_queue_t, task_list); |
@@ -143,16 +144,17 @@ static void __prioq_increase_pos(struct prioq_mutex *mutex, struct task_struct *
143 | return; | 144 | return; |
144 | } | 145 | } |
145 | } | 146 | } |
146 | 147 | ||
147 | BUG(); | 148 | BUG(); |
148 | } | 149 | } |
149 | 150 | ||
151 | |||
150 | static void __prioq_decrease_pos(struct prioq_mutex *mutex, struct task_struct *t) | 152 | static void __prioq_decrease_pos(struct prioq_mutex *mutex, struct task_struct *t) |
151 | { | 153 | { |
152 | wait_queue_t *q; | 154 | wait_queue_t *q; |
153 | struct list_head *pos; | 155 | struct list_head *pos; |
154 | struct task_struct *queued; | 156 | struct task_struct *queued; |
155 | 157 | ||
156 | /* TODO: Make this efficient instead of remove/add */ | 158 | /* TODO: Make this efficient instead of remove/add */ |
157 | list_for_each(pos, &mutex->wait.task_list) { | 159 | list_for_each(pos, &mutex->wait.task_list) { |
158 | q = list_entry(pos, wait_queue_t, task_list); | 160 | q = list_entry(pos, wait_queue_t, task_list); |
@@ -163,10 +165,143 @@ static void __prioq_decrease_pos(struct prioq_mutex *mutex, struct task_struct *
163 | return; | 165 | return; |
164 | } | 166 | } |
165 | } | 167 | } |
166 | 168 | ||
167 | BUG(); | 169 | BUG(); |
168 | } | 170 | } |
169 | 171 | ||
172 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
173 | static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struct *t) | ||
174 | { | ||
175 | // TODO: | ||
176 | // (1) Increase position for 't' in all of its DGLs. | ||
177 | // (2) Check to see if 't' can take the DGLs atomically | ||
178 | // (3) If it can take the DGLs, do so. | ||
179 | // (4) Cleanup? | ||
180 | // (5) Wake up 't' | ||
181 | |||
182 | |||
183 | wait_queue_t *q; | ||
184 | struct list_head *pos; | ||
185 | struct task_struct *queued; | ||
186 | int i; | ||
187 | int ret = 0; | ||
188 | |||
189 | list_for_each(pos, &mutex->wait.task_list) { | ||
190 | q = list_entry(pos, wait_queue_t, task_list); | ||
191 | if(q->func == dgl_wake_up) { | ||
192 | // we're looking at a dgl request | ||
193 | dgl_wait_state_t *dgl_wait = (dgl_wait_state_t*) q->private; | ||
194 | queued = dgl_wait->task; | ||
195 | |||
196 | if (queued == t) // is it the one we're looking for? | ||
197 | { | ||
198 | // reposition on the other mutexes | ||
199 | for(i = 0; i < dgl_wait->size; ++i) { | ||
200 | struct prioq_mutex *pm = (struct prioq_mutex *) dgl_wait->locks[i]; | ||
201 | if (pm != mutex) | ||
202 | __prioq_increase_pos(pm, t); | ||
203 | } | ||
204 | // reposition on this mutex | ||
205 | __remove_wait_queue(&mutex->wait, q); | ||
206 | __add_wait_queue_sorted(&mutex->wait, q); | ||
207 | |||
208 | |||
209 | if(__attempt_atomic_dgl_acquire(NULL, dgl_wait)) { | ||
210 | /* it can't take the lock. do nothing. */ | ||
211 | } | ||
212 | else { | ||
213 | TRACE_CUR("%s/%d can take its entire DGL atomically via inheritance!\n", | ||
214 | dgl_wait->task->comm, dgl_wait->task->pid); | ||
215 | |||
216 | /* we took the lock! we've already been removed from mutex->wait.task_list */ | ||
217 | |||
218 | TRACE_TASK(t, "waking up since it is no longer blocked.\n"); | ||
219 | |||
220 | tsk_rt(t)->blocked_lock = NULL; | ||
221 | mb(); | ||
222 | |||
223 | #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) | ||
224 | // re-enable tracking | ||
225 | if(tsk_rt(t)->held_gpus) { | ||
226 | tsk_rt(t)->suspend_gpu_tracker_on_block = 0; | ||
227 | } | ||
228 | #endif | ||
229 | wake_up_process(t); | ||
230 | ret = 1; | ||
231 | } | ||
232 | break; | ||
233 | } | ||
234 | } | ||
235 | else { | ||
236 | // not dgl request. | ||
237 | queued = (struct task_struct*) q->private; | ||
238 | if (queued == t) { // is this the one we're looking for? | ||
239 | // if so, reposition it. | ||
240 | __remove_wait_queue(&mutex->wait, q); | ||
241 | __add_wait_queue_sorted(&mutex->wait, q); | ||
242 | break; | ||
243 | } | ||
244 | } | ||
245 | } | ||
246 | |||
247 | return ret; | ||
248 | } | ||
249 | |||
250 | static void __prioq_dgl_decrease_pos(struct prioq_mutex *mutex, struct task_struct *t) | ||
251 | { | ||
252 | // TODO: | ||
253 | // (1) Increase position for 't' in all of its DGLs. | ||
254 | // (2) Check to see if 't' can take the DGLs atomically | ||
255 | // (3) If it can take the DGLs, do so. | ||
256 | // (4) Cleanup? | ||
257 | // (5) Wake up 't' | ||
258 | |||
259 | |||
260 | wait_queue_t *q; | ||
261 | struct list_head *pos; | ||
262 | struct task_struct *queued; | ||
263 | int i; | ||
264 | |||
265 | list_for_each(pos, &mutex->wait.task_list) { | ||
266 | q = list_entry(pos, wait_queue_t, task_list); | ||
267 | if(q->func == dgl_wake_up) { | ||
268 | // we're looking at a dgl request | ||
269 | dgl_wait_state_t *dgl_wait = (dgl_wait_state_t*) q->private; | ||
270 | queued = dgl_wait->task; | ||
271 | |||
272 | if (queued == t) // is it the one we're looking for? | ||
273 | { | ||
274 | // reposition on the other mutexes | ||
275 | for(i = 0; i < dgl_wait->size; ++i) { | ||
276 | struct prioq_mutex *pm = (struct prioq_mutex *)dgl_wait->locks[i]; | ||
277 | if (pm != mutex) | ||
278 | __prioq_decrease_pos(pm, t); | ||
279 | } | ||
280 | // reposition on this mutex | ||
281 | __remove_wait_queue(&mutex->wait, q); | ||
282 | __add_wait_queue_sorted(&mutex->wait, q); | ||
283 | return; | ||
284 | } | ||
285 | } | ||
286 | else { | ||
287 | // not dgl request. | ||
288 | queued = (struct task_struct*) q->private; | ||
289 | if (queued == t) { // is this the one we're looking for? | ||
290 | // if so, reposition it. | ||
291 | __remove_wait_queue(&mutex->wait, q); | ||
292 | __add_wait_queue_sorted(&mutex->wait, q); | ||
293 | return; | ||
294 | } | ||
295 | } | ||
296 | } | ||
297 | |||
298 | BUG(); | ||
299 | } | ||
300 | #endif | ||
301 | |||
302 | |||
303 | |||
304 | |||
170 | 305 | ||
171 | /* caller is responsible for locking */ | 306 | /* caller is responsible for locking */ |
172 | static struct task_struct* __prioq_mutex_find_hp_waiter(struct prioq_mutex *mutex, | 307 | static struct task_struct* __prioq_mutex_find_hp_waiter(struct prioq_mutex *mutex, |
@@ -182,7 +317,10 @@ static struct task_struct* __prioq_mutex_find_hp_waiter(struct prioq_mutex *mute
182 | queued = get_queued_task(q); | 317 | queued = get_queued_task(q); |
183 | 318 | ||
184 | /* Compare task prios, find high prio task. */ | 319 | /* Compare task prios, find high prio task. */ |
185 | if (queued && queued != skip && litmus->compare(queued, found)) { | 320 | if (queued && |
321 | (queued != skip) && | ||
322 | (tsk_rt(queued)->blocked_lock == &mutex->litmus_lock) && | ||
323 | litmus->compare(queued, found)) { | ||
186 | found = queued; | 324 | found = queued; |
187 | } | 325 | } |
188 | } | 326 | } |
@@ -198,6 +336,12 @@ int prioq_mutex_is_owner(struct litmus_lock *l, struct task_struct *t)
198 | return(mutex->owner == t); | 336 | return(mutex->owner == t); |
199 | } | 337 | } |
200 | 338 | ||
339 | struct task_struct* prioq_mutex_get_owner(struct litmus_lock *l) | ||
340 | { | ||
341 | struct prioq_mutex *mutex = prioq_mutex_from_lock(l); | ||
342 | return(mutex->owner); | ||
343 | } | ||
344 | |||
201 | // return 1 if resource was immediatly acquired. | 345 | // return 1 if resource was immediatly acquired. |
202 | // Assumes mutex->lock is held. | 346 | // Assumes mutex->lock is held. |
203 | // Must set task state to TASK_UNINTERRUPTIBLE if task blocks. | 347 | // Must set task state to TASK_UNINTERRUPTIBLE if task blocks. |
@@ -211,31 +355,16 @@ int prioq_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait,
211 | 355 | ||
212 | BUG_ON(t != current); | 356 | BUG_ON(t != current); |
213 | 357 | ||
214 | if (mutex->owner) { | ||
215 | TRACE_TASK(t, "Enqueuing on lock %d (held by %s/%d).\n", | ||
216 | l->ident, mutex->owner->comm, mutex->owner->pid); | ||
217 | |||
218 | init_dgl_waitqueue_entry(wq_node, dgl_wait); | ||
219 | 358 | ||
220 | set_task_state(t, TASK_UNINTERRUPTIBLE); | 359 | init_dgl_waitqueue_entry(wq_node, dgl_wait); |
221 | __add_wait_queue_sorted_exclusive(&mutex->wait, wq_node); | ||
222 | } else { | ||
223 | TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident); | ||
224 | 360 | ||
225 | /* it's ours now */ | 361 | set_task_state(t, TASK_UNINTERRUPTIBLE); |
226 | mutex->owner = t; | 362 | __add_wait_queue_sorted_exclusive(&mutex->wait, wq_node); |
227 | |||
228 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
229 | binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks, | ||
230 | struct nested_info, hp_binheap_node); | ||
231 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | ||
232 | |||
233 | acquired_immediatly = 1; | ||
234 | } | ||
235 | 363 | ||
236 | return acquired_immediatly; | 364 | return acquired_immediatly; |
237 | } | 365 | } |
238 | 366 | ||
367 | |||
239 | void prioq_mutex_enable_priority(struct litmus_lock *l, | 368 | void prioq_mutex_enable_priority(struct litmus_lock *l, |
240 | dgl_wait_state_t* dgl_wait) | 369 | dgl_wait_state_t* dgl_wait) |
241 | { | 370 | { |
@@ -244,11 +373,23 @@ void prioq_mutex_enable_priority(struct litmus_lock *l,
244 | struct task_struct *owner = mutex->owner; | 373 | struct task_struct *owner = mutex->owner; |
245 | unsigned long flags = 0; // these are unused under DGL coarse-grain locking | 374 | unsigned long flags = 0; // these are unused under DGL coarse-grain locking |
246 | 375 | ||
376 | /************************************** | ||
377 | * This code looks like it supports fine-grain locking, but it does not! | ||
378 | * TODO: Guarantee that mutex->lock is held by the caller to support fine-grain locking. | ||
379 | **************************************/ | ||
380 | |||
247 | BUG_ON(owner == t); | 381 | BUG_ON(owner == t); |
248 | 382 | ||
249 | tsk_rt(t)->blocked_lock = l; | 383 | tsk_rt(t)->blocked_lock = l; |
250 | mb(); | 384 | mb(); |
251 | 385 | ||
386 | TRACE_TASK(t, "Enabling prio on lock %d. I am %s/%d : cur hp_waiter is %s/%d.\n", | ||
387 | l->ident, | ||
388 | (t) ? t->comm : "null", | ||
389 | (t) ? t->pid : 0, | ||
390 | (mutex->hp_waiter) ? mutex->hp_waiter->comm : "null", | ||
391 | (mutex->hp_waiter) ? mutex->hp_waiter->pid : 0); | ||
392 | |||
252 | if (litmus->compare(t, mutex->hp_waiter)) { | 393 | if (litmus->compare(t, mutex->hp_waiter)) { |
253 | struct task_struct *old_max_eff_prio; | 394 | struct task_struct *old_max_eff_prio; |
254 | struct task_struct *new_max_eff_prio; | 395 | struct task_struct *new_max_eff_prio; |
@@ -260,6 +401,12 @@ void prioq_mutex_enable_priority(struct litmus_lock *l,
260 | else | 401 | else |
261 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); | 402 | TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n"); |
262 | 403 | ||
404 | |||
405 | if (!owner) { | ||
406 | TRACE_TASK(t, "Enabling priority, but this lock %d is idle.\n", l->ident); | ||
407 | goto out; | ||
408 | } | ||
409 | |||
263 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | 410 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); |
264 | 411 | ||
265 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | 412 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); |
@@ -289,26 +436,55 @@ void prioq_mutex_enable_priority(struct litmus_lock *l,
289 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); | 436 | raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock); |
290 | unlock_fine_irqrestore(&mutex->lock, flags); | 437 | unlock_fine_irqrestore(&mutex->lock, flags); |
291 | } | 438 | } |
439 | |||
440 | return; | ||
292 | } | 441 | } |
293 | else { | 442 | |
294 | TRACE_TASK(t, "no change in hp_waiter.\n"); | 443 | TRACE_TASK(t, "no change in hp_waiter.\n"); |
295 | unlock_fine_irqrestore(&mutex->lock, flags); | 444 | |
296 | } | 445 | out: |
446 | unlock_fine_irqrestore(&mutex->lock, flags); | ||
297 | } | 447 | } |
298 | 448 | ||
299 | static void select_next_lock_if_primary(struct litmus_lock *l, | 449 | static void select_next_lock_if_primary(struct litmus_lock *l, |
300 | dgl_wait_state_t *dgl_wait) | 450 | dgl_wait_state_t *dgl_wait) |
301 | { | 451 | { |
302 | if(tsk_rt(dgl_wait->task)->blocked_lock == l) { | 452 | struct task_struct *t = dgl_wait->task; |
453 | |||
454 | if(tsk_rt(t)->blocked_lock == l) { | ||
455 | struct prioq_mutex *mutex = prioq_mutex_from_lock(l); | ||
456 | |||
303 | TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n", | 457 | TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n", |
304 | l->ident, dgl_wait->task->comm, dgl_wait->task->pid); | 458 | l->ident, t->comm, t->pid); |
305 | tsk_rt(dgl_wait->task)->blocked_lock = NULL; | 459 | |
460 | tsk_rt(t)->blocked_lock = NULL; | ||
306 | mb(); | 461 | mb(); |
462 | |||
463 | |||
464 | /* determine new hp_waiter if necessary */ | ||
465 | if (t == mutex->hp_waiter) { | ||
466 | |||
467 | TRACE_TASK(t, "Deciding to not be hp waiter on lock %d any more.\n", l->ident); | ||
468 | /* next has the highest priority --- it doesn't need to | ||
469 | * inherit. However, we need to make sure that the | ||
470 | * next-highest priority in the queue is reflected in | ||
471 | * hp_waiter. */ | ||
472 | mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, t); | ||
473 | l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? | ||
474 | effective_priority(mutex->hp_waiter) : | ||
475 | NULL; | ||
476 | |||
477 | if (mutex->hp_waiter) | ||
478 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); | ||
479 | else | ||
480 | TRACE("no further waiters\n"); | ||
481 | } | ||
482 | |||
307 | select_next_lock(dgl_wait /*, l*/); // pick the next lock to be blocked on | 483 | select_next_lock(dgl_wait /*, l*/); // pick the next lock to be blocked on |
308 | } | 484 | } |
309 | else { | 485 | else { |
310 | TRACE_CUR("Got lock early! Lock %d in DGL was NOT primary for %s/%d.\n", | 486 | TRACE_CUR("Got lock early! Lock %d in DGL was NOT primary for %s/%d.\n", |
311 | l->ident, dgl_wait->task->comm, dgl_wait->task->pid); | 487 | l->ident, t->comm, t->pid); |
312 | } | 488 | } |
313 | } | 489 | } |
314 | #endif | 490 | #endif |
@@ -316,13 +492,30 @@ static void select_next_lock_if_primary(struct litmus_lock *l,
316 | 492 | ||
317 | 493 | ||
318 | 494 | ||
495 | |||
496 | |||
497 | |||
498 | |||
499 | |||
500 | |||
501 | |||
502 | |||
503 | |||
504 | |||
505 | |||
506 | |||
507 | |||
508 | |||
509 | |||
510 | |||
511 | |||
319 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 512 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
320 | 513 | ||
321 | int prioq_mutex_dgl_can_quick_lock(struct litmus_lock *l, struct task_struct *t) | 514 | int prioq_mutex_dgl_can_quick_lock(struct litmus_lock *l, struct task_struct *t) |
322 | { | 515 | { |
323 | struct prioq_mutex *mutex = prioq_mutex_from_lock(l); | 516 | struct prioq_mutex *mutex = prioq_mutex_from_lock(l); |
324 | 517 | ||
325 | if(!mutex->owner && mutex->hp_waiter == t) { | 518 | if(!mutex->owner) { |
326 | wait_queue_t *front = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list); | 519 | wait_queue_t *front = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list); |
327 | struct task_struct *at_front = get_queued_task(front); | 520 | struct task_struct *at_front = get_queued_task(front); |
328 | if(t == at_front) { | 521 | if(t == at_front) { |
@@ -336,40 +529,68 @@ void prioq_mutex_dgl_quick_lock(struct litmus_lock *l, struct litmus_lock *cur_l
336 | struct task_struct* t, wait_queue_t *q) | 529 | struct task_struct* t, wait_queue_t *q) |
337 | { | 530 | { |
338 | struct prioq_mutex *mutex = prioq_mutex_from_lock(l); | 531 | struct prioq_mutex *mutex = prioq_mutex_from_lock(l); |
339 | 532 | ||
340 | BUG_ON(mutex->owner); | 533 | BUG_ON(mutex->owner); |
341 | BUG_ON(mutex->hp_waiter != t); | ||
342 | BUG_ON(t != get_queued_task(list_entry(mutex->wait.task_list.next, wait_queue_t, task_list))); | 534 | BUG_ON(t != get_queued_task(list_entry(mutex->wait.task_list.next, wait_queue_t, task_list))); |
343 | 535 | ||
344 | 536 | ||
345 | mutex->owner = t; | 537 | mutex->owner = t; |
346 | 538 | ||
347 | if (l != cur_lock) { | 539 | if (l != cur_lock) { |
348 | /* we have to update the state of the other lock for it */ | 540 | /* we have to update the state of the other lock for it */ |
349 | __remove_wait_queue(&mutex->wait, q); | 541 | __remove_wait_queue(&mutex->wait, q); |
350 | 542 | ||
351 | mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, t); | 543 | mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, t); |
352 | l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? | 544 | l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? |
353 | effective_priority(mutex->hp_waiter) : | 545 | effective_priority(mutex->hp_waiter) : |
354 | NULL; | 546 | NULL; |
355 | 547 | ||
356 | if (mutex->hp_waiter) | 548 | if (mutex->hp_waiter) |
357 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); | 549 | TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n"); |
358 | else | 550 | else |
359 | TRACE("no further waiters\n"); | 551 | TRACE("no further waiters\n"); |
360 | 552 | ||
361 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); | 553 | raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); |
362 | 554 | ||
363 | binheap_add(&l->nest.hp_binheap_node, | 555 | binheap_add(&l->nest.hp_binheap_node, |
364 | &tsk_rt(t)->hp_blocked_tasks, | 556 | &tsk_rt(t)->hp_blocked_tasks, |
365 | struct nested_info, hp_binheap_node); | 557 | struct nested_info, hp_binheap_node); |
366 | 558 | ||
367 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | 559 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); |
368 | } | 560 | } |
561 | else { | ||
562 | /* the unlock call that triggered the quick_lock call will handle | ||
563 | * the acquire of cur_lock. | ||
564 | */ | ||
565 | } | ||
369 | } | 566 | } |
370 | #endif | 567 | #endif |
371 | 568 | ||
372 | 569 | ||
570 | |||
571 | |||
572 | |||
573 | |||
574 | |||
575 | |||
576 | |||
577 | |||
578 | |||
579 | |||
580 | |||
581 | |||
582 | |||
583 | |||
584 | |||
585 | |||
586 | |||
587 | |||
588 | |||
589 | |||
590 | |||
591 | |||
592 | |||
593 | |||
373 | int prioq_mutex_lock(struct litmus_lock* l) | 594 | int prioq_mutex_lock(struct litmus_lock* l) |
374 | { | 595 | { |
375 | struct task_struct *t = current; | 596 | struct task_struct *t = current; |
@@ -394,9 +615,12 @@ int prioq_mutex_lock(struct litmus_lock* l)
394 | 615 | ||
395 | /* block if there is an owner, or if hp_waiter is blocked for DGL and | 616 | /* block if there is an owner, or if hp_waiter is blocked for DGL and |
396 | * prio(t) < prio(hp_waiter) */ | 617 | * prio(t) < prio(hp_waiter) */ |
397 | if (mutex->owner) { | 618 | if (mutex->owner || |
619 | (waitqueue_active(&mutex->wait) && litmus->compare(mutex->hp_waiter, t))) { | ||
398 | TRACE_TASK(t, "Blocking on lock %d (held by %s/%d).\n", | 620 | TRACE_TASK(t, "Blocking on lock %d (held by %s/%d).\n", |
399 | l->ident, mutex->owner->comm, mutex->owner->pid); | 621 | l->ident, |
622 | (mutex->owner) ? mutex->owner->comm : "null", | ||
623 | (mutex->owner) ? mutex->owner->pid : 0); | ||
400 | 624 | ||
401 | #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) | 625 | #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) |
402 | // KLUDGE: don't count this suspension as time in the critical gpu | 626 | // KLUDGE: don't count this suspension as time in the critical gpu |
@@ -421,7 +645,9 @@ int prioq_mutex_lock(struct litmus_lock* l)
421 | __add_wait_queue_sorted_exclusive(&mutex->wait, &wait); | 645 | __add_wait_queue_sorted_exclusive(&mutex->wait, &wait); |
422 | 646 | ||
423 | /* check if we need to activate priority inheritance */ | 647 | /* check if we need to activate priority inheritance */ |
424 | if (litmus->compare(t, mutex->hp_waiter)) { | 648 | /* We can't be the hp waiter if there is no owner - task waiting for |
649 | * the full DGL must be the hp_waiter. */ | ||
650 | if (owner && litmus->compare(t, mutex->hp_waiter)) { | ||
425 | 651 | ||
426 | struct task_struct *old_max_eff_prio; | 652 | struct task_struct *old_max_eff_prio; |
427 | struct task_struct *new_max_eff_prio; | 653 | struct task_struct *new_max_eff_prio; |
@@ -437,10 +663,10 @@ int prioq_mutex_lock(struct litmus_lock* l)
437 | 663 | ||
438 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | 664 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); |
439 | mutex->hp_waiter = t; | 665 | mutex->hp_waiter = t; |
440 | 666 | ||
441 | TRACE_TASK(t, "prioq_mutex %d state after enqeue in priority queue\n", l->ident); | 667 | TRACE_TASK(t, "prioq_mutex %d state after enqeue in priority queue\n", l->ident); |
442 | __dump_lock_info(mutex); | 668 | __dump_prioq_lock_info(mutex); |
443 | 669 | ||
444 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | 670 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); |
445 | binheap_decrease(&l->nest.hp_binheap_node, | 671 | binheap_decrease(&l->nest.hp_binheap_node, |
446 | &tsk_rt(owner)->hp_blocked_tasks); | 672 | &tsk_rt(owner)->hp_blocked_tasks); |
@@ -471,8 +697,8 @@ int prioq_mutex_lock(struct litmus_lock* l)
471 | TRACE_TASK(t, "no change in hp_waiter.\n"); | 697 | TRACE_TASK(t, "no change in hp_waiter.\n"); |
472 | 698 | ||
473 | TRACE_TASK(t, "prioq_mutex %d state after enqeue in priority queue\n", l->ident); | 699 | TRACE_TASK(t, "prioq_mutex %d state after enqeue in priority queue\n", l->ident); |
474 | __dump_lock_info(mutex); | 700 | __dump_prioq_lock_info(mutex); |
475 | 701 | ||
476 | unlock_fine_irqrestore(&mutex->lock, flags); | 702 | unlock_fine_irqrestore(&mutex->lock, flags); |
477 | } | 703 | } |
478 | 704 | ||
@@ -525,8 +751,6 @@ int prioq_mutex_unlock(struct litmus_lock* l)
525 | 751 | ||
526 | struct task_struct *old_max_eff_prio; | 752 | struct task_struct *old_max_eff_prio; |
527 | 753 | ||
528 | int wake_up_task = 1; | ||
529 | |||
530 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 754 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
531 | dgl_wait_state_t *dgl_wait = NULL; | 755 | dgl_wait_state_t *dgl_wait = NULL; |
532 | raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t); | 756 | raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t); |
@@ -584,14 +808,40 @@ int prioq_mutex_unlock(struct litmus_lock* l)
584 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); | 808 | raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); |
585 | 809 | ||
586 | 810 | ||
587 | /* check if there are jobs waiting for this resource */ | 811 | |
812 | |||
813 | mutex->owner = NULL; | ||
814 | |||
588 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 815 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
589 | __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next); | 816 | if(waitqueue_active(&mutex->wait)) { |
590 | if(dgl_wait) { | 817 | /* Priority queue-based locks must be _acquired_ atomically under DGLs |
591 | next = dgl_wait->task; | 818 | * in order to avoid deadlock. We leave this lock idle momentarily if the |
819 | * DGL waiter can't acquire all locks at once. | ||
820 | */ | ||
821 | wait_queue_t *q = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list); | ||
822 | if(q->func == dgl_wake_up) { | ||
823 | dgl_wait = (dgl_wait_state_t*) q->private; | ||
824 | |||
825 | TRACE_CUR("Checking to see if DGL waiter %s/%d can take its locks\n", | ||
826 | dgl_wait->task->comm, dgl_wait->task->pid); | ||
827 | |||
828 | if(__attempt_atomic_dgl_acquire(l, dgl_wait)) { | ||
829 | /* failed. can't take this lock yet. we remain at head of prioq, but | ||
830 | * allow hp requests in the future to go ahead of us. */ | ||
831 | select_next_lock_if_primary(l, dgl_wait); | ||
832 | goto out; | ||
833 | } | ||
834 | else { | ||
835 | TRACE_CUR("%s/%d can take its entire DGL atomically.\n", | ||
836 | dgl_wait->task->comm, dgl_wait->task->pid); | ||
837 | } | ||
838 | } | ||
839 | |||
840 | /* remove the first */ | ||
841 | next = __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait); | ||
592 | } | 842 | } |
593 | #else | 843 | #else |
594 | 844 | /* check if there are jobs waiting for this resource */ | |
595 | next = __waitqueue_remove_first(&mutex->wait); | 845 | next = __waitqueue_remove_first(&mutex->wait); |
596 | #endif | 846 | #endif |
597 | if (next) { | 847 | if (next) { |
@@ -623,14 +873,6 @@ int prioq_mutex_unlock(struct litmus_lock* l)
623 | &tsk_rt(next)->hp_blocked_tasks, | 873 | &tsk_rt(next)->hp_blocked_tasks, |
624 | struct nested_info, hp_binheap_node); | 874 | struct nested_info, hp_binheap_node); |
625 | 875 | ||
626 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
627 | if(dgl_wait) { | ||
628 | select_next_lock_if_primary(l, dgl_wait); | ||
629 | --(dgl_wait->nr_remaining); | ||
630 | wake_up_task = (dgl_wait->nr_remaining == 0); | ||
631 | } | ||
632 | #endif | ||
633 | |||
634 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); | 876 | raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock); |
635 | } | 877 | } |
636 | else { | 878 | else { |
@@ -678,28 +920,19 @@ int prioq_mutex_unlock(struct litmus_lock* l)
678 | #endif | 920 | #endif |
679 | } | 921 | } |
680 | 922 | ||
681 | if(wake_up_task) { | 923 | TRACE_TASK(next, "waking up since it is no longer blocked.\n"); |
682 | TRACE_TASK(next, "waking up since it is no longer blocked.\n"); | ||
683 | 924 | ||
684 | tsk_rt(next)->blocked_lock = NULL; | 925 | tsk_rt(next)->blocked_lock = NULL; |
685 | mb(); | 926 | mb(); |
686 | 927 | ||
687 | #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) | 928 | #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) |
688 | // re-enable tracking | 929 | // re-enable tracking |
689 | if(tsk_rt(next)->held_gpus) { | 930 | if(tsk_rt(next)->held_gpus) { |
690 | tsk_rt(next)->suspend_gpu_tracker_on_block = 0; | 931 | tsk_rt(next)->suspend_gpu_tracker_on_block = 0; |
691 | } | 932 | } |
692 | #endif | 933 | #endif |
693 | 934 | ||
694 | wake_up_process(next); | 935 | wake_up_process(next); |
695 | } | ||
696 | else { | ||
697 | TRACE_TASK(next, "is still blocked.\n"); | ||
698 | } | ||
699 | } | ||
700 | else { | ||
701 | /* becomes available */ | ||
702 | mutex->owner = NULL; | ||
703 | } | 936 | } |
704 | 937 | ||
705 | unlock_fine_irqrestore(&mutex->lock, flags); | 938 | unlock_fine_irqrestore(&mutex->lock, flags); |
@@ -726,14 +959,34 @@ void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l,
726 | lock_fine(&mutex->lock); | 959 | lock_fine(&mutex->lock); |
727 | unlock_fine(to_unlock); | 960 | unlock_fine(to_unlock); |
728 | 961 | ||
962 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
963 | { | ||
964 | int woke_up = __prioq_dgl_increase_pos(mutex, t); | ||
965 | if (woke_up) { | ||
966 | /* t got the DGL. it is not blocked anywhere. just return. */ | ||
967 | unlock_fine_irqrestore(&mutex->lock, irqflags); | ||
968 | return; | ||
969 | } | ||
970 | } | ||
971 | #else | ||
729 | __prioq_increase_pos(mutex, t); | 972 | __prioq_increase_pos(mutex, t); |
730 | 973 | #endif | |
974 | |||
731 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | 975 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked |
732 | struct task_struct *owner = mutex->owner; | 976 | struct task_struct *owner = mutex->owner; |
733 | 977 | ||
734 | struct task_struct *old_max_eff_prio; | 978 | struct task_struct *old_max_eff_prio; |
735 | struct task_struct *new_max_eff_prio; | 979 | struct task_struct *new_max_eff_prio; |
736 | 980 | ||
981 | if (!owner) { | ||
982 | TRACE_TASK(t, "Owner on PRIOQ lock %d is null. Don't propagate.\n", l->ident); | ||
983 | if(t == mutex->hp_waiter) { | ||
984 | // reflect the changed prio. | ||
985 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
986 | } | ||
987 | return; | ||
988 | } | ||
989 | |||
737 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | 990 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); |
738 | 991 | ||
739 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | 992 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); |
@@ -741,17 +994,17 @@ void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l,
741 | if((t != mutex->hp_waiter) && litmus->compare(t, mutex->hp_waiter)) { | 994 | if((t != mutex->hp_waiter) && litmus->compare(t, mutex->hp_waiter)) { |
742 | TRACE_TASK(t, "is new highest-prio waiter by propagation.\n"); | 995 | TRACE_TASK(t, "is new highest-prio waiter by propagation.\n"); |
743 | mutex->hp_waiter = t; | 996 | mutex->hp_waiter = t; |
744 | 997 | ||
745 | TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident); | 998 | TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident); |
746 | __dump_lock_info(mutex); | 999 | __dump_prioq_lock_info(mutex); |
747 | } | 1000 | } |
748 | else { | 1001 | else { |
749 | TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident); | 1002 | TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident); |
750 | __dump_lock_info(mutex); | 1003 | __dump_prioq_lock_info(mutex); |
751 | } | 1004 | } |
752 | 1005 | ||
753 | if(t == mutex->hp_waiter) { | 1006 | if(t == mutex->hp_waiter) { |
754 | // reflect the decreased priority in the heap node. | 1007 | // reflect the increased priority in the heap node. |
755 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | 1008 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); |
756 | 1009 | ||
757 | BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node)); | 1010 | BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node)); |
@@ -791,10 +1044,10 @@ void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l,
791 | } | 1044 | } |
792 | else { | 1045 | else { |
793 | struct litmus_lock *still_blocked; | 1046 | struct litmus_lock *still_blocked; |
794 | 1047 | ||
795 | TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident); | 1048 | TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident); |
796 | __dump_lock_info(mutex); | 1049 | __dump_prioq_lock_info(mutex); |
797 | 1050 | ||
798 | still_blocked = tsk_rt(t)->blocked_lock; | 1051 | still_blocked = tsk_rt(t)->blocked_lock; |
799 | 1052 | ||
800 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); | 1053 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); |
@@ -836,8 +1089,12 @@ void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
836 | lock_fine(&mutex->lock); | 1089 | lock_fine(&mutex->lock); |
837 | unlock_fine(to_unlock); | 1090 | unlock_fine(to_unlock); |
838 | 1091 | ||
1092 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
1093 | __prioq_dgl_decrease_pos(mutex, t); | ||
1094 | #else | ||
839 | __prioq_decrease_pos(mutex, t); | 1095 | __prioq_decrease_pos(mutex, t); |
840 | 1096 | #endif | |
1097 | |||
841 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked | 1098 | if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked |
842 | if(t == mutex->hp_waiter) { | 1099 | if(t == mutex->hp_waiter) { |
843 | struct task_struct *owner = mutex->owner; | 1100 | struct task_struct *owner = mutex->owner; |
@@ -845,16 +1102,23 @@ void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
845 | struct task_struct *old_max_eff_prio; | 1102 | struct task_struct *old_max_eff_prio; |
846 | struct task_struct *new_max_eff_prio; | 1103 | struct task_struct *new_max_eff_prio; |
847 | 1104 | ||
1105 | if (!owner) { | ||
1106 | TRACE_TASK(t, "Owner on PRIOQ lock %d is null. Don't propagate.\n", l->ident); | ||
1107 | // reflect the changed prio. | ||
1108 | l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter); | ||
1109 | return; | ||
1110 | } | ||
1111 | |||
848 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); | 1112 | raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock); |
849 | 1113 | ||
850 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); | 1114 | old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks); |
851 | 1115 | ||
852 | binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); | 1116 | binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks); |
853 | mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, NULL); | 1117 | mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, NULL); |
854 | 1118 | ||
855 | TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident); | 1119 | TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident); |
856 | __dump_lock_info(mutex); | 1120 | __dump_prioq_lock_info(mutex); |
857 | 1121 | ||
858 | l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? | 1122 | l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ? |
859 | effective_priority(mutex->hp_waiter) : NULL; | 1123 | effective_priority(mutex->hp_waiter) : NULL; |
860 | binheap_add(&l->nest.hp_binheap_node, | 1124 | binheap_add(&l->nest.hp_binheap_node, |
@@ -904,18 +1168,18 @@ void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
904 | } | 1168 | } |
905 | else { | 1169 | else { |
906 | TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident); | 1170 | TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident); |
907 | __dump_lock_info(mutex); | 1171 | __dump_prioq_lock_info(mutex); |
908 | 1172 | ||
909 | TRACE_TASK(t, "is not hp_waiter. No propagation.\n"); | 1173 | TRACE_TASK(t, "is not hp_waiter. No propagation.\n"); |
910 | unlock_fine_irqrestore(&mutex->lock, irqflags); | 1174 | unlock_fine_irqrestore(&mutex->lock, irqflags); |
911 | } | 1175 | } |
912 | } | 1176 | } |
913 | else { | 1177 | else { |
914 | struct litmus_lock *still_blocked; | 1178 | struct litmus_lock *still_blocked; |
915 | 1179 | ||
916 | TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident); | 1180 | TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident); |
917 | __dump_lock_info(mutex); | 1181 | __dump_prioq_lock_info(mutex); |
918 | 1182 | ||
919 | still_blocked = tsk_rt(t)->blocked_lock; | 1183 | still_blocked = tsk_rt(t)->blocked_lock; |
920 | 1184 | ||
921 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); | 1185 | TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident); |
@@ -1153,8 +1417,7 @@ struct litmus_lock* prioq_mutex_new(struct litmus_lock_ops* ops)
1153 | #endif | 1417 | #endif |
1154 | 1418 | ||
1155 | ((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter; | 1419 | ((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter; |
1156 | ((struct litmus_lock*)mutex)->nest.owner_ptr = &mutex->owner; | 1420 | |
1157 | |||
1158 | ((struct litmus_lock*)mutex)->proc = &prioq_proc_ops; | 1421 | ((struct litmus_lock*)mutex)->proc = &prioq_proc_ops; |
1159 | 1422 | ||
1160 | return &mutex->litmus_lock; | 1423 | return &mutex->litmus_lock; |
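
For reference, the queue discipline that __add_wait_queue_sorted and the repositioning helpers in this patch rely on is a priority-ordered insert: higher-priority waiters sit closer to the head of the wait queue, and ties keep FIFO order. The following minimal, self-contained C sketch illustrates that insert outside the kernel. The struct names, the integer prio field, and waitqueue_add_sorted are illustrative stand-ins only (they are not LITMUS^RT or kernel APIs) for wait_queue_t and litmus->compare().

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a wait-queue entry: a waiter with a numeric
 * priority, where a larger number means higher priority. */
struct waiter {
    int prio;
    struct waiter *next;
};

struct waitqueue {
    struct waiter *head;   /* highest-priority waiter, or NULL if empty */
};

/* Insert 'w' so the queue stays sorted by descending priority.
 * Among equal priorities the new waiter goes last (FIFO), mirroring the
 * "less priority than tail? go to tail" / scan-from-head logic above. */
static void waitqueue_add_sorted(struct waitqueue *q, struct waiter *w)
{
    struct waiter **pos = &q->head;

    /* advance past every waiter whose priority is >= ours */
    while (*pos && (*pos)->prio >= w->prio)
        pos = &(*pos)->next;

    w->next = *pos;
    *pos = w;
}

int main(void)
{
    struct waitqueue q = { NULL };
    int prios[] = { 3, 7, 7, 1, 5 };

    for (size_t i = 0; i < sizeof prios / sizeof prios[0]; ++i) {
        struct waiter *w = malloc(sizeof *w);
        w->prio = prios[i];
        waitqueue_add_sorted(&q, w);
    }

    /* prints "7 7 5 3 1": sorted by priority, FIFO among ties */
    for (struct waiter *w = q.head; w; w = w->next)
        printf("%d ", w->prio);
    printf("\n");
    return 0;
}

The >= comparison is what preserves FIFO order among equal-priority waiters; the kernel code gets the same effect by scanning from the head and inserting only when the queued task is strictly lower priority according to litmus->compare().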