Diffstat (limited to 'litmus/prioq_lock.c')
-rw-r--r--    litmus/prioq_lock.c    151
1 file changed, 76 insertions, 75 deletions
diff --git a/litmus/prioq_lock.c b/litmus/prioq_lock.c
index faf8c15df542..142f56fe9099 100644
--- a/litmus/prioq_lock.c
+++ b/litmus/prioq_lock.c
@@ -165,12 +165,12 @@ static struct task_struct* __prioq_mutex_find_hp_waiter(struct prioq_mutex *mute
     wait_queue_t *q;
     struct list_head *pos;
     struct task_struct *queued = NULL, *found = NULL;
 
     /* list in sorted order. higher-prio tasks likely at the front. */
     list_for_each(pos, &mutex->wait.task_list) {
         q = list_entry(pos, wait_queue_t, task_list);
         queued = get_queued_task(q);
 
         /* Compare task prios, find high prio task. */
         if (queued &&
             (queued != skip) &&
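
Note: the hunk above is cut off mid-condition by the viewer. As a hedged sketch (not part of this commit), the scan presumably completes along these lines, assuming the litmus->compare() plugin helper that LITMUS^RT's other lock types use for priority comparisons:

    /* sketch: track the highest-priority waiter seen so far, skipping 'skip' */
    if (queued &&
        (queued != skip) &&
        (litmus->compare(queued, found))) {
        found = queued;  /* queued outranks the current candidate */
    }
    /* ... after the loop ... */
    return found;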
@@ -195,12 +195,12 @@ static int ___prioq_dgl_acquire_via_inheritance(struct prioq_mutex *mutex, struc
     struct litmus_lock *l;
     BUG_ON(mutex->owner != NULL);
     BUG_ON(list_empty(&mutex->wait.task_list));
 
     l = &mutex->litmus_lock;
 
     if (dgl_wait) {
         BUG_ON(t != dgl_wait->task);
 
         /* we're a part of a DGL */
         if(__attempt_atomic_dgl_acquire(NULL, dgl_wait)) {
             TRACE_CUR("%s/%d cannot take entire DGL via inheritance.\n",
@@ -218,50 +218,50 @@ static int ___prioq_dgl_acquire_via_inheritance(struct prioq_mutex *mutex, struc
         /* we're a regular singular request. we can always take the lock if
          * there is no mutex owner. */
         wait_queue_t *first;
 
         TRACE_CUR("%s/%d can take its singular lock via inheritance!\n",
             t->comm, t->pid);
 
         first = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list);
 
         BUG_ON(get_queued_task(first) != t);
 
         __remove_wait_queue(&mutex->wait, first); /* remove the blocked task */
 
         /* update/cleanup the state of the lock */
 
         mutex->owner = t; /* take ownership!!! */
 
         mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, t);
         l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
             effective_priority(mutex->hp_waiter) : NULL;
 
         if (mutex->hp_waiter)
             TRACE_CUR("%s/%d is new highest-prio waiter\n",
                 mutex->hp_waiter->comm, mutex->hp_waiter->pid);
         else
             TRACE_CUR("no further waiters\n");
 
         raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
 
         binheap_add(&l->nest.hp_binheap_node,
             &tsk_rt(t)->hp_blocked_tasks,
             struct nested_info, hp_binheap_node);
 
         raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
     }
 
     if (t) {
         BUG_ON(mutex->owner != t);
 
         TRACE_CUR("%s/%d waking up since it is no longer blocked.\n", t->comm, t->pid);
 
         tsk_rt(t)->blocked_lock = NULL;
         mb();
 
         wake_up_for_lock(t);
     }
 
     return (t != NULL);
 }
 
@@ -276,7 +276,7 @@ static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struc
     // (1) Increase position of 't' for all locks it is waiting on.
     // (2) Check to see if 't' can take the lock, DGL or singular lock.
     // (3) If it can, do so and wake up 't'.
 
     struct list_head *pos;
     struct task_struct *new_head;
     struct task_struct *cur_head = NULL;
@@ -284,32 +284,32 @@ static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struc
     int woke_up = 0;
     int found = 0;
 
 
     BUG_ON(list_empty(&mutex->wait.task_list));
 
     /* note the task at the head of the queue */
     if(mutex->owner == NULL) {
         cur_head = get_head_task(mutex);
     }
 
     list_for_each(pos, &mutex->wait.task_list) {
         dgl_wait_state_t *temp_dgl_state;
         wait_queue_t *q = list_entry(pos, wait_queue_t, task_list);
         struct task_struct *queued = get_queued_task_and_dgl_wait(q, &temp_dgl_state);
 
         if (queued == t) {
 
             TRACE_CUR("found %s/%d in prioq of lock %d\n",
                 t->comm, t->pid,
                 mutex->litmus_lock.ident);
 
             if(temp_dgl_state) { /* it's a DGL request */
                 int i;
                 dgl_wait = temp_dgl_state;
 
                 TRACE_CUR("found request for %s/%d is a DGL request of size %d.\n",
                     t->comm, t->pid, dgl_wait->size);
 
                 // reposition on the other mutexes
                 for(i = 0; i < dgl_wait->size; ++i) {
                     // assume they're all PRIOQ_MUTEX
@@ -318,7 +318,7 @@ static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struc
                     __prioq_increase_pos(pm, t);
                 }
             }
 
             // reposition on this mutex
             __remove_wait_queue(&mutex->wait, q);
             __add_wait_queue_sorted(&mutex->wait, q);
@@ -326,24 +326,24 @@ static int __prioq_dgl_increase_pos(struct prioq_mutex *mutex, struct task_struc
             break;
         }
     }
 
     BUG_ON(!found);
 
     if (mutex->owner == NULL) {
         /* who is the new head? */
         new_head = get_head_task(mutex);
 
         /* is the prioq mutex idle? */
         if(cur_head != new_head) {
             /* the new head might be able to take the lock */
 
             BUG_ON(new_head != t); /* the new head must be this task since our prio increased */
 
             TRACE_CUR("Change in prioq head on idle prioq mutex %d: old = %s/%d new = %s/%d\n",
                 mutex->litmus_lock.ident,
                 cur_head->comm, cur_head->pid,
                 new_head->comm, new_head->pid);
 
             woke_up = ___prioq_dgl_acquire_via_inheritance(mutex, t, dgl_wait);
         }
     }
@@ -358,9 +358,9 @@ static int ___prioq_dgl_decrease_pos_and_check_acquire(struct prioq_mutex *mutex
     struct task_struct *cur_head = NULL;
     int woke_up = 0;
     int found = 1;
 
     BUG_ON(list_empty(&mutex->wait.task_list));
 
     /* find the position of t in mutex's wait q if it's not provided */
     if (q == NULL) {
         found = 0;
@@ -375,21 +375,21 @@ static int ___prioq_dgl_decrease_pos_and_check_acquire(struct prioq_mutex *mutex
 
     BUG_ON(!q);
     BUG_ON(!found);
 
     if(mutex->owner == NULL) {
         cur_head = get_head_task(mutex);
     }
 
     // update the position
     __remove_wait_queue(&mutex->wait, q);
     __add_wait_queue_sorted(&mutex->wait, q);
 
     if(mutex->owner == NULL) {
         // get a reference to dgl_wait if the new head is a DGL request
         dgl_wait_state_t *dgl_wait;
         q = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list);
         new_head = get_queued_task_and_dgl_wait(q, &dgl_wait);
 
         /* is the prioq mutex idle and did the head change? */
         if(cur_head != new_head) {
             /* the new head might be able to take the lock */
@@ -397,7 +397,7 @@ static int ___prioq_dgl_decrease_pos_and_check_acquire(struct prioq_mutex *mutex
                 mutex->litmus_lock.ident,
                 cur_head->comm, cur_head->pid,
                 new_head->comm, new_head->pid);
 
             woke_up = ___prioq_dgl_acquire_via_inheritance(mutex, new_head, dgl_wait);
         }
     }
@@ -410,28 +410,28 @@ static void __prioq_dgl_decrease_pos(struct prioq_mutex *mutex, struct task_stru
     // (2) For every lock upon which 't' was the head AND that lock is idle:
     // (3) Can the new head take the lock?
     // (4) If it can, do so and wake up the new head.
 
     struct list_head *pos;
 
     BUG_ON(list_empty(&mutex->wait.task_list));
 
     list_for_each(pos, &mutex->wait.task_list) {
         dgl_wait_state_t *dgl_wait;
         wait_queue_t *q = list_entry(pos, wait_queue_t, task_list);
         struct task_struct *queued = get_queued_task_and_dgl_wait(q, &dgl_wait);
 
         if (queued == t) {
             TRACE_CUR("found %s/%d in prioq of lock %d\n",
                 t->comm, t->pid,
                 mutex->litmus_lock.ident);
 
             if (dgl_wait) {
                 // reposition on all mutexes and check for wakeup
                 int i;
 
                 TRACE_CUR("found request for %s/%d is a DGL request of size %d.\n",
                     t->comm, t->pid, dgl_wait->size);
 
                 for(i = 0; i < dgl_wait->size; ++i) {
                     // assume they're all PRIOQ_MUTEX
                     struct prioq_mutex *pm = (struct prioq_mutex *) dgl_wait->locks[i];
@@ -442,12 +442,12 @@ static void __prioq_dgl_decrease_pos(struct prioq_mutex *mutex, struct task_stru
                 }
             }
             else {
                 ___prioq_dgl_decrease_pos_and_check_acquire(mutex, t, q);
             }
             return;
         }
     }
 
     BUG();
 }
 
@@ -481,7 +481,7 @@ int prioq_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait,
 
     init_dgl_waitqueue_entry(wq_node, dgl_wait);
 
-    set_task_state(t, TASK_UNINTERRUPTIBLE);
+    //set_task_state(t, TASK_UNINTERRUPTIBLE); /* done in do_litmus_dgl_atomic_lock() only if needed */
     __add_wait_queue_sorted_exclusive(&mutex->wait, wq_node);
 
     return acquired_immediatly;
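
Note: with set_task_state() commented out above, the blocking-state transition happens in the DGL entry path instead. A hedged sketch of the caller-side pattern the new comment points at (the condition and surrounding names are illustrative assumptions, not taken from this diff):

    /* sketch: in do_litmus_dgl_atomic_lock(), block only if some lock in
     * the group could not be acquired immediately */
    if (!acquired_all) {                         /* illustrative condition */
        set_task_state(t, TASK_UNINTERRUPTIBLE); /* done once, here */
        schedule();                              /* sleep until a holder wakes us */
    }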
@@ -494,7 +494,8 @@ void prioq_mutex_enable_priority(struct litmus_lock *l,
     struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
     struct task_struct *t = dgl_wait->task;
     struct task_struct *owner = mutex->owner;
-    unsigned long flags = 0; // these are unused under DGL coarse-grain locking
+    unsigned long flags;
+    local_save_flags(flags); // needed for coarse-grain DGLs?
 
     /**************************************
      * This code looks like it supports fine-grain locking, but it does not!
@@ -597,7 +598,7 @@ static void select_next_lock_if_primary(struct litmus_lock *l,
             effective_priority(mutex->hp_waiter) :
             NULL;
 
 
     if (mutex->hp_waiter)
         TRACE_CUR("%s/%d is new highest-prio waiter\n",
             mutex->hp_waiter->comm, mutex->hp_waiter->pid);
@@ -822,30 +823,32 @@ int prioq_mutex_lock(struct litmus_lock* l)
 }
 
 
-
 int prioq_mutex_unlock(struct litmus_lock* l)
 {
+    int err = 0;
     struct task_struct *t = current, *next = NULL;
+    struct task_struct *old_max_eff_prio;
     struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
     unsigned long flags;
 
-    struct task_struct *old_max_eff_prio;
-
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
+    raw_spinlock_t *dgl_lock;
     dgl_wait_state_t *dgl_wait = NULL;
-    raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t);
 #endif
 
-    int err = 0;
-
     if (mutex->owner != t) {
         err = -EINVAL;
         return err;
     }
 
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+    dgl_lock = litmus->get_dgl_spinlock(current);
+#endif
+
     lock_global_irqsave(dgl_lock, flags);
     lock_fine_irqsave(&mutex->lock, flags);
 
+
     raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
 
     TRACE_TASK(t, "Freeing lock %d\n", l->ident);
@@ -855,13 +858,13 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 
     if(tsk_rt(t)->inh_task){
         struct task_struct *new_max_eff_prio =
             top_priority(&tsk_rt(t)->hp_blocked_tasks);
 
         if((new_max_eff_prio == NULL) ||
             /* there was a change in eff prio */
             ( (new_max_eff_prio != old_max_eff_prio) &&
             /* and owner had the old eff prio */
             (effective_priority(t) == old_max_eff_prio)) )
         {
             // old_max_eff_prio > new_max_eff_prio
 
@@ -888,8 +891,6 @@ int prioq_mutex_unlock(struct litmus_lock* l)
     raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
 
 
-
-
     mutex->owner = NULL;
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
@@ -900,11 +901,11 @@ int prioq_mutex_unlock(struct litmus_lock* l)
          */
         wait_queue_t *q = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list);
         get_queued_task_and_dgl_wait(q, &dgl_wait);
 
         if (dgl_wait) {
             TRACE_CUR("Checking to see if DGL waiter %s/%d can take its locks\n",
                 dgl_wait->task->comm, dgl_wait->task->pid);
 
             if(__attempt_atomic_dgl_acquire(l, dgl_wait)) {
                 /* failed. can't take this lock yet. we remain at head of prioq
                  * allow hp requests in the future to go ahead of us. */
@@ -919,7 +920,7 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 
         /* remove the first */
         next = __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait);
 
         BUG_ON(dgl_wait && (next != dgl_wait->task));
     }
 #else
@@ -935,7 +936,7 @@ int prioq_mutex_unlock(struct litmus_lock* l)
     if (next == mutex->hp_waiter) {
 
         TRACE_CUR("%s/%d was highest-prio waiter\n", next->comm, next->pid);
 
         /* next has the highest priority --- it doesn't need to
          * inherit. However, we need to make sure that the
          * next-highest priority in the queue is reflected in
@@ -945,13 +946,13 @@ int prioq_mutex_unlock(struct litmus_lock* l)
             effective_priority(mutex->hp_waiter) :
             NULL;
 
 
         if (mutex->hp_waiter)
             TRACE_CUR("%s/%d is new highest-prio waiter\n",
                 mutex->hp_waiter->comm, mutex->hp_waiter->pid);
         else
             TRACE_CUR("no further waiters\n");
 
 
         raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
 
@@ -1019,8 +1020,8 @@ int prioq_mutex_unlock(struct litmus_lock* l)
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 out:
 #endif
-    unlock_global_irqrestore(dgl_lock, flags);
 
+    unlock_global_irqrestore(dgl_lock, flags);
     TRACE_TASK(t, "-- Freed lock %d --\n", l->ident);
 
     return err;
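
Note: taken together, the prioq_mutex_unlock() hunks move the err and old_max_eff_prio declarations and defer litmus->get_dgl_spinlock() until the caller is known to own the mutex, so the -EINVAL path touches no global state. A hedged sketch of the resulting control flow (simplified; the waiter hand-off and fine-grain unlock are elided, and the !DGL build is assumed to stub out the global-lock macros as in LITMUS^RT proper):

    int prioq_mutex_unlock_sketch(struct litmus_lock *l)
    {
        int err = 0;
        struct task_struct *t = current;
        struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
        unsigned long flags;
    #ifdef CONFIG_LITMUS_DGL_SUPPORT
        raw_spinlock_t *dgl_lock;
    #endif

        if (mutex->owner != t)
            return -EINVAL;  /* reject before taking any locks */

    #ifdef CONFIG_LITMUS_DGL_SUPPORT
        dgl_lock = litmus->get_dgl_spinlock(current);
    #endif

        lock_global_irqsave(dgl_lock, flags);   /* coarse DGL lock first */
        lock_fine_irqsave(&mutex->lock, flags); /* then this mutex's own lock */

        /* ... drop inheritance, pick the next owner, wake it ... */

        unlock_global_irqrestore(dgl_lock, flags);
        return err;
    }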