diff options
| author | Thomas Gleixner <tglx@linutronix.de> | 2014-05-21 23:25:54 -0400 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2014-06-21 16:05:30 -0400 |
| commit | a57594a13a446d1a6ab1dcd48339f799ce586843 (patch) | |
| tree | a68374df7f7de66edce42f10ba014de8a0286f73 /kernel/locking | |
| parent | 2ffa5a5cd2fe792b6399c903d5172adf088d8ff7 (diff) | |
rtmutex: Clarify the boost/deboost part
Add a separate local variable for the boost/deboost logic to make the
code more readable. Add comments where appropriate.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/locking')
| -rw-r--r-- | kernel/locking/rtmutex.c | 58 |
1 file changed, 48 insertions, 10 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index cd517091fb21..3e9a75991e83 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c | |||
| @@ -345,9 +345,10 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, | |||
| 345 | struct rt_mutex_waiter *orig_waiter, | 345 | struct rt_mutex_waiter *orig_waiter, |
| 346 | struct task_struct *top_task) | 346 | struct task_struct *top_task) |
| 347 | { | 347 | { |
| 348 | struct rt_mutex *lock; | ||
| 349 | struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; | 348 | struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; |
| 349 | struct rt_mutex_waiter *prerequeue_top_waiter; | ||
| 350 | int detect_deadlock, ret = 0, depth = 0; | 350 | int detect_deadlock, ret = 0, depth = 0; |
| 351 | struct rt_mutex *lock; | ||
| 351 | unsigned long flags; | 352 | unsigned long flags; |
| 352 | 353 | ||
| 353 | detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter, | 354 | detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter, |
| @@ -454,9 +455,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, | |||
| 454 | goto out_unlock_pi; | 455 | goto out_unlock_pi; |
| 455 | } | 456 | } |
| 456 | 457 | ||
| 457 | top_waiter = rt_mutex_top_waiter(lock); | 458 | /* |
| 459 | * Store the current top waiter before doing the requeue | ||
| 460 | * operation on @lock. We need it for the boost/deboost | ||
| 461 | * decision below. | ||
| 462 | */ | ||
| 463 | prerequeue_top_waiter = rt_mutex_top_waiter(lock); | ||
| 458 | 464 | ||
| 459 | /* Requeue the waiter */ | 465 | /* Requeue the waiter in the lock waiter list. */ |
| 460 | rt_mutex_dequeue(lock, waiter); | 466 | rt_mutex_dequeue(lock, waiter); |
| 461 | waiter->prio = task->prio; | 467 | waiter->prio = task->prio; |
| 462 | rt_mutex_enqueue(lock, waiter); | 468 | rt_mutex_enqueue(lock, waiter); |
| @@ -465,35 +471,58 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, | |||
| 465 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); | 471 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
| 466 | put_task_struct(task); | 472 | put_task_struct(task); |
| 467 | 473 | ||
| 474 | /* | ||
| 475 | * We must abort the chain walk if there is no lock owner even | ||
| 476 | * in the dead lock detection case, as we have nothing to | ||
| 477 | * follow here. This is the end of the chain we are walking. | ||
| 478 | */ | ||
| 468 | if (!rt_mutex_owner(lock)) { | 479 | if (!rt_mutex_owner(lock)) { |
| 469 | /* | 480 | /* |
| 470 | * If the requeue above changed the top waiter, then we need | 481 | * If the requeue above changed the top waiter, then we need |
| 471 | * to wake the new top waiter up to try to get the lock. | 482 | * to wake the new top waiter up to try to get the lock. |
| 472 | */ | 483 | */ |
| 473 | 484 | if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) | |
| 474 | if (top_waiter != rt_mutex_top_waiter(lock)) | ||
| 475 | wake_up_process(rt_mutex_top_waiter(lock)->task); | 485 | wake_up_process(rt_mutex_top_waiter(lock)->task); |
| 476 | raw_spin_unlock(&lock->wait_lock); | 486 | raw_spin_unlock(&lock->wait_lock); |
| 477 | return 0; | 487 | return 0; |
| 478 | } | 488 | } |
| 479 | 489 | ||
| 480 | /* Grab the next task */ | 490 | /* Grab the next task, i.e. the owner of @lock */ |
| 481 | task = rt_mutex_owner(lock); | 491 | task = rt_mutex_owner(lock); |
| 482 | get_task_struct(task); | 492 | get_task_struct(task); |
| 483 | raw_spin_lock_irqsave(&task->pi_lock, flags); | 493 | raw_spin_lock_irqsave(&task->pi_lock, flags); |
| 484 | 494 | ||
| 485 | if (waiter == rt_mutex_top_waiter(lock)) { | 495 | if (waiter == rt_mutex_top_waiter(lock)) { |
| 486 | /* Boost the owner */ | 496 | /* |
| 487 | rt_mutex_dequeue_pi(task, top_waiter); | 497 | * The waiter became the new top (highest priority) |
| 498 | * waiter on the lock. Replace the previous top waiter | ||
| 499 | * in the owner task's pi waiters list with this waiter | ||
| 500 | * and adjust the priority of the owner. | ||
| 501 | */ | ||
| 502 | rt_mutex_dequeue_pi(task, prerequeue_top_waiter); | ||
| 488 | rt_mutex_enqueue_pi(task, waiter); | 503 | rt_mutex_enqueue_pi(task, waiter); |
| 489 | __rt_mutex_adjust_prio(task); | 504 | __rt_mutex_adjust_prio(task); |
| 490 | 505 | ||
| 491 | } else if (top_waiter == waiter) { | 506 | } else if (prerequeue_top_waiter == waiter) { |
| 492 | /* Deboost the owner */ | 507 | /* |
| 508 | * The waiter was the top waiter on the lock, but is | ||
| 509 | * no longer the top priority waiter. Replace waiter in | ||
| 510 | * the owner task's pi waiters list with the new top | ||
| 511 | * (highest priority) waiter and adjust the priority | ||
| 512 | * of the owner. | ||
| 513 | * The new top waiter is stored in @waiter so that | ||
| 514 | * @waiter == @top_waiter evaluates to true below and | ||
| 515 | * we continue to deboost the rest of the chain. | ||
| 516 | */ | ||
| 493 | rt_mutex_dequeue_pi(task, waiter); | 517 | rt_mutex_dequeue_pi(task, waiter); |
| 494 | waiter = rt_mutex_top_waiter(lock); | 518 | waiter = rt_mutex_top_waiter(lock); |
| 495 | rt_mutex_enqueue_pi(task, waiter); | 519 | rt_mutex_enqueue_pi(task, waiter); |
| 496 | __rt_mutex_adjust_prio(task); | 520 | __rt_mutex_adjust_prio(task); |
| 521 | } else { | ||
| 522 | /* | ||
| 523 | * Nothing changed. No need to do any priority | ||
| 524 | * adjustment. | ||
| 525 | */ | ||
| 497 | } | 526 | } |
| 498 | 527 | ||
| 499 | /* | 528 | /* |
| @@ -506,6 +535,10 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, | |||
| 506 | 535 | ||
| 507 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); | 536 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
| 508 | 537 | ||
| 538 | /* | ||
| 539 | * Store the top waiter of @lock for the end of chain walk | ||
| 540 | * decision below. | ||
| 541 | */ | ||
| 509 | top_waiter = rt_mutex_top_waiter(lock); | 542 | top_waiter = rt_mutex_top_waiter(lock); |
| 510 | raw_spin_unlock(&lock->wait_lock); | 543 | raw_spin_unlock(&lock->wait_lock); |
| 511 | 544 | ||
| @@ -516,6 +549,11 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, | |||
| 516 | if (!next_lock) | 549 | if (!next_lock) |
| 517 | goto out_put_task; | 550 | goto out_put_task; |
| 518 | 551 | ||
| 552 | /* | ||
| 553 | * If the current waiter is not the top waiter on the lock, | ||
| 554 | * then we can stop the chain walk here if we are not in full | ||
| 555 | * deadlock detection mode. | ||
| 556 | */ | ||
| 519 | if (!detect_deadlock && waiter != top_waiter) | 557 | if (!detect_deadlock && waiter != top_waiter) |
| 520 | goto out_put_task; | 558 | goto out_put_task; |
| 521 | 559 | ||
