aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/locking
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2014-06-10 19:01:13 -0400
committerThomas Gleixner <tglx@linutronix.de>2014-06-21 16:05:30 -0400
commit358c331f391f3e0432f4f96f25017d12ac8d10b1 (patch)
tree6346e9262b2005bb21483f77c9bd09dace32d0b5 /kernel/locking
parent88f2b4c15e561bb5c28709d666364f273bf54b98 (diff)
rtmutex: Simplify and document try_to_take_rtmutex()
The current implementation of try_to_take_rtmutex() is correct, but requires more than a single brain twist to understand the clever encoded conditionals. Untangle it and document the cases proper. Looks less efficient at the first glance, but actually reduces the binary code size on x86_64 by 80 bytes. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/locking')
-rw-r--r--kernel/locking/rtmutex.c133
1 files changed, 88 insertions, 45 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 50bc93b3552f..39c9f8075e14 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -533,76 +533,119 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
533 * 533 *
534 * Must be called with lock->wait_lock held. 534 * Must be called with lock->wait_lock held.
535 * 535 *
536 * @lock: the lock to be acquired. 536 * @lock: The lock to be acquired.
537 * @task: the task which wants to acquire the lock 537 * @task: The task which wants to acquire the lock
538 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL) 538 * @waiter: The waiter that is queued to the lock's wait list if the
539 * callsite called task_blocked_on_lock(), otherwise NULL
539 */ 540 */
540static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, 541static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
541 struct rt_mutex_waiter *waiter) 542 struct rt_mutex_waiter *waiter)
542{ 543{
544 unsigned long flags;
545
543 /* 546 /*
544 * We have to be careful here if the atomic speedups are 547 * Before testing whether we can acquire @lock, we set the
545 * enabled, such that, when 548 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
546 * - no other waiter is on the lock 549 * other tasks which try to modify @lock into the slow path
547 * - the lock has been released since we did the cmpxchg 550 * and they serialize on @lock->wait_lock.
548 * the lock can be released or taken while we are doing the
549 * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
550 * 551 *
551 * The atomic acquire/release aware variant of 552 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
552 * mark_rt_mutex_waiters uses a cmpxchg loop. After setting 553 * as explained at the top of this file if and only if:
553 * the WAITERS bit, the atomic release / acquire can not
554 * happen anymore and lock->wait_lock protects us from the
555 * non-atomic case.
556 * 554 *
557 * Note, that this might set lock->owner = 555 * - There is a lock owner. The caller must fixup the
558 * RT_MUTEX_HAS_WAITERS in the case the lock is not contended 556 * transient state if it does a trylock or leaves the lock
559 * any more. This is fixed up when we take the ownership. 557 * function due to a signal or timeout.
560 * This is the transitional state explained at the top of this file. 558 *
559 * - @task acquires the lock and there are no other
560 * waiters. This is undone in rt_mutex_set_owner(@task) at
561 * the end of this function.
561 */ 562 */
562 mark_rt_mutex_waiters(lock); 563 mark_rt_mutex_waiters(lock);
563 564
565 /*
566 * If @lock has an owner, give up.
567 */
564 if (rt_mutex_owner(lock)) 568 if (rt_mutex_owner(lock))
565 return 0; 569 return 0;
566 570
567 /* 571 /*
568 * It will get the lock because of one of these conditions: 572 * If @waiter != NULL, @task has already enqueued the waiter
569 * 1) there is no waiter 573 * into @lock waiter list. If @waiter == NULL then this is a
570 * 2) higher priority than waiters 574 * trylock attempt.
571 * 3) it is top waiter
572 */ 575 */
573 if (rt_mutex_has_waiters(lock)) { 576 if (waiter) {
574 if (task->prio >= rt_mutex_top_waiter(lock)->prio) { 577 /*
575 if (!waiter || waiter != rt_mutex_top_waiter(lock)) 578 * If waiter is not the highest priority waiter of
576 return 0; 579 * @lock, give up.
577 } 580 */
578 } 581 if (waiter != rt_mutex_top_waiter(lock))
579 582 return 0;
580 if (waiter || rt_mutex_has_waiters(lock)) {
581 unsigned long flags;
582 struct rt_mutex_waiter *top;
583
584 raw_spin_lock_irqsave(&task->pi_lock, flags);
585 583
586 /* remove the queued waiter. */ 584 /*
587 if (waiter) { 585 * We can acquire the lock. Remove the waiter from the
588 rt_mutex_dequeue(lock, waiter); 586 * lock waiters list.
589 task->pi_blocked_on = NULL; 587 */
590 } 588 rt_mutex_dequeue(lock, waiter);
591 589
590 } else {
592 /* 591 /*
593 * We have to enqueue the top waiter(if it exists) into 592 * If the lock has waiters already we check whether @task is
594 * task->pi_waiters list. 593 * eligible to take over the lock.
594 *
595 * If there are no other waiters, @task can acquire
596 * the lock. @task->pi_blocked_on is NULL, so it does
597 * not need to be dequeued.
595 */ 598 */
596 if (rt_mutex_has_waiters(lock)) { 599 if (rt_mutex_has_waiters(lock)) {
597 top = rt_mutex_top_waiter(lock); 600 /*
598 rt_mutex_enqueue_pi(task, top); 601 * If @task->prio is greater than or equal to
602 * the top waiter priority (kernel view),
603 * @task lost.
604 */
605 if (task->prio >= rt_mutex_top_waiter(lock)->prio)
606 return 0;
607
608 /*
609 * The current top waiter stays enqueued. We
610 * don't have to change anything in the lock
611 * waiters order.
612 */
613 } else {
614 /*
615 * No waiters. Take the lock without the
616 * pi_lock dance.@task->pi_blocked_on is NULL
617 * and we have no waiters to enqueue in @task
618 * pi waiters list.
619 */
620 goto takeit;
599 } 621 }
600 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
601 } 622 }
602 623
624 /*
625 * Clear @task->pi_blocked_on. Requires protection by
626 * @task->pi_lock. Redundant operation for the @waiter == NULL
627 * case, but conditionals are more expensive than a redundant
628 * store.
629 */
630 raw_spin_lock_irqsave(&task->pi_lock, flags);
631 task->pi_blocked_on = NULL;
632 /*
633 * Finish the lock acquisition. @task is the new owner. If
634 * other waiters exist we have to insert the highest priority
635 * waiter into @task->pi_waiters list.
636 */
637 if (rt_mutex_has_waiters(lock))
638 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
639 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
640
641takeit:
603 /* We got the lock. */ 642 /* We got the lock. */
604 debug_rt_mutex_lock(lock); 643 debug_rt_mutex_lock(lock);
605 644
645 /*
646 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
647 * are still waiters or clears it.
648 */
606 rt_mutex_set_owner(lock, task); 649 rt_mutex_set_owner(lock, task);
607 650
608 rt_mutex_deadlock_account_lock(lock, task); 651 rt_mutex_deadlock_account_lock(lock, task);