author    Thomas Gleixner <tglx@linutronix.de>  2014-05-21 23:25:57 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2014-06-21 16:05:31 -0400
commit    67792e2cabadbadd1a93f6790fa7bcbd47eca7c3 (patch)
tree      da0fbc4068e2b6ac73006f58878b036bf607d918 /kernel/locking
parent    8930ed80f970a90a795239e7415c9b0e6f964649 (diff)
rtmutex: Avoid pointless requeueing in the deadlock detection chain walk
In case the deadlock detector is enabled we follow the lock chain to the
end in rt_mutex_adjust_prio_chain, even if we could stop earlier due to
the priority/waiter constellation.

But once we are no longer the top priority waiter in a certain step, or
the task holding the lock already has the same priority, there is no
point in dequeueing and enqueueing along the lock chain as there is no
change at all. So stop the requeueing at this point.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Link: http://lkml.kernel.org/r/20140522031950.280830190@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
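To make the decision this patch introduces concrete, here is a minimal userspace C sketch of the three-way outcome at each chain walk step. The names (chain_step, walk_action) are hypothetical illustrations, not kernel code: when the current waiter is no longer the top pi waiter, or boosting would not change the owner's priority, the walk either stops (detector off) or continues without requeueing (detector on).

#include <stdbool.h>
#include <stdio.h>

enum walk_action { WALK_STOP, WALK_REQUEUE, WALK_DETECT_ONLY };

/*
 * Hypothetical condensation of the checks added to
 * rt_mutex_adjust_prio_chain(): requeueing is pointless when we are
 * not the top pi waiter or when the priorities already match.
 */
static enum walk_action chain_step(bool detect_deadlock,
				   bool is_top_pi_waiter,
				   int waiter_prio, int task_prio)
{
	if (!is_top_pi_waiter || waiter_prio == task_prio)
		return detect_deadlock ? WALK_DETECT_ONLY : WALK_STOP;
	return WALK_REQUEUE;
}

int main(void)
{
	/* Detector off and nothing changes: stop the walk early. */
	printf("%d\n", chain_step(false, false, 10, 10)); /* WALK_STOP */
	/* Detector on: keep walking, but skip the requeue work. */
	printf("%d\n", chain_step(true, false, 10, 10));  /* WALK_DETECT_ONLY */
	/* Top waiter with a differing priority: full requeue step. */
	printf("%d\n", chain_step(true, true, 5, 10));    /* WALK_REQUEUE */
	return 0;
}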
Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/rtmutex.c	77
1 file changed, 70 insertions(+), 7 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index c6ffdaa21b67..a0ea2a141b3b 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -419,6 +419,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	struct rt_mutex *lock;
 	bool detect_deadlock;
 	unsigned long flags;
+	bool requeue = true;
 
 	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
 
@@ -508,18 +509,31 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 			goto out_unlock_pi;
 		/*
 		 * If deadlock detection is off, we stop here if we
-		 * are not the top pi waiter of the task.
+		 * are not the top pi waiter of the task. If deadlock
+		 * detection is enabled we continue, but stop the
+		 * requeueing in the chain walk.
 		 */
-		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
-			goto out_unlock_pi;
+		if (top_waiter != task_top_pi_waiter(task)) {
+			if (!detect_deadlock)
+				goto out_unlock_pi;
+			else
+				requeue = false;
+		}
 	}
 
 	/*
-	 * When deadlock detection is off then we check, if further
-	 * priority adjustment is necessary.
+	 * If the waiter priority is the same as the task priority
+	 * then there is no further priority adjustment necessary. If
+	 * deadlock detection is off, we stop the chain walk. If its
+	 * enabled we continue, but stop the requeueing in the chain
+	 * walk.
 	 */
-	if (!detect_deadlock && waiter->prio == task->prio)
-		goto out_unlock_pi;
+	if (waiter->prio == task->prio) {
+		if (!detect_deadlock)
+			goto out_unlock_pi;
+		else
+			requeue = false;
+	}
 
 	/*
 	 * [4] Get the next lock
@@ -553,6 +567,55 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	}
 
 	/*
+	 * If we just follow the lock chain for deadlock detection, no
+	 * need to do all the requeue operations. To avoid a truckload
+	 * of conditionals around the various places below, just do the
+	 * minimum chain walk checks.
+	 */
+	if (!requeue) {
+		/*
+		 * No requeue[7] here. Just release @task [8]
+		 */
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		put_task_struct(task);
+
+		/*
+		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
+		 * If there is no owner of the lock, end of chain.
+		 */
+		if (!rt_mutex_owner(lock)) {
+			raw_spin_unlock(&lock->wait_lock);
+			return 0;
+		}
+
+		/* [10] Grab the next task, i.e. owner of @lock */
+		task = rt_mutex_owner(lock);
+		get_task_struct(task);
+		raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+		/*
+		 * No requeue [11] here. We just do deadlock detection.
+		 *
+		 * [12] Store whether owner is blocked
+		 * itself. Decision is made after dropping the locks
+		 */
+		next_lock = task_blocked_on_lock(task);
+		/*
+		 * Get the top waiter for the next iteration
+		 */
+		top_waiter = rt_mutex_top_waiter(lock);
+
+		/* [13] Drop locks */
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock(&lock->wait_lock);
+
+		/* If owner is not blocked, end of chain. */
+		if (!next_lock)
+			goto out_put_task;
+		goto again;
+	}
+
+	/*
 	 * Store the current top waiter before doing the requeue
 	 * operation on @lock. We need it for the boost/deboost
 	 * decision below.
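For context, the !requeue fast path above reduces each step to a pure traversal of the owner chain. The following userspace model is a hedged sketch (hypothetical walk_chain helper; all locking omitted) of what that traversal decides: end of chain when a lock has no owner or the owner is not blocked, deadlock when the chain loops back to the original lock.

#include <stdio.h>

struct lock;

struct task {
	struct lock *blocked_on;	/* lock this task waits on, or NULL */
};

struct lock {
	struct task *owner;		/* current owner, or NULL */
};

/*
 * Hypothetical model of the detection-only chain walk: follow
 * owner->blocked_on links and report a deadlock if we return to
 * the lock the walk started from.
 */
static int walk_chain(struct lock *orig_lock)
{
	struct lock *lock = orig_lock;

	for (;;) {
		struct task *owner = lock->owner;

		if (!owner)		/* no owner: end of chain */
			return 0;
		lock = owner->blocked_on;
		if (!lock)		/* owner not blocked: end of chain */
			return 0;
		if (lock == orig_lock)	/* chain loops back: deadlock */
			return -1;
	}
}

int main(void)
{
	struct task a, b;
	struct lock l1 = { .owner = &a }, l2 = { .owner = &b };

	a.blocked_on = &l2;
	b.blocked_on = &l1;	/* A waits on l2, B waits on l1: a cycle */
	printf("deadlock: %d\n", walk_chain(&l1) < 0);
	return 0;
}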