author     Linus Torvalds <torvalds@linux-foundation.org>  2015-06-24 17:46:01 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-24 17:46:01 -0400
commit     a262948335bc5359b82f0ed5ef35f6e82ca44d16 (patch)
tree       ed03ae6d9e95d0f317115906ec79e540806e98f4 /kernel/locking/rtmutex.c
parent     e3d8238d7f5c3f539a29f5ac596cd342d847e099 (diff)
parent     9f40a51a35a0e1445cc4873251c3df2631eda294 (diff)
Merge branch 'sched-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Thomas Gleixner:
"These locking updates depend on the alreay merged sched/core branch:
- Lockless top waiter wakeup for rtmutex (Davidlohr)
- Reduce hash bucket lock contention for PI futexes (Sebastian)
- Documentation update (Davidlohr)"
* 'sched-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
locking/rtmutex: Update stale plist comments
futex: Lower the lock contention on the HB lock during wake up
locking/rtmutex: Implement lockless top-waiter wakeup
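The common thread in the first and third patches is the wake_q mechanism from the already merged sched/core branch: instead of calling wake_up_process() while still holding a hot lock, a waiter is merely queued, and the actual wakeup runs after the lock is dropped, shortening the critical section. A minimal userspace sketch of that deferred-wakeup pattern (illustrative names only, not the kernel API):

/*
 * Sketch of the deferred-wakeup idea: queue under the lock, wake after
 * releasing it. All names here are illustrative.
 */
#include <pthread.h>
#include <stddef.h>

struct wake_node {
	pthread_cond_t *cond;      /* what to signal for this waiter */
	struct wake_node *next;
};

struct wake_queue {
	struct wake_node *head;
};

/* Called with the protecting mutex held: O(1), no wakeup side effects. */
static void wake_queue_add(struct wake_queue *q, struct wake_node *n)
{
	n->next = q->head;
	q->head = n;
}

/* Called after the protecting mutex is released: do the real wakeups. */
static void wake_queue_flush(struct wake_queue *q)
{
	struct wake_node *n = q->head;

	while (n) {
		struct wake_node *next = n->next;

		pthread_cond_signal(n->cond);
		n = next;
	}
	q->head = NULL;
}

The kernel's wake_q additionally takes a reference on each queued task in wake_q_add(), so a task cannot exit between being queued and wake_up_q() running.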
Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r--  kernel/locking/rtmutex.c  87
1 file changed, 56 insertions(+), 31 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 36573e96a477..5674b073473c 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -300,7 +300,7 @@ static void __rt_mutex_adjust_prio(struct task_struct *task)
  * of task. We do not use the spin_xx_mutex() variants here as we are
  * outside of the debug path.)
  */
-static void rt_mutex_adjust_prio(struct task_struct *task)
+void rt_mutex_adjust_prio(struct task_struct *task)
 {
 	unsigned long flags;
 
@@ -624,7 +624,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 */
 	prerequeue_top_waiter = rt_mutex_top_waiter(lock);
 
-	/* [7] Requeue the waiter in the lock waiter list. */
+	/* [7] Requeue the waiter in the lock waiter tree. */
 	rt_mutex_dequeue(lock, waiter);
 	waiter->prio = task->prio;
 	rt_mutex_enqueue(lock, waiter);
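The list -> tree wording fixes in this and the following hunks are catch-up for the earlier plist-to-rbtree conversion of the waiter queues; the code has kept waiters in a priority-sorted rbtree with a cached leftmost node for some time, only the comments still said "list". For reference, the top-waiter accessor of this era (lightly abridged from rtmutex_common.h, quoted from memory):

static inline struct rt_mutex_waiter *
rt_mutex_top_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *w;

	/* The cached leftmost rbtree node is the highest-priority waiter. */
	w = rb_entry(lock->waiters_leftmost, struct rt_mutex_waiter,
		     tree_entry);
	BUG_ON(w->lock != lock);

	return w;
}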
@@ -662,7 +662,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		/*
 		 * The waiter became the new top (highest priority)
 		 * waiter on the lock. Replace the previous top waiter
-		 * in the owner tasks pi waiters list with this waiter
+		 * in the owner tasks pi waiters tree with this waiter
 		 * and adjust the priority of the owner.
 		 */
 		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
@@ -673,7 +673,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		/*
 		 * The waiter was the top waiter on the lock, but is
 		 * no longer the top prority waiter. Replace waiter in
-		 * the owner tasks pi waiters list with the new top
+		 * the owner tasks pi waiters tree with the new top
 		 * (highest priority) waiter and adjust the priority
 		 * of the owner.
 		 * The new top waiter is stored in @waiter so that
@@ -747,7 +747,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
  *
  * @lock:   The lock to be acquired.
  * @task:   The task which wants to acquire the lock
- * @waiter: The waiter that is queued to the lock's wait list if the
+ * @waiter: The waiter that is queued to the lock's wait tree if the
  *	    callsite called task_blocked_on_lock(), otherwise NULL
  */
 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
@@ -782,7 +782,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 
 	/*
 	 * If @waiter != NULL, @task has already enqueued the waiter
-	 * into @lock waiter list. If @waiter == NULL then this is a
+	 * into @lock waiter tree. If @waiter == NULL then this is a
 	 * trylock attempt.
 	 */
 	if (waiter) {
@@ -795,7 +795,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 
 		/*
 		 * We can acquire the lock. Remove the waiter from the
-		 * lock waiters list.
+		 * lock waiters tree.
 		 */
 		rt_mutex_dequeue(lock, waiter);
 
@@ -827,7 +827,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 			 * No waiters. Take the lock without the
 			 * pi_lock dance.@task->pi_blocked_on is NULL
 			 * and we have no waiters to enqueue in @task
-			 * pi waiters list.
+			 * pi waiters tree.
 			 */
 			goto takeit;
 		}
@@ -844,7 +844,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 	/*
 	 * Finish the lock acquisition. @task is the new owner. If
 	 * other waiters exist we have to insert the highest priority
-	 * waiter into @task->pi_waiters list.
+	 * waiter into @task->pi_waiters tree.
 	 */
 	if (rt_mutex_has_waiters(lock))
 		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
@@ -955,14 +955,13 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 }
 
 /*
- * Wake up the next waiter on the lock.
- *
- * Remove the top waiter from the current tasks pi waiter list and
- * wake it up.
+ * Remove the top waiter from the current tasks pi waiter tree and
+ * queue it up.
  *
  * Called with lock->wait_lock held.
  */
-static void wakeup_next_waiter(struct rt_mutex *lock)
+static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
+				    struct rt_mutex *lock)
 {
 	struct rt_mutex_waiter *waiter;
 	unsigned long flags;
@@ -991,12 +990,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 
 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
-	/*
-	 * It's safe to dereference waiter as it cannot go away as
-	 * long as we hold lock->wait_lock. The waiter task needs to
-	 * acquire it in order to dequeue the waiter.
-	 */
-	wake_up_process(waiter->task);
+	wake_q_add(wake_q, waiter->task);
 }
 
 /*
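The dropped comment is not lost information: wake_q_add() pins the task with its own reference, so the "waiter cannot go away" guarantee no longer depends on holding lock->wait_lock. The two primitives come from the sched/core dependency; abridged and slightly simplified from kernel/sched/core.c of this era (quoted from memory, not verbatim), they behave roughly like:

void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
	struct wake_q_node *node = &task->wake_q;

	/* Already queued by us or someone else: that wakeup suffices. */
	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
		return;

	/*
	 * Pin the task: this is what makes the old "safe to dereference
	 * waiter" comment unnecessary. The queue holds its own reference.
	 */
	get_task_struct(task);

	/* The head is context local, there can be no concurrency. */
	*head->lastp = node;
	head->lastp = &node->next;
}

void wake_up_q(struct wake_q_head *head)
{
	struct wake_q_node *node = head->first;

	while (node != WAKE_Q_TAIL) {
		struct task_struct *task;

		task = container_of(node, struct task_struct, wake_q);
		node = node->next;
		/* The task can safely be re-queued from here on. */
		task->wake_q.next = NULL;

		wake_up_process(task);
		put_task_struct(task);
	}
}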
@@ -1250,10 +1244,11 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 }
 
 /*
- * Slow path to release a rt-mutex:
+ * Slow path to release a rt-mutex.
+ * Return whether the current task needs to undo a potential priority boosting.
  */
-static void __sched
-rt_mutex_slowunlock(struct rt_mutex *lock)
+static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+					struct wake_q_head *wake_q)
 {
 	raw_spin_lock(&lock->wait_lock);
 
@@ -1295,7 +1290,7 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
 	while (!rt_mutex_has_waiters(lock)) {
 		/* Drops lock->wait_lock ! */
 		if (unlock_rt_mutex_safe(lock) == true)
-			return;
+			return false;
 		/* Relock the rtmutex and try again */
 		raw_spin_lock(&lock->wait_lock);
 	}
@@ -1303,13 +1298,15 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
 	/*
 	 * The wakeup next waiter path does not suffer from the above
 	 * race. See the comments there.
+	 *
+	 * Queue the next waiter for wakeup once we release the wait_lock.
 	 */
-	wakeup_next_waiter(lock);
+	mark_wakeup_next_waiter(wake_q, lock);
 
 	raw_spin_unlock(&lock->wait_lock);
 
-	/* Undo pi boosting if necessary: */
-	rt_mutex_adjust_prio(current);
+	/* check PI boosting */
+	return true;
 }
 
 /*
@@ -1360,12 +1357,23 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
 
 static inline void
 rt_mutex_fastunlock(struct rt_mutex *lock,
-		    void (*slowfn)(struct rt_mutex *lock))
+		    bool (*slowfn)(struct rt_mutex *lock,
+				   struct wake_q_head *wqh))
 {
-	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
+	WAKE_Q(wake_q);
+
+	if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
 		rt_mutex_deadlock_account_unlock(current);
-	else
-		slowfn(lock);
+
+	} else {
+		bool deboost = slowfn(lock, &wake_q);
+
+		wake_up_q(&wake_q);
+
+		/* Undo pi boosting if necessary: */
+		if (deboost)
+			rt_mutex_adjust_prio(current);
+	}
 }
 
 /**
@@ -1467,6 +1475,23 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
 /**
+ * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
+ * @lock: the rt_mutex to be unlocked
+ *
+ * Returns: true/false indicating whether priority adjustment is
+ * required or not.
+ */
+bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
+				   struct wake_q_head *wqh)
+{
+	if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
+		rt_mutex_deadlock_account_unlock(current);
+		return false;
+	}
+	return rt_mutex_slowunlock(lock, wqh);
+}
+
+/**
  * rt_mutex_destroy - mark a mutex unusable
  * @lock: the mutex to be destroyed
  *
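The new rt_mutex_futex_unlock() exists so the futex code can take the deboost decision and the queued wakeup back from the rtmutex layer and act on both only after dropping the hash-bucket lock, which is where the contention reduction comes from. A sketch of the intended caller shape on the futex side (hypothetical wrapper name; approximated from the companion "futex: Lower the lock contention on the HB lock during wake up" patch, not the verbatim kernel/futex.c hunk):

static void wake_futex_pi_sketch(struct futex_hash_bucket *hb,
				 struct futex_pi_state *pi_state)
{
	WAKE_Q(wake_q);
	bool deboost;

	/* Queue the top waiter; no wakeup happens under hb->lock. */
	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

	spin_unlock(&hb->lock);

	/* Wakeup and deboost run without the hash-bucket lock held. */
	wake_up_q(&wake_q);
	if (deboost)
		rt_mutex_adjust_prio(current);
}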