Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/rtmutex.c        | 240
 -rw-r--r--  kernel/rtmutex_common.h |   8
 2 files changed, 195 insertions(+), 53 deletions(-)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 69d9cb921ffa..fec77e7e0562 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -300,7 +300,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
  * assigned pending owner [which might not have taken the
  * lock yet]:
  */
-static inline int try_to_steal_lock(struct rt_mutex *lock)
+static inline int try_to_steal_lock(struct rt_mutex *lock,
+				    struct task_struct *task)
 {
 	struct task_struct *pendowner = rt_mutex_owner(lock);
 	struct rt_mutex_waiter *next;
@@ -309,11 +310,11 @@ static inline int try_to_steal_lock(struct rt_mutex *lock)
 	if (!rt_mutex_owner_pending(lock))
 		return 0;
 
-	if (pendowner == current)
+	if (pendowner == task)
 		return 1;
 
 	spin_lock_irqsave(&pendowner->pi_lock, flags);
-	if (current->prio >= pendowner->prio) {
+	if (task->prio >= pendowner->prio) {
 		spin_unlock_irqrestore(&pendowner->pi_lock, flags);
 		return 0;
 	}
@@ -338,21 +339,21 @@ static inline int try_to_steal_lock(struct rt_mutex *lock)
 	 * We are going to steal the lock and a waiter was
 	 * enqueued on the pending owners pi_waiters queue. So
 	 * we have to enqueue this waiter into
-	 * current->pi_waiters list. This covers the case,
-	 * where current is boosted because it holds another
+	 * task->pi_waiters list. This covers the case,
+	 * where task is boosted because it holds another
 	 * lock and gets unboosted because the booster is
 	 * interrupted, so we would delay a waiter with higher
-	 * priority as current->normal_prio.
+	 * priority as task->normal_prio.
 	 *
 	 * Note: in the rare case of a SCHED_OTHER task changing
 	 * its priority and thus stealing the lock, next->task
-	 * might be current:
+	 * might be task:
 	 */
-	if (likely(next->task != current)) {
-		spin_lock_irqsave(&current->pi_lock, flags);
-		plist_add(&next->pi_list_entry, &current->pi_waiters);
-		__rt_mutex_adjust_prio(current);
-		spin_unlock_irqrestore(&current->pi_lock, flags);
+	if (likely(next->task != task)) {
+		spin_lock_irqsave(&task->pi_lock, flags);
+		plist_add(&next->pi_list_entry, &task->pi_waiters);
+		__rt_mutex_adjust_prio(task);
+		spin_unlock_irqrestore(&task->pi_lock, flags);
 	}
 	return 1;
 }
@@ -389,7 +390,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock)
 	 */
 	mark_rt_mutex_waiters(lock);
 
-	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
+	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock, current))
 		return 0;
 
 	/* We got the lock. */
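
The hunks above decouple lock stealing from current: try_to_steal_lock() now decides on behalf of an arbitrary task, and try_to_take_rt_mutex() keeps the old behavior by passing current. A minimal sketch of the two resulting call patterns (waiter_task is a hypothetical name for a sleeping task being acquired for, not an identifier from this patch):

	/* Self-acquisition, as in try_to_take_rt_mutex() above: */
	if (rt_mutex_owner(lock) && !try_to_steal_lock(lock, current))
		return 0;	/* the pending owner has higher priority */

	/* Proxy acquisition, as rt_mutex_start_proxy_lock() below does:
	 * attempt the steal for waiter_task, which is asleep and cannot
	 * run this code itself. */
	if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, waiter_task))
		rt_mutex_set_owner(lock, waiter_task, 0);
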
@@ -411,6 +412,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock)
  */
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 				   struct rt_mutex_waiter *waiter,
+				   struct task_struct *task,
 				   int detect_deadlock)
 {
 	struct task_struct *owner = rt_mutex_owner(lock);
@@ -418,21 +420,21 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	unsigned long flags;
 	int chain_walk = 0, res;
 
-	spin_lock_irqsave(&current->pi_lock, flags);
-	__rt_mutex_adjust_prio(current);
-	waiter->task = current;
+	spin_lock_irqsave(&task->pi_lock, flags);
+	__rt_mutex_adjust_prio(task);
+	waiter->task = task;
 	waiter->lock = lock;
-	plist_node_init(&waiter->list_entry, current->prio);
-	plist_node_init(&waiter->pi_list_entry, current->prio);
+	plist_node_init(&waiter->list_entry, task->prio);
+	plist_node_init(&waiter->pi_list_entry, task->prio);
 
 	/* Get the top priority waiter on the lock */
 	if (rt_mutex_has_waiters(lock))
 		top_waiter = rt_mutex_top_waiter(lock);
 	plist_add(&waiter->list_entry, &lock->wait_list);
 
-	current->pi_blocked_on = waiter;
+	task->pi_blocked_on = waiter;
 
-	spin_unlock_irqrestore(&current->pi_lock, flags);
+	spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	if (waiter == rt_mutex_top_waiter(lock)) {
 		spin_lock_irqsave(&owner->pi_lock, flags);
@@ -460,7 +462,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	spin_unlock(&lock->wait_lock);
 
 	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
-					 current);
+					 task);
 
 	spin_lock(&lock->wait_lock);
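
With the new @task argument, task_blocks_on_rt_mutex() can enqueue and boost any task, not just its caller. Both call sites after this patch appear in the hunks below; side by side they are:

	/* In __rt_mutex_slowlock(): the blocking task enqueues itself. */
	ret = task_blocks_on_rt_mutex(lock, waiter, current, detect_deadlock);

	/* In rt_mutex_start_proxy_lock(): the caller enqueues another task. */
	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
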
@@ -605,37 +607,25 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
 }
 
-/*
- * Slow path lock function:
+/**
+ * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
+ * @lock:		the rt_mutex to take
+ * @state:		the state the task should block in (TASK_INTERRUPTIBLE
+ *			or TASK_UNINTERRUPTIBLE)
+ * @timeout:		the pre-initialized and started timer, or NULL for none
+ * @waiter:		the pre-initialized rt_mutex_waiter
+ * @detect_deadlock:	passed to task_blocks_on_rt_mutex
+ *
+ * lock->wait_lock must be held by the caller.
  */
 static int __sched
-rt_mutex_slowlock(struct rt_mutex *lock, int state,
-		  struct hrtimer_sleeper *timeout,
-		  int detect_deadlock)
+__rt_mutex_slowlock(struct rt_mutex *lock, int state,
+		    struct hrtimer_sleeper *timeout,
+		    struct rt_mutex_waiter *waiter,
+		    int detect_deadlock)
 {
-	struct rt_mutex_waiter waiter;
 	int ret = 0;
 
-	debug_rt_mutex_init_waiter(&waiter);
-	waiter.task = NULL;
-
-	spin_lock(&lock->wait_lock);
-
-	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock)) {
-		spin_unlock(&lock->wait_lock);
-		return 0;
-	}
-
-	set_current_state(state);
-
-	/* Setup the timer, when timeout != NULL */
-	if (unlikely(timeout)) {
-		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-		if (!hrtimer_active(&timeout->timer))
-			timeout->task = NULL;
-	}
-
 	for (;;) {
 		/* Try to acquire the lock: */
 		if (try_to_take_rt_mutex(lock))
@@ -656,19 +646,19 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		}
 
 		/*
-		 * waiter.task is NULL the first time we come here and
+		 * waiter->task is NULL the first time we come here and
 		 * when we have been woken up by the previous owner
 		 * but the lock got stolen by a higher prio task.
 		 */
-		if (!waiter.task) {
-			ret = task_blocks_on_rt_mutex(lock, &waiter,
-						      detect_deadlock);
+		if (!waiter->task) {
+			ret = task_blocks_on_rt_mutex(lock, waiter, current,
+						      detect_deadlock);
 			/*
 			 * If we got woken up by the owner then start loop
 			 * all over without going into schedule to try
 			 * to get the lock now:
 			 */
-			if (unlikely(!waiter.task)) {
+			if (unlikely(!waiter->task)) {
 				/*
 				 * Reset the return value. We might
 				 * have returned with -EDEADLK and the
@@ -684,15 +674,52 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 		spin_unlock(&lock->wait_lock);
 
-		debug_rt_mutex_print_deadlock(&waiter);
+		debug_rt_mutex_print_deadlock(waiter);
 
-		if (waiter.task)
+		if (waiter->task)
 			schedule_rt_mutex(lock);
 
 		spin_lock(&lock->wait_lock);
 		set_current_state(state);
 	}
 
+	return ret;
+}
+
+/*
+ * Slow path lock function:
+ */
+static int __sched
+rt_mutex_slowlock(struct rt_mutex *lock, int state,
+		  struct hrtimer_sleeper *timeout,
+		  int detect_deadlock)
+{
+	struct rt_mutex_waiter waiter;
+	int ret = 0;
+
+	debug_rt_mutex_init_waiter(&waiter);
+	waiter.task = NULL;
+
+	spin_lock(&lock->wait_lock);
+
+	/* Try to acquire the lock again: */
+	if (try_to_take_rt_mutex(lock)) {
+		spin_unlock(&lock->wait_lock);
+		return 0;
+	}
+
+	set_current_state(state);
+
+	/* Setup the timer, when timeout != NULL */
+	if (unlikely(timeout)) {
+		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
+		if (!hrtimer_active(&timeout->timer))
+			timeout->task = NULL;
+	}
+
+	ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
+				  detect_deadlock);
+
 	set_current_state(TASK_RUNNING);
 
 	if (unlikely(waiter.task))
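
The split above is mechanical: rt_mutex_slowlock() keeps the setup work (waiter initialization, the initial trylock, starting the timer) and delegates only the blocking loop to __rt_mutex_slowlock(), which expects lock->wait_lock held and the waiter pre-initialized. That contract is what lets rt_mutex_finish_proxy_lock(), added below, reuse the loop with a waiter that was enqueued by another task. A condensed sketch of the caller's obligations, with error handling and waiter cleanup omitted:

	spin_lock(&lock->wait_lock);
	set_current_state(TASK_INTERRUPTIBLE);	/* or TASK_UNINTERRUPTIBLE */
	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, timeout, &waiter,
				  detect_deadlock);
	set_current_state(TASK_RUNNING);
	spin_unlock(&lock->wait_lock);
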
@@ -986,6 +1013,59 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
 }
 
 /**
+ * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock:		the rt_mutex to take
+ * @waiter:		the pre-initialized rt_mutex_waiter
+ * @task:		the task to prepare
+ * @detect_deadlock:	perform deadlock detection (1) or not (0)
+ *
+ * Returns:
+ *  0 - task blocked on lock
+ *  1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+ * Special API call for FUTEX_REQUEUE_PI support.
+ */
+int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+			      struct rt_mutex_waiter *waiter,
+			      struct task_struct *task, int detect_deadlock)
+{
+	int ret;
+
+	spin_lock(&lock->wait_lock);
+
+	mark_rt_mutex_waiters(lock);
+
+	if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
+		/* We got the lock for task. */
+		debug_rt_mutex_lock(lock);
+
+		rt_mutex_set_owner(lock, task, 0);
+
+		rt_mutex_deadlock_account_lock(lock, task);
+		return 1;
+	}
+
+	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+
+
+	if (ret && !waiter->task) {
+		/*
+		 * Reset the return value. We might have
+		 * returned with -EDEADLK and the owner
+		 * released the lock while we were walking the
+		 * pi chain. Let the waiter sort it out.
+		 */
+		ret = 0;
+	}
+	spin_unlock(&lock->wait_lock);
+
+	debug_rt_mutex_print_deadlock(waiter);
+
+	return ret;
+}
+
+/**
  * rt_mutex_next_owner - return the next owner of the lock
  *
  * @lock: the rt lock query
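
rt_mutex_start_proxy_lock() is the requeueing side of the new API pair. One caveat visible in the hunk above: the early "return 1" path exits with lock->wait_lock still held, so a caller (or a follow-up fix) has to account for that. Below is a hedged sketch of how a FUTEX_REQUEUE_PI caller might drive the function; the actual futex_requeue() integration is a separate patch, and pi_mutex, waiter_task and rt_waiter are illustrative names only:

	struct rt_mutex_waiter rt_waiter;
	int ret;

	debug_rt_mutex_init_waiter(&rt_waiter);
	rt_waiter.task = NULL;

	ret = rt_mutex_start_proxy_lock(pi_mutex, &rt_waiter, waiter_task, 0);
	if (ret == 1) {
		/* Acquired on waiter_task's behalf: wake the new owner. */
		wake_up_state(waiter_task, TASK_NORMAL);
	} else if (ret < 0) {
		/* Deadlock detected while enqueueing; report it. */
	}
	/* ret == 0: waiter_task is blocked on pi_mutex and will resume
	 * in rt_mutex_finish_proxy_lock() once it is woken. */
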
@@ -1004,3 +1084,57 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
 
 	return rt_mutex_top_waiter(lock)->task;
 }
+
+/**
+ * rt_mutex_finish_proxy_lock() - Complete lock acquisition
+ * @lock:		the rt_mutex we were woken on
+ * @to:			the timeout, NULL if none. The hrtimer should already
+ *			have been started.
+ * @waiter:		the pre-initialized rt_mutex_waiter
+ * @detect_deadlock:	perform deadlock detection (1) or not (0)
+ *
+ * Complete the lock acquisition started on our behalf by another thread.
+ *
+ * Returns:
+ *  0 - success
+ * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
+ *
+ * Special API call for PI-futex requeue support.
+ */
+int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+			       struct hrtimer_sleeper *to,
+			       struct rt_mutex_waiter *waiter,
+			       int detect_deadlock)
+{
+	int ret;
+
+	spin_lock(&lock->wait_lock);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter,
+				  detect_deadlock);
+
+	set_current_state(TASK_RUNNING);
+
+	if (unlikely(waiter->task))
+		remove_waiter(lock, waiter);
+
+	/*
+	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+	 * have to fix that up.
+	 */
+	fixup_rt_mutex_waiters(lock);
+
+	spin_unlock(&lock->wait_lock);
+
+	/*
+	 * Readjust priority, when we did not get the lock. We might have been
+	 * the pending owner and boosted. Since we did not take the lock, the
+	 * PI boost has to go.
+	 */
+	if (unlikely(ret))
+		rt_mutex_adjust_prio(current);
+
+	return ret;
+}
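
rt_mutex_finish_proxy_lock() is the waiter's half: after being requeued and woken, the task completes (or abandons) the acquisition started on its behalf. A minimal sketch of the woken side, continuing the hypothetical names from the previous example:

	/* Runs in waiter_task's context after it wakes up. */
	ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 0);
	if (ret)
		/* -EINTR, -ETIMEDOUT or -EDEADLK: we do not own pi_mutex,
		 * and any transient PI boost has been dropped. */
		return ret;
	/* Success: current now owns pi_mutex. */
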
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
index e124bf5800ea..97a2f81866af 100644
--- a/kernel/rtmutex_common.h
+++ b/kernel/rtmutex_common.h
@@ -120,6 +120,14 @@ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				       struct task_struct *proxy_owner);
 extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
 				  struct task_struct *proxy_owner);
+extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
+				     struct rt_mutex_waiter *waiter,
+				     struct task_struct *task,
+				     int detect_deadlock);
+extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+				      struct hrtimer_sleeper *to,
+				      struct rt_mutex_waiter *waiter,
+				      int detect_deadlock);
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # include "rtmutex-debug.h"