path: root/kernel/locking/rtmutex.c
Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r--  kernel/locking/rtmutex.c  135
1 file changed, 72 insertions(+), 63 deletions(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 8251e75dd9c0..3e746607abe5 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -99,13 +99,14 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
  * 2) Drop lock->wait_lock
  * 3) Try to unlock the lock with cmpxchg
  */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+					unsigned long flags)
 	__releases(lock->wait_lock)
 {
 	struct task_struct *owner = rt_mutex_owner(lock);
 
 	clear_rt_mutex_waiters(lock);
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 	/*
 	 * If a new waiter comes in between the unlock and the cmpxchg
 	 * we have two situations:
@@ -147,11 +148,12 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 /*
  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
  */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+					unsigned long flags)
 	__releases(lock->wait_lock)
 {
 	lock->owner = NULL;
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 	return true;
 }
 #endif
@@ -433,7 +435,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	int ret = 0, depth = 0;
 	struct rt_mutex *lock;
 	bool detect_deadlock;
-	unsigned long flags;
 	bool requeue = true;
 
 	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
@@ -476,7 +477,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	/*
 	 * [1] Task cannot go away as we did a get_task() before !
 	 */
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock_irq(&task->pi_lock);
 
 	/*
 	 * [2] Get the waiter on which @task is blocked on.
@@ -560,7 +561,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * operations.
 	 */
 	if (!raw_spin_trylock(&lock->wait_lock)) {
-		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		raw_spin_unlock_irq(&task->pi_lock);
 		cpu_relax();
 		goto retry;
 	}
@@ -591,7 +592,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	/*
 	 * No requeue[7] here. Just release @task [8]
 	 */
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock(&task->pi_lock);
 	put_task_struct(task);
 
 	/*
@@ -599,14 +600,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * If there is no owner of the lock, end of chain.
 	 */
 	if (!rt_mutex_owner(lock)) {
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irq(&lock->wait_lock);
 		return 0;
 	}
 
 	/* [10] Grab the next task, i.e. owner of @lock */
 	task = rt_mutex_owner(lock);
 	get_task_struct(task);
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock(&task->pi_lock);
 
 	/*
 	 * No requeue [11] here. We just do deadlock detection.
@@ -621,8 +622,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	top_waiter = rt_mutex_top_waiter(lock);
 
 	/* [13] Drop locks */
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&task->pi_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	/* If owner is not blocked, end of chain. */
 	if (!next_lock)
@@ -643,7 +644,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	rt_mutex_enqueue(lock, waiter);
 
 	/* [8] Release the task */
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock(&task->pi_lock);
 	put_task_struct(task);
 
 	/*
@@ -661,14 +662,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		 */
 		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
 			wake_up_process(rt_mutex_top_waiter(lock)->task);
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irq(&lock->wait_lock);
 		return 0;
 	}
 
 	/* [10] Grab the next task, i.e. the owner of @lock */
 	task = rt_mutex_owner(lock);
 	get_task_struct(task);
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock(&task->pi_lock);
 
 	/* [11] requeue the pi waiters if necessary */
 	if (waiter == rt_mutex_top_waiter(lock)) {
@@ -722,8 +723,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	top_waiter = rt_mutex_top_waiter(lock);
 
 	/* [13] Drop the locks */
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock(&task->pi_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	/*
 	 * Make the actual exit decisions [12], based on the stored
@@ -746,7 +747,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		goto again;
 
  out_unlock_pi:
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock_irq(&task->pi_lock);
  out_put_task:
 	put_task_struct(task);
 
@@ -756,7 +757,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 /*
  * Try to take an rt-mutex
  *
- * Must be called with lock->wait_lock held.
+ * Must be called with lock->wait_lock held and interrupts disabled
  *
  * @lock:   The lock to be acquired.
  * @task:   The task which wants to acquire the lock
@@ -766,8 +767,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 				struct rt_mutex_waiter *waiter)
 {
-	unsigned long flags;
-
 	/*
 	 * Before testing whether we can acquire @lock, we set the
 	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
@@ -852,7 +851,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 	 * case, but conditionals are more expensive than a redundant
 	 * store.
 	 */
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock(&task->pi_lock);
 	task->pi_blocked_on = NULL;
 	/*
 	 * Finish the lock acquisition. @task is the new owner. If
@@ -861,7 +860,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 	 */
 	if (rt_mutex_has_waiters(lock))
 		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock(&task->pi_lock);
 
 takeit:
 	/* We got the lock. */
@@ -883,7 +882,7 @@ takeit:
  *
  * Prepare waiter and propagate pi chain
  *
- * This must be called with lock->wait_lock held.
+ * This must be called with lock->wait_lock held and interrupts disabled
  */
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 				   struct rt_mutex_waiter *waiter,
@@ -894,7 +893,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	struct rt_mutex_waiter *top_waiter = waiter;
 	struct rt_mutex *next_lock;
 	int chain_walk = 0, res;
-	unsigned long flags;
 
 	/*
 	 * Early deadlock detection. We really don't want the task to
@@ -908,7 +906,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	if (owner == task)
 		return -EDEADLK;
 
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	raw_spin_lock(&task->pi_lock);
 	__rt_mutex_adjust_prio(task);
 	waiter->task = task;
 	waiter->lock = lock;
@@ -921,12 +919,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
 	task->pi_blocked_on = waiter;
 
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	raw_spin_unlock(&task->pi_lock);
 
 	if (!owner)
 		return 0;
 
-	raw_spin_lock_irqsave(&owner->pi_lock, flags);
+	raw_spin_lock(&owner->pi_lock);
 	if (waiter == rt_mutex_top_waiter(lock)) {
 		rt_mutex_dequeue_pi(owner, top_waiter);
 		rt_mutex_enqueue_pi(owner, waiter);
@@ -941,7 +939,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	/* Store the lock on which owner is blocked or NULL */
 	next_lock = task_blocked_on_lock(owner);
 
-	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+	raw_spin_unlock(&owner->pi_lock);
 	/*
 	 * Even if full deadlock detection is on, if the owner is not
 	 * blocked itself, we can avoid finding this out in the chain
@@ -957,12 +955,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	 */
 	get_task_struct(owner);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
 					 next_lock, waiter, task);
 
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irq(&lock->wait_lock);
 
 	return res;
 }
@@ -971,15 +969,14 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
  * Remove the top waiter from the current tasks pi waiter tree and
  * queue it up.
  *
- * Called with lock->wait_lock held.
+ * Called with lock->wait_lock held and interrupts disabled.
  */
 static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 				    struct rt_mutex *lock)
 {
 	struct rt_mutex_waiter *waiter;
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&current->pi_lock, flags);
+	raw_spin_lock(&current->pi_lock);
 
 	waiter = rt_mutex_top_waiter(lock);
 
@@ -1001,7 +998,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 	 */
 	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
 
-	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+	raw_spin_unlock(&current->pi_lock);
 
 	wake_q_add(wake_q, waiter->task);
 }
@@ -1009,7 +1006,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 /*
  * Remove a waiter from a lock and give up
  *
- * Must be called with lock->wait_lock held and
+ * Must be called with lock->wait_lock held and interrupts disabled. I must
  * have just failed to try_to_take_rt_mutex().
  */
 static void remove_waiter(struct rt_mutex *lock,
@@ -1018,12 +1015,11 @@ static void remove_waiter(struct rt_mutex *lock,
 	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
 	struct task_struct *owner = rt_mutex_owner(lock);
 	struct rt_mutex *next_lock;
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&current->pi_lock, flags);
+	raw_spin_lock(&current->pi_lock);
 	rt_mutex_dequeue(lock, waiter);
 	current->pi_blocked_on = NULL;
-	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+	raw_spin_unlock(&current->pi_lock);
 
 	/*
 	 * Only update priority if the waiter was the highest priority
@@ -1032,7 +1028,7 @@ static void remove_waiter(struct rt_mutex *lock,
 	if (!owner || !is_top_waiter)
 		return;
 
-	raw_spin_lock_irqsave(&owner->pi_lock, flags);
+	raw_spin_lock(&owner->pi_lock);
 
 	rt_mutex_dequeue_pi(owner, waiter);
 
@@ -1044,7 +1040,7 @@ static void remove_waiter(struct rt_mutex *lock,
 	/* Store the lock on which owner is blocked or NULL */
 	next_lock = task_blocked_on_lock(owner);
 
-	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+	raw_spin_unlock(&owner->pi_lock);
 
 	/*
 	 * Don't walk the chain, if the owner task is not blocked
@@ -1056,12 +1052,12 @@ static void remove_waiter(struct rt_mutex *lock,
 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
 	get_task_struct(owner);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
 				   next_lock, NULL, current);
 
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irq(&lock->wait_lock);
 }
 
 /*
@@ -1097,11 +1093,11 @@ void rt_mutex_adjust_pi(struct task_struct *task)
  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
  * @lock:	 the rt_mutex to take
  * @state:	 the state the task should block in (TASK_INTERRUPTIBLE
  *		 or TASK_UNINTERRUPTIBLE)
  * @timeout:	 the pre-initialized and started timer, or NULL for none
  * @waiter:	 the pre-initialized rt_mutex_waiter
  *
- * lock->wait_lock must be held by the caller.
+ * Must be called with lock->wait_lock held and interrupts disabled
  */
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
@@ -1129,13 +1125,13 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 			break;
 		}
 
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irq(&lock->wait_lock);
 
 		debug_rt_mutex_print_deadlock(waiter);
 
 		schedule();
 
-		raw_spin_lock(&lock->wait_lock);
+		raw_spin_lock_irq(&lock->wait_lock);
 		set_current_state(state);
 	}
 
@@ -1172,17 +1168,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  enum rtmutex_chainwalk chwalk)
 {
 	struct rt_mutex_waiter waiter;
+	unsigned long flags;
 	int ret = 0;
 
 	debug_rt_mutex_init_waiter(&waiter);
 	RB_CLEAR_NODE(&waiter.pi_tree_entry);
 	RB_CLEAR_NODE(&waiter.tree_entry);
 
-	raw_spin_lock(&lock->wait_lock);
+	/*
+	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
+	 * be called in early boot if the cmpxchg() fast path is disabled
+	 * (debug, no architecture support). In this case we will acquire the
+	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
+	 * enable interrupts in that early boot case. So we need to use the
+	 * irqsave/restore variants.
+	 */
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
 	/* Try to acquire the lock again: */
 	if (try_to_take_rt_mutex(lock, current, NULL)) {
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 		return 0;
 	}
 
@@ -1211,7 +1216,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	 */
 	fixup_rt_mutex_waiters(lock);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	/* Remove pending timer: */
 	if (unlikely(timeout))
@@ -1227,6 +1232,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
  */
 static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
+	unsigned long flags;
 	int ret;
 
 	/*
@@ -1238,10 +1244,10 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 		return 0;
 
 	/*
-	 * The mutex has currently no owner. Lock the wait lock and
-	 * try to acquire the lock.
+	 * The mutex has currently no owner. Lock the wait lock and try to
+	 * acquire the lock. We use irqsave here to support early boot calls.
 	 */
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
 	ret = try_to_take_rt_mutex(lock, current, NULL);
 
@@ -1251,7 +1257,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 	 */
 	fixup_rt_mutex_waiters(lock);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	return ret;
 }
@@ -1263,7 +1269,10 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 					struct wake_q_head *wake_q)
 {
-	raw_spin_lock(&lock->wait_lock);
+	unsigned long flags;
+
+	/* irqsave required to support early boot calls */
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
 	debug_rt_mutex_unlock(lock);
 
@@ -1302,10 +1311,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 	 */
 	while (!rt_mutex_has_waiters(lock)) {
 		/* Drops lock->wait_lock ! */
-		if (unlock_rt_mutex_safe(lock) == true)
+		if (unlock_rt_mutex_safe(lock, flags) == true)
 			return false;
 		/* Relock the rtmutex and try again */
-		raw_spin_lock(&lock->wait_lock);
+		raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	}
 
 	/*
@@ -1316,7 +1325,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 	 */
 	mark_wakeup_next_waiter(wake_q, lock);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
 	/* check PI boosting */
 	return true;
@@ -1596,10 +1605,10 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 {
 	int ret;
 
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irq(&lock->wait_lock);
 
 	if (try_to_take_rt_mutex(lock, task, NULL)) {
-		raw_spin_unlock(&lock->wait_lock);
+		raw_spin_unlock_irq(&lock->wait_lock);
 		return 1;
 	}
 
@@ -1620,7 +1629,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 	if (unlikely(ret))
 		remove_waiter(lock, waiter);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	debug_rt_mutex_print_deadlock(waiter);
 
@@ -1668,7 +1677,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 {
 	int ret;
 
-	raw_spin_lock(&lock->wait_lock);
+	raw_spin_lock_irq(&lock->wait_lock);
 
 	set_current_state(TASK_INTERRUPTIBLE);
 
@@ -1684,7 +1693,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 	 */
 	fixup_rt_mutex_waiters(lock);
 
-	raw_spin_unlock(&lock->wait_lock);
+	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
 }
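
The pattern this change establishes is: the outer lock->wait_lock is always taken with interrupts disabled (irqsave in the slowpaths that may run in early boot, plain _irq elsewhere), so the inner pi_lock can drop the irqsave/irqrestore variants and use plain raw_spin_lock()/raw_spin_unlock(). The following is a minimal illustrative sketch of that nesting, not part of the patch; the struct and function names (demo_lock, demo_slowpath) are hypothetical.

	#include <linux/spinlock.h>

	struct demo_lock {
		raw_spinlock_t	wait_lock;	/* outer lock, now irq-safe */
		raw_spinlock_t	pi_lock;	/* inner lock, taken with irqs already off */
	};

	static void demo_slowpath(struct demo_lock *dl)
	{
		unsigned long flags;

		/* Outer lock: disable interrupts and remember their prior state. */
		raw_spin_lock_irqsave(&dl->wait_lock, flags);

		/* Inner lock: interrupts are already disabled, no irqsave needed. */
		raw_spin_lock(&dl->pi_lock);
		/* ... manipulate state protected by both locks ... */
		raw_spin_unlock(&dl->pi_lock);

		/* Drop the outer lock and restore the saved interrupt state. */
		raw_spin_unlock_irqrestore(&dl->wait_lock, flags);
	}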