diff options
Diffstat (limited to 'kernel/locking/rtmutex.c')
-rw-r--r-- | kernel/locking/rtmutex.c | 32 |
1 file changed, 29 insertions, 3 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index c6eda049ef9f..0e641eb473de 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c | |||
@@ -224,6 +224,12 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, | |||
224 | } | 224 | } |
225 | #endif | 225 | #endif |
226 | 226 | ||
/*
 * Build an on-stack rt_mutex_waiter holding a snapshot of @p's
 * scheduling parameters: its effective priority and — meaningful only
 * for deadline tasks — its absolute deadline.
 *
 * Only use with rt_mutex_waiter_{less,equal}(): the compound literal
 * has automatic storage duration, so the pointer must not escape the
 * enclosing scope or be stored for later use.
 */
#define task_to_waiter(p) \
	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }
232 | |||
227 | static inline int | 233 | static inline int |
228 | rt_mutex_waiter_less(struct rt_mutex_waiter *left, | 234 | rt_mutex_waiter_less(struct rt_mutex_waiter *left, |
229 | struct rt_mutex_waiter *right) | 235 | struct rt_mutex_waiter *right) |
@@ -243,6 +249,25 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left, | |||
243 | return 0; | 249 | return 0; |
244 | } | 250 | } |
245 | 251 | ||
252 | static inline int | ||
253 | rt_mutex_waiter_equal(struct rt_mutex_waiter *left, | ||
254 | struct rt_mutex_waiter *right) | ||
255 | { | ||
256 | if (left->prio != right->prio) | ||
257 | return 0; | ||
258 | |||
259 | /* | ||
260 | * If both waiters have dl_prio(), we check the deadlines of the | ||
261 | * associated tasks. | ||
262 | * If left waiter has a dl_prio(), and we didn't return 0 above, | ||
263 | * then right waiter has a dl_prio() too. | ||
264 | */ | ||
265 | if (dl_prio(left->prio)) | ||
266 | return left->deadline == right->deadline; | ||
267 | |||
268 | return 1; | ||
269 | } | ||
270 | |||
246 | static void | 271 | static void |
247 | rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) | 272 | rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) |
248 | { | 273 | { |
@@ -553,7 +578,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, | |||
553 | * enabled we continue, but stop the requeueing in the chain | 578 | * enabled we continue, but stop the requeueing in the chain |
554 | * walk. | 579 | * walk. |
555 | */ | 580 | */ |
556 | if (waiter->prio == task->prio && !dl_task(task)) { | 581 | if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { |
557 | if (!detect_deadlock) | 582 | if (!detect_deadlock) |
558 | goto out_unlock_pi; | 583 | goto out_unlock_pi; |
559 | else | 584 | else |
@@ -856,7 +881,8 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, | |||
856 | * the top waiter priority (kernel view), | 881 | * the top waiter priority (kernel view), |
857 | * @task lost. | 882 | * @task lost. |
858 | */ | 883 | */ |
859 | if (task->prio >= rt_mutex_top_waiter(lock)->prio) | 884 | if (!rt_mutex_waiter_less(task_to_waiter(task), |
885 | rt_mutex_top_waiter(lock))) | ||
860 | return 0; | 886 | return 0; |
861 | 887 | ||
862 | /* | 888 | /* |
@@ -1119,7 +1145,7 @@ void rt_mutex_adjust_pi(struct task_struct *task) | |||
1119 | raw_spin_lock_irqsave(&task->pi_lock, flags); | 1145 | raw_spin_lock_irqsave(&task->pi_lock, flags); |
1120 | 1146 | ||
1121 | waiter = task->pi_blocked_on; | 1147 | waiter = task->pi_blocked_on; |
1122 | if (!waiter || (waiter->prio == task->prio && !dl_prio(task->prio))) { | 1148 | if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { |
1123 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); | 1149 | raw_spin_unlock_irqrestore(&task->pi_lock, flags); |
1124 | return; | 1150 | return; |
1125 | } | 1151 | } |