path: root/kernel/locking
author    Linus Torvalds <torvalds@linux-foundation.org>  2014-01-20 13:42:08 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-01-20 13:42:08 -0500
commit    a0fa1dd3cdbccec9597fe53b6177a9aa6e20f2f8 (patch)
tree      b249854573815eedf377e554f0ea516f86411841 /kernel/locking
parent    9326657abe1a83ed4b4f396b923ca1217fd50cba (diff)
parent    eaad45132c564ce377e6dce05e78e08e456d5315 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:

 - Add the initial implementation of SCHED_DEADLINE support: a real-time
   scheduling policy where tasks that meet their deadlines and
   periodically execute their instances in less than their runtime quota
   see real-time scheduling and won't miss any of their deadlines.
   Tasks that go over their quota get delayed (Available to privileged
   users for now)

 - Clean up and fix preempt_enable_no_resched() abuse all around the tree

 - Do sched_clock() performance optimizations on x86 and elsewhere

 - Fix and improve auto-NUMA balancing

 - Fix and clean up the idle loop

 - Apply various cleanups and fixes

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (60 commits)
  sched: Fix __sched_setscheduler() nice test
  sched: Move SCHED_RESET_ON_FORK into attr::sched_flags
  sched: Fix up attr::sched_priority warning
  sched: Fix up scheduler syscall LTP fails
  sched: Preserve the nice level over sched_setscheduler() and sched_setparam() calls
  sched/core: Fix htmldocs warnings
  sched/deadline: No need to check p if dl_se is valid
  sched/deadline: Remove unused variables
  sched/deadline: Fix sparse static warnings
  m68k: Fix build warning in mac_via.h
  sched, thermal: Clean up preempt_enable_no_resched() abuse
  sched, net: Fixup busy_loop_us_clock()
  sched, net: Clean up preempt_enable_no_resched() abuse
  sched/preempt: Fix up missed PREEMPT_NEED_RESCHED folding
  sched/preempt, locking: Rework local_bh_{dis,en}able()
  sched/clock, x86: Avoid a runtime condition in native_sched_clock()
  sched/clock: Fix up clear_sched_clock_stable()
  sched/clock, x86: Use a static_key for sched_clock_stable
  sched/clock: Remove local_irq_disable() from the clocks
  sched/clock, x86: Rewrite cyc2ns() to avoid the need to disable IRQs
  ...
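For readers curious what the new policy looks like from userspace, below is a minimal, hypothetical sketch using the sched_setattr() syscall this series introduces. The struct sched_attr layout and SCHED_DEADLINE value mirror the uapi headers of this cycle; glibc has no wrapper, and SYS_sched_setattr may not be defined by older libc headers — both are assumptions here, not part of this merge.

/*
 * Hypothetical usage sketch (not part of this patch): request
 * SCHED_DEADLINE for the calling thread. Needs privilege, since the
 * policy is restricted to privileged users for now.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef SCHED_DEADLINE
#define SCHED_DEADLINE 6
#endif

/* Mirrors the new uapi structure; no glibc wrapper exists yet. */
struct sched_attr {
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;            /* SCHED_NORMAL, SCHED_BATCH */
        uint32_t sched_priority;        /* SCHED_FIFO, SCHED_RR */
        uint64_t sched_runtime;         /* SCHED_DEADLINE, nanoseconds */
        uint64_t sched_deadline;
        uint64_t sched_period;
};

int main(void)
{
        struct sched_attr attr = {
                .size           = sizeof(attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_runtime  =  10 * 1000 * 1000,    /* 10 ms of CPU...    */
                .sched_deadline =  30 * 1000 * 1000,    /* ...due in 30 ms... */
                .sched_period   = 100 * 1000 * 1000,    /* ...every 100 ms    */
        };

        /* pid 0 == current thread; SYS_sched_setattr assumed defined. */
        if (syscall(SYS_sched_setattr, 0, &attr, 0))
                perror("sched_setattr");

        return 0;
}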
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/rtmutex-debug.c    8
-rw-r--r--  kernel/locking/rtmutex.c        166
-rw-r--r--  kernel/locking/rtmutex_common.h  23
3 files changed, 149 insertions(+), 48 deletions(-)
diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
index 13b243a323fa..49b2ed3dced8 100644
--- a/kernel/locking/rtmutex-debug.c
+++ b/kernel/locking/rtmutex-debug.c
@@ -24,7 +24,7 @@
 #include <linux/kallsyms.h>
 #include <linux/syscalls.h>
 #include <linux/interrupt.h>
-#include <linux/plist.h>
+#include <linux/rbtree.h>
 #include <linux/fs.h>
 #include <linux/debug_locks.h>
 
@@ -57,7 +57,7 @@ static void printk_lock(struct rt_mutex *lock, int print_owner)
 
 void rt_mutex_debug_task_free(struct task_struct *task)
 {
-        DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters));
+        DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters));
         DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
 }
 
@@ -154,16 +154,12 @@ void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
 void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
 {
         memset(waiter, 0x11, sizeof(*waiter));
-        plist_node_init(&waiter->list_entry, MAX_PRIO);
-        plist_node_init(&waiter->pi_list_entry, MAX_PRIO);
         waiter->deadlock_task_pid = NULL;
 }
 
 void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
 {
         put_pid(waiter->deadlock_task_pid);
-        DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry));
-        DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
         memset(waiter, 0x22, sizeof(*waiter));
 }
 
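The rtmutex-debug.c fallout above comes from the core conversion in rtmutex.c below: the waiter plists become rbtrees with a cached leftmost node. As a standalone illustration, here is a minimal sketch of that cached-leftmost idiom; the struct and function names are made up for the example, but rb_link_node(), rb_insert_color(), rb_erase(), rb_next() and the RB_*_NODE helpers are the real <linux/rbtree.h> interface the patch uses.

/*
 * Distilled sketch (not from this patch) of the idiom the new
 * rt_mutex_enqueue()/rt_mutex_dequeue() follow: an rbtree ordered by a
 * comparator, plus a cached leftmost pointer so reading the top
 * (highest-priority) element stays O(1).
 */
#include <linux/rbtree.h>

struct item {
        struct rb_node  node;
        int             key;    /* lower key == higher priority */
};

static void item_insert(struct rb_root *root, struct rb_node **leftmost,
                        struct item *new)
{
        struct rb_node **link = &root->rb_node, *parent = NULL;
        int is_leftmost = 1;

        while (*link) {
                struct item *entry = rb_entry(*link, struct item, node);

                parent = *link;
                if (new->key < entry->key) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        is_leftmost = 0;        /* went right at least once */
                }
        }

        if (is_leftmost)
                *leftmost = &new->node;         /* new smallest element */

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, root);
}

static void item_remove(struct rb_root *root, struct rb_node **leftmost,
                        struct item *old)
{
        if (RB_EMPTY_NODE(&old->node))          /* makes removal idempotent */
                return;

        if (*leftmost == &old->node)
                *leftmost = rb_next(&old->node);

        rb_erase(&old->node, root);
        RB_CLEAR_NODE(&old->node);
}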
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 0dd6aec1cb6a..2e960a2bab81 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -14,6 +14,7 @@
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
+#include <linux/sched/deadline.h>
 #include <linux/timer.h>
 
 #include "rtmutex_common.h"
@@ -91,10 +92,107 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 }
 #endif
 
+static inline int
+rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+                     struct rt_mutex_waiter *right)
+{
+        if (left->prio < right->prio)
+                return 1;
+
+        /*
+         * If both waiters have dl_prio(), we check the deadlines of the
+         * associated tasks.
+         * If left waiter has a dl_prio(), and we didn't return 1 above,
+         * then right waiter has a dl_prio() too.
+         */
+        if (dl_prio(left->prio))
+                return (left->task->dl.deadline < right->task->dl.deadline);
+
+        return 0;
+}
+
+static void
+rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+{
+        struct rb_node **link = &lock->waiters.rb_node;
+        struct rb_node *parent = NULL;
+        struct rt_mutex_waiter *entry;
+        int leftmost = 1;
+
+        while (*link) {
+                parent = *link;
+                entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
+                if (rt_mutex_waiter_less(waiter, entry)) {
+                        link = &parent->rb_left;
+                } else {
+                        link = &parent->rb_right;
+                        leftmost = 0;
+                }
+        }
+
+        if (leftmost)
+                lock->waiters_leftmost = &waiter->tree_entry;
+
+        rb_link_node(&waiter->tree_entry, parent, link);
+        rb_insert_color(&waiter->tree_entry, &lock->waiters);
+}
+
+static void
+rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+{
+        if (RB_EMPTY_NODE(&waiter->tree_entry))
+                return;
+
+        if (lock->waiters_leftmost == &waiter->tree_entry)
+                lock->waiters_leftmost = rb_next(&waiter->tree_entry);
+
+        rb_erase(&waiter->tree_entry, &lock->waiters);
+        RB_CLEAR_NODE(&waiter->tree_entry);
+}
+
+static void
+rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
+{
+        struct rb_node **link = &task->pi_waiters.rb_node;
+        struct rb_node *parent = NULL;
+        struct rt_mutex_waiter *entry;
+        int leftmost = 1;
+
+        while (*link) {
+                parent = *link;
+                entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
+                if (rt_mutex_waiter_less(waiter, entry)) {
+                        link = &parent->rb_left;
+                } else {
+                        link = &parent->rb_right;
+                        leftmost = 0;
+                }
+        }
+
+        if (leftmost)
+                task->pi_waiters_leftmost = &waiter->pi_tree_entry;
+
+        rb_link_node(&waiter->pi_tree_entry, parent, link);
+        rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
+}
+
+static void
+rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
+{
+        if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
+                return;
+
+        if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
+                task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);
+
+        rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
+        RB_CLEAR_NODE(&waiter->pi_tree_entry);
+}
+
 /*
- * Calculate task priority from the waiter list priority
+ * Calculate task priority from the waiter tree priority
  *
- * Return task->normal_prio when the waiter list is empty or when
+ * Return task->normal_prio when the waiter tree is empty or when
  * the waiter is not allowed to do priority boosting
  */
 int rt_mutex_getprio(struct task_struct *task)
@@ -102,10 +200,18 @@ int rt_mutex_getprio(struct task_struct *task)
         if (likely(!task_has_pi_waiters(task)))
                 return task->normal_prio;
 
-        return min(task_top_pi_waiter(task)->pi_list_entry.prio,
+        return min(task_top_pi_waiter(task)->prio,
                    task->normal_prio);
 }
 
+struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+{
+        if (likely(!task_has_pi_waiters(task)))
+                return NULL;
+
+        return task_top_pi_waiter(task)->task;
+}
+
 /*
  * Adjust the priority of a task, after its pi_waiters got modified.
  *
@@ -115,7 +221,7 @@ static void __rt_mutex_adjust_prio(struct task_struct *task)
 {
         int prio = rt_mutex_getprio(task);
 
-        if (task->prio != prio)
+        if (task->prio != prio || dl_prio(prio))
                 rt_mutex_setprio(task, prio);
 }
 
@@ -233,7 +339,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
          * When deadlock detection is off then we check, if further
          * priority adjustment is necessary.
          */
-        if (!detect_deadlock && waiter->list_entry.prio == task->prio)
+        if (!detect_deadlock && waiter->prio == task->prio)
                 goto out_unlock_pi;
 
         lock = waiter->lock;
@@ -254,9 +360,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
         top_waiter = rt_mutex_top_waiter(lock);
 
         /* Requeue the waiter */
-        plist_del(&waiter->list_entry, &lock->wait_list);
-        waiter->list_entry.prio = task->prio;
-        plist_add(&waiter->list_entry, &lock->wait_list);
+        rt_mutex_dequeue(lock, waiter);
+        waiter->prio = task->prio;
+        rt_mutex_enqueue(lock, waiter);
 
         /* Release the task */
         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
@@ -280,17 +386,15 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 
         if (waiter == rt_mutex_top_waiter(lock)) {
                 /* Boost the owner */
-                plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
-                waiter->pi_list_entry.prio = waiter->list_entry.prio;
-                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
+                rt_mutex_dequeue_pi(task, top_waiter);
+                rt_mutex_enqueue_pi(task, waiter);
                 __rt_mutex_adjust_prio(task);
 
         } else if (top_waiter == waiter) {
                 /* Deboost the owner */
-                plist_del(&waiter->pi_list_entry, &task->pi_waiters);
+                rt_mutex_dequeue_pi(task, waiter);
                 waiter = rt_mutex_top_waiter(lock);
-                waiter->pi_list_entry.prio = waiter->list_entry.prio;
-                plist_add(&waiter->pi_list_entry, &task->pi_waiters);
+                rt_mutex_enqueue_pi(task, waiter);
                 __rt_mutex_adjust_prio(task);
         }
 
@@ -355,7 +459,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
          * 3) it is top waiter
          */
         if (rt_mutex_has_waiters(lock)) {
-                if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
+                if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
                         if (!waiter || waiter != rt_mutex_top_waiter(lock))
                                 return 0;
                 }
@@ -369,7 +473,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 
                 /* remove the queued waiter. */
                 if (waiter) {
-                        plist_del(&waiter->list_entry, &lock->wait_list);
+                        rt_mutex_dequeue(lock, waiter);
                         task->pi_blocked_on = NULL;
                 }
 
@@ -379,8 +483,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                  */
                 if (rt_mutex_has_waiters(lock)) {
                         top = rt_mutex_top_waiter(lock);
-                        top->pi_list_entry.prio = top->list_entry.prio;
-                        plist_add(&top->pi_list_entry, &task->pi_waiters);
+                        rt_mutex_enqueue_pi(task, top);
                 }
                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
         }
@@ -416,13 +519,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
         __rt_mutex_adjust_prio(task);
         waiter->task = task;
         waiter->lock = lock;
-        plist_node_init(&waiter->list_entry, task->prio);
-        plist_node_init(&waiter->pi_list_entry, task->prio);
+        waiter->prio = task->prio;
 
         /* Get the top priority waiter on the lock */
         if (rt_mutex_has_waiters(lock))
                 top_waiter = rt_mutex_top_waiter(lock);
-        plist_add(&waiter->list_entry, &lock->wait_list);
+        rt_mutex_enqueue(lock, waiter);
 
         task->pi_blocked_on = waiter;
 
@@ -433,8 +535,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
         if (waiter == rt_mutex_top_waiter(lock)) {
                 raw_spin_lock_irqsave(&owner->pi_lock, flags);
-                plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
-                plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+                rt_mutex_dequeue_pi(owner, top_waiter);
+                rt_mutex_enqueue_pi(owner, waiter);
 
                 __rt_mutex_adjust_prio(owner);
                 if (owner->pi_blocked_on)
@@ -486,7 +588,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
          * boosted mode and go back to normal after releasing
          * lock->wait_lock.
          */
-        plist_del(&waiter->pi_list_entry, &current->pi_waiters);
+        rt_mutex_dequeue_pi(current, waiter);
 
         rt_mutex_set_owner(lock, NULL);
 
@@ -510,7 +612,7 @@ static void remove_waiter(struct rt_mutex *lock,
         int chain_walk = 0;
 
         raw_spin_lock_irqsave(&current->pi_lock, flags);
-        plist_del(&waiter->list_entry, &lock->wait_list);
+        rt_mutex_dequeue(lock, waiter);
         current->pi_blocked_on = NULL;
         raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
@@ -521,13 +623,13 @@ static void remove_waiter(struct rt_mutex *lock,
 
                 raw_spin_lock_irqsave(&owner->pi_lock, flags);
 
-                plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
+                rt_mutex_dequeue_pi(owner, waiter);
 
                 if (rt_mutex_has_waiters(lock)) {
                         struct rt_mutex_waiter *next;
 
                         next = rt_mutex_top_waiter(lock);
-                        plist_add(&next->pi_list_entry, &owner->pi_waiters);
+                        rt_mutex_enqueue_pi(owner, next);
                 }
                 __rt_mutex_adjust_prio(owner);
 
@@ -537,8 +639,6 @@ static void remove_waiter(struct rt_mutex *lock,
                 raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
         }
 
-        WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
-
         if (!chain_walk)
                 return;
 
@@ -565,7 +665,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
         raw_spin_lock_irqsave(&task->pi_lock, flags);
 
         waiter = task->pi_blocked_on;
-        if (!waiter || waiter->list_entry.prio == task->prio) {
+        if (!waiter || (waiter->prio == task->prio &&
+            !dl_prio(task->prio))) {
                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                 return;
         }
@@ -638,6 +739,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
         int ret = 0;
 
         debug_rt_mutex_init_waiter(&waiter);
+        RB_CLEAR_NODE(&waiter.pi_tree_entry);
+        RB_CLEAR_NODE(&waiter.tree_entry);
 
         raw_spin_lock(&lock->wait_lock);
 
@@ -904,7 +1007,8 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
 {
         lock->owner = NULL;
         raw_spin_lock_init(&lock->wait_lock);
-        plist_head_init(&lock->wait_list);
+        lock->waiters = RB_ROOT;
+        lock->waiters_leftmost = NULL;
 
         debug_rt_mutex_init(lock, name);
 }
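A note on ordering before the header changes below: rt_mutex_waiter_less() sorts waiters by the kernel's inverted prio scale and breaks deadline-class ties by absolute deadline. A small worked example, assuming the conventions of this series (deadline tasks carry prio == -1, so dl_prio() is true below MAX_DL_PRIO == 0, while RT tasks occupy 0..98 with lower values meaning more important):

/*
 * Assumed waiters:
 *   A: prio = -1 (deadline), dl.deadline = 1000
 *   B: prio = -1 (deadline), dl.deadline = 2000
 *   C: prio = 10 (RT)
 *   D: prio = 20 (RT)
 *
 * rt_mutex_waiter_less() orders A < B (same class, earlier deadline
 * first), A,B < C (any deadline waiter beats any RT waiter), and
 * C < D (lower prio value wins). The leftmost tree node is therefore
 * always the most urgent waiter, which is why caching it in
 * lock->waiters_leftmost keeps rt_mutex_top_waiter() O(1).
 */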
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 53a66c85261b..7431a9c86f35 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -40,13 +40,13 @@ extern void schedule_rt_mutex_test(struct rt_mutex *lock);
  * This is the control structure for tasks blocked on a rt_mutex,
  * which is allocated on the kernel stack on of the blocked task.
  *
- * @list_entry:         pi node to enqueue into the mutex waiters list
- * @pi_list_entry:      pi node to enqueue into the mutex owner waiters list
+ * @tree_entry:         pi node to enqueue into the mutex waiters tree
+ * @pi_tree_entry:      pi node to enqueue into the mutex owner waiters tree
  * @task:               task reference to the blocked task
  */
 struct rt_mutex_waiter {
-        struct plist_node       list_entry;
-        struct plist_node       pi_list_entry;
+        struct rb_node          tree_entry;
+        struct rb_node          pi_tree_entry;
         struct task_struct      *task;
         struct rt_mutex         *lock;
 #ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -54,14 +54,15 @@ struct rt_mutex_waiter {
         struct pid              *deadlock_task_pid;
         struct rt_mutex         *deadlock_lock;
 #endif
+        int prio;
 };
 
 /*
- * Various helpers to access the waiters-plist:
+ * Various helpers to access the waiters-tree:
  */
 static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
 {
-        return !plist_head_empty(&lock->wait_list);
+        return !RB_EMPTY_ROOT(&lock->waiters);
 }
 
 static inline struct rt_mutex_waiter *
@@ -69,8 +70,8 @@ rt_mutex_top_waiter(struct rt_mutex *lock)
 {
         struct rt_mutex_waiter *w;
 
-        w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
-                        list_entry);
+        w = rb_entry(lock->waiters_leftmost, struct rt_mutex_waiter,
+                        tree_entry);
         BUG_ON(w->lock != lock);
 
         return w;
@@ -78,14 +79,14 @@ rt_mutex_top_waiter(struct rt_mutex *lock)
 
 static inline int task_has_pi_waiters(struct task_struct *p)
 {
-        return !plist_head_empty(&p->pi_waiters);
+        return !RB_EMPTY_ROOT(&p->pi_waiters);
 }
 
 static inline struct rt_mutex_waiter *
 task_top_pi_waiter(struct task_struct *p)
 {
-        return plist_first_entry(&p->pi_waiters, struct rt_mutex_waiter,
-                        pi_list_entry);
+        return rb_entry(p->pi_waiters_leftmost, struct rt_mutex_waiter,
+                        pi_tree_entry);
 }
 
 /*