Diffstat (limited to 'kernel/locking')
-rw-r--r--	kernel/locking/lockdep.c	4
-rw-r--r--	kernel/locking/mutex-debug.c	7
-rw-r--r--	kernel/locking/rtmutex-debug.c	8
-rw-r--r--	kernel/locking/rtmutex.c	166
-rw-r--r--	kernel/locking/rtmutex_common.h	23
5 files changed, 159 insertions, 49 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 576ba756a32d..eb8a54783fa0 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -590,6 +590,7 @@ static int very_verbose(struct lock_class *class)
 /*
  * Is this the address of a static object:
  */
+#ifdef __KERNEL__
 static int static_obj(void *obj)
 {
 	unsigned long start = (unsigned long) &_stext,
@@ -616,6 +617,7 @@ static int static_obj(void *obj)
 	 */
 	return is_module_address(addr) || is_module_percpu_address(addr);
 }
+#endif
 
 /*
  * To make lock name printouts unique, we calculate a unique
@@ -4115,6 +4117,7 @@ void debug_check_no_locks_held(void)
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
 
+#ifdef __KERNEL__
 void debug_show_all_locks(void)
 {
 	struct task_struct *g, *p;
@@ -4172,6 +4175,7 @@ retry:
 	read_unlock(&tasklist_lock);
 }
 EXPORT_SYMBOL_GPL(debug_show_all_locks);
+#endif
 
 /*
  * Careful: only use this function if you are sure that
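The new #ifdef __KERNEL__ guards fence off definitions that only make sense inside the kernel, so the file can also be compiled by a userspace harness that supplies its own stand-ins. A minimal sketch of the pattern in isolation; the userspace stub and the harness motivation are assumptions for illustration, not actual kernel or liblockdep code:

#include <stdio.h>

#ifdef __KERNEL__
/* A kernel build compiles the real, section-table-based static_obj(). */
#else
/* Hypothetical userspace stand-in: there are no kernel sections to
 * check, so every non-NULL pointer is accepted. */
static int static_obj(void *obj)
{
	return obj != NULL;
}
#endif

int main(void)
{
	static int x;

	printf("static_obj(&x) = %d\n", static_obj(&x));
	return 0;
}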
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 7e3443fe1f48..faf6f5b53e77 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -75,7 +75,12 @@ void debug_mutex_unlock(struct mutex *lock)
 		return;
 
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-	DEBUG_LOCKS_WARN_ON(lock->owner != current);
+
+	if (!lock->owner)
+		DEBUG_LOCKS_WARN_ON(!lock->owner);
+	else
+		DEBUG_LOCKS_WARN_ON(lock->owner != current);
+
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
 	mutex_clear_owner(lock);
 }
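Splitting the single owner check in two means the warning splat now names which invariant broke: owner lost entirely versus held by a different task. A minimal userspace model of why that helps; the toy macro only mimics the expression reporting (the real DEBUG_LOCKS_WARN_ON also switches lock debugging off on failure):

#include <stdio.h>

/* Toy stand-in for DEBUG_LOCKS_WARN_ON(): report the failing expression. */
#define WARN_ON_COND(c) \
	do { if (c) printf("warn at line %d: %s\n", __LINE__, #c); } while (0)

struct mutex_model { void *owner; };

static void debug_unlock(struct mutex_model *lock, void *curr)
{
	if (!lock->owner)
		WARN_ON_COND(!lock->owner);		/* owner was lost */
	else
		WARN_ON_COND(lock->owner != curr);	/* held by someone else */
}

int main(void)
{
	struct mutex_model m = { .owner = NULL };
	int task;

	debug_unlock(&m, &task);	/* fires the "owner was lost" warning */
	m.owner = &task;
	debug_unlock(&m, &task);	/* silent: owner matches */
	return 0;
}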
diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
index 13b243a323fa..49b2ed3dced8 100644
--- a/kernel/locking/rtmutex-debug.c
+++ b/kernel/locking/rtmutex-debug.c
@@ -24,7 +24,7 @@
 #include <linux/kallsyms.h>
 #include <linux/syscalls.h>
 #include <linux/interrupt.h>
-#include <linux/plist.h>
+#include <linux/rbtree.h>
 #include <linux/fs.h>
 #include <linux/debug_locks.h>
 
@@ -57,7 +57,7 @@ static void printk_lock(struct rt_mutex *lock, int print_owner)
 
 void rt_mutex_debug_task_free(struct task_struct *task)
 {
-	DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters));
+	DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters));
 	DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
 }
 
@@ -154,16 +154,12 @@ void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
 void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
 {
 	memset(waiter, 0x11, sizeof(*waiter));
-	plist_node_init(&waiter->list_entry, MAX_PRIO);
-	plist_node_init(&waiter->pi_list_entry, MAX_PRIO);
 	waiter->deadlock_task_pid = NULL;
 }
 
 void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
 {
 	put_pid(waiter->deadlock_task_pid);
-	DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry));
-	DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
 	memset(waiter, 0x22, sizeof(*waiter));
 }
 
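The plist-based "is this waiter still queued?" assertions can go away because the rb-tree tracks linkage in the node itself: rt_mutex_dequeue() bails out on RB_EMPTY_NODE(), and rt_mutex_slowlock() clears both nodes before first use (see the RB_CLEAR_NODE hunk further down). A minimal userspace model of that convention; the kernel encodes the sentinel inside rb_node's __rb_parent_color word, the self-pointer here is an analogy:

#include <assert.h>

/* Model node: a self-pointing parent means "not queued". */
struct node_model { struct node_model *parent; };

static void clear_node(struct node_model *n) { n->parent = n; }	/* RB_CLEAR_NODE */
static int empty_node(struct node_model *n) { return n->parent == n; }	/* RB_EMPTY_NODE */

int main(void)
{
	struct node_model tree_entry;

	clear_node(&tree_entry);		/* done before the first enqueue */
	assert(empty_node(&tree_entry));	/* a dequeue would bail out here */

	tree_entry.parent = 0;			/* pretend rb_link_node() ran */
	assert(!empty_node(&tree_entry));

	clear_node(&tree_entry);		/* rt_mutex_dequeue() clears it again */
	assert(empty_node(&tree_entry));
	return 0;
}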
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 0dd6aec1cb6a..2e960a2bab81 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -14,6 +14,7 @@
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/sched/rt.h>
+#include <linux/sched/deadline.h>
 #include <linux/timer.h>
 
 #include "rtmutex_common.h"
@@ -91,10 +92,107 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 }
 #endif
 
+static inline int
+rt_mutex_waiter_less(struct rt_mutex_waiter *left,
+		     struct rt_mutex_waiter *right)
+{
+	if (left->prio < right->prio)
+		return 1;
+
+	/*
+	 * If both waiters have dl_prio(), we check the deadlines of the
+	 * associated tasks.
+	 * If left waiter has a dl_prio(), and we didn't return 1 above,
+	 * then right waiter has a dl_prio() too.
+	 */
+	if (dl_prio(left->prio))
+		return (left->task->dl.deadline < right->task->dl.deadline);
+
+	return 0;
+}
+
+static void
+rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+{
+	struct rb_node **link = &lock->waiters.rb_node;
+	struct rb_node *parent = NULL;
+	struct rt_mutex_waiter *entry;
+	int leftmost = 1;
+
+	while (*link) {
+		parent = *link;
+		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
+		if (rt_mutex_waiter_less(waiter, entry)) {
+			link = &parent->rb_left;
+		} else {
+			link = &parent->rb_right;
+			leftmost = 0;
+		}
+	}
+
+	if (leftmost)
+		lock->waiters_leftmost = &waiter->tree_entry;
+
+	rb_link_node(&waiter->tree_entry, parent, link);
+	rb_insert_color(&waiter->tree_entry, &lock->waiters);
+}
+
+static void
+rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
+{
+	if (RB_EMPTY_NODE(&waiter->tree_entry))
+		return;
+
+	if (lock->waiters_leftmost == &waiter->tree_entry)
+		lock->waiters_leftmost = rb_next(&waiter->tree_entry);
+
+	rb_erase(&waiter->tree_entry, &lock->waiters);
+	RB_CLEAR_NODE(&waiter->tree_entry);
+}
+
+static void
+rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
+{
+	struct rb_node **link = &task->pi_waiters.rb_node;
+	struct rb_node *parent = NULL;
+	struct rt_mutex_waiter *entry;
+	int leftmost = 1;
+
+	while (*link) {
+		parent = *link;
+		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
+		if (rt_mutex_waiter_less(waiter, entry)) {
+			link = &parent->rb_left;
+		} else {
+			link = &parent->rb_right;
+			leftmost = 0;
+		}
+	}
+
+	if (leftmost)
+		task->pi_waiters_leftmost = &waiter->pi_tree_entry;
+
+	rb_link_node(&waiter->pi_tree_entry, parent, link);
+	rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
+}
+
+static void
+rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
+{
+	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
+		return;
+
+	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
+		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);
+
+	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
+	RB_CLEAR_NODE(&waiter->pi_tree_entry);
+}
+
 /*
- * Calculate task priority from the waiter list priority
+ * Calculate task priority from the waiter tree priority
  *
- * Return task->normal_prio when the waiter list is empty or when
+ * Return task->normal_prio when the waiter tree is empty or when
  * the waiter is not allowed to do priority boosting
  */
 int rt_mutex_getprio(struct task_struct *task)
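rt_mutex_waiter_less() is the comparator that orders both new trees: the numerically lower ->prio wins, and deadline waiters, which all share the same prio value, fall back to the earlier absolute deadline. A small userspace model of that ordering with illustrative values; in the kernel, deadline tasks sit below MAX_DL_PRIO (0), i.e. their prio is negative:

#include <assert.h>

#define MAX_DL_PRIO	0	/* as in the kernel: dl prios sit below 0 */

static int dl_prio(int prio) { return prio < MAX_DL_PRIO; }

struct waiter_model { int prio; unsigned long long deadline; };

/* Mirrors rt_mutex_waiter_less(): numerically lower prio wins; among
 * deadline waiters (which tie on prio) the earlier deadline wins. */
static int waiter_less(struct waiter_model *left, struct waiter_model *right)
{
	if (left->prio < right->prio)
		return 1;
	if (dl_prio(left->prio))
		return left->deadline < right->deadline;
	return 0;
}

int main(void)
{
	struct waiter_model dl_a = { -1, 100 };	/* SCHED_DEADLINE, early */
	struct waiter_model dl_b = { -1, 200 };	/* SCHED_DEADLINE, late */
	struct waiter_model rt   = { 10, 0 };	/* real-time prio 10 */

	assert(waiter_less(&dl_a, &rt));	/* any DL beats any RT */
	assert(waiter_less(&dl_a, &dl_b));	/* earlier deadline first */
	assert(!waiter_less(&dl_b, &dl_a));
	return 0;
}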
@@ -102,10 +200,18 @@ int rt_mutex_getprio(struct task_struct *task)
 	if (likely(!task_has_pi_waiters(task)))
 		return task->normal_prio;
 
-	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
+	return min(task_top_pi_waiter(task)->prio,
 		   task->normal_prio);
 }
 
+struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+{
+	if (likely(!task_has_pi_waiters(task)))
+		return NULL;
+
+	return task_top_pi_waiter(task)->task;
+}
+
 /*
  * Adjust the priority of a task, after its pi_waiters got modified.
  *
@@ -115,7 +221,7 @@ static void __rt_mutex_adjust_prio(struct task_struct *task)
 {
 	int prio = rt_mutex_getprio(task);
 
-	if (task->prio != prio)
+	if (task->prio != prio || dl_prio(prio))
 		rt_mutex_setprio(task, prio);
 }
 
@@ -233,7 +339,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * When deadlock detection is off then we check, if further
 	 * priority adjustment is necessary.
 	 */
-	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
+	if (!detect_deadlock && waiter->prio == task->prio)
 		goto out_unlock_pi;
 
 	lock = waiter->lock;
@@ -254,9 +360,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	top_waiter = rt_mutex_top_waiter(lock);
 
 	/* Requeue the waiter */
-	plist_del(&waiter->list_entry, &lock->wait_list);
-	waiter->list_entry.prio = task->prio;
-	plist_add(&waiter->list_entry, &lock->wait_list);
+	rt_mutex_dequeue(lock, waiter);
+	waiter->prio = task->prio;
+	rt_mutex_enqueue(lock, waiter);
 
 	/* Release the task */
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
@@ -280,17 +386,15 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 
 	if (waiter == rt_mutex_top_waiter(lock)) {
 		/* Boost the owner */
-		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
-		waiter->pi_list_entry.prio = waiter->list_entry.prio;
-		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
+		rt_mutex_dequeue_pi(task, top_waiter);
+		rt_mutex_enqueue_pi(task, waiter);
 		__rt_mutex_adjust_prio(task);
 
 	} else if (top_waiter == waiter) {
 		/* Deboost the owner */
-		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
+		rt_mutex_dequeue_pi(task, waiter);
 		waiter = rt_mutex_top_waiter(lock);
-		waiter->pi_list_entry.prio = waiter->list_entry.prio;
-		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
+		rt_mutex_enqueue_pi(task, waiter);
 		__rt_mutex_adjust_prio(task);
 	}
 
@@ -355,7 +459,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 	 *  3) it is top waiter
 	 */
 	if (rt_mutex_has_waiters(lock)) {
-		if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
+		if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
 			if (!waiter || waiter != rt_mutex_top_waiter(lock))
 				return 0;
 		}
@@ -369,7 +473,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 
 	/* remove the queued waiter. */
 	if (waiter) {
-		plist_del(&waiter->list_entry, &lock->wait_list);
+		rt_mutex_dequeue(lock, waiter);
 		task->pi_blocked_on = NULL;
 	}
 
@@ -379,8 +483,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 	 */
 	if (rt_mutex_has_waiters(lock)) {
 		top = rt_mutex_top_waiter(lock);
-		top->pi_list_entry.prio = top->list_entry.prio;
-		plist_add(&top->pi_list_entry, &task->pi_waiters);
+		rt_mutex_enqueue_pi(task, top);
 	}
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 }
@@ -416,13 +519,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	__rt_mutex_adjust_prio(task);
 	waiter->task = task;
 	waiter->lock = lock;
-	plist_node_init(&waiter->list_entry, task->prio);
-	plist_node_init(&waiter->pi_list_entry, task->prio);
+	waiter->prio = task->prio;
 
 	/* Get the top priority waiter on the lock */
 	if (rt_mutex_has_waiters(lock))
 		top_waiter = rt_mutex_top_waiter(lock);
-	plist_add(&waiter->list_entry, &lock->wait_list);
+	rt_mutex_enqueue(lock, waiter);
 
 	task->pi_blocked_on = waiter;
 
@@ -433,8 +535,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
 	if (waiter == rt_mutex_top_waiter(lock)) {
 		raw_spin_lock_irqsave(&owner->pi_lock, flags);
-		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
-		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+		rt_mutex_dequeue_pi(owner, top_waiter);
+		rt_mutex_enqueue_pi(owner, waiter);
 
 		__rt_mutex_adjust_prio(owner);
 		if (owner->pi_blocked_on)
@@ -486,7 +588,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 	 * boosted mode and go back to normal after releasing
 	 * lock->wait_lock.
 	 */
-	plist_del(&waiter->pi_list_entry, &current->pi_waiters);
+	rt_mutex_dequeue_pi(current, waiter);
 
 	rt_mutex_set_owner(lock, NULL);
 
@@ -510,7 +612,7 @@ static void remove_waiter(struct rt_mutex *lock,
 	int chain_walk = 0;
 
 	raw_spin_lock_irqsave(&current->pi_lock, flags);
-	plist_del(&waiter->list_entry, &lock->wait_list);
+	rt_mutex_dequeue(lock, waiter);
 	current->pi_blocked_on = NULL;
 	raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
@@ -521,13 +623,13 @@ static void remove_waiter(struct rt_mutex *lock,
 
 		raw_spin_lock_irqsave(&owner->pi_lock, flags);
 
-		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
+		rt_mutex_dequeue_pi(owner, waiter);
 
 		if (rt_mutex_has_waiters(lock)) {
 			struct rt_mutex_waiter *next;
 
 			next = rt_mutex_top_waiter(lock);
-			plist_add(&next->pi_list_entry, &owner->pi_waiters);
+			rt_mutex_enqueue_pi(owner, next);
 		}
 		__rt_mutex_adjust_prio(owner);
 
@@ -537,8 +639,6 @@ static void remove_waiter(struct rt_mutex *lock,
 		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
 	}
 
-	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
-
 	if (!chain_walk)
 		return;
 
@@ -565,7 +665,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
 	waiter = task->pi_blocked_on;
-	if (!waiter || waiter->list_entry.prio == task->prio) {
+	if (!waiter || (waiter->prio == task->prio &&
+			!dl_prio(task->prio))) {
 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		return;
 	}
@@ -638,6 +739,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	int ret = 0;
 
 	debug_rt_mutex_init_waiter(&waiter);
+	RB_CLEAR_NODE(&waiter.pi_tree_entry);
+	RB_CLEAR_NODE(&waiter.tree_entry);
 
 	raw_spin_lock(&lock->wait_lock);
 
@@ -904,7 +1007,8 @@ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
 {
 	lock->owner = NULL;
 	raw_spin_lock_init(&lock->wait_lock);
-	plist_head_init(&lock->wait_list);
+	lock->waiters = RB_ROOT;
+	lock->waiters_leftmost = NULL;
 
 	debug_rt_mutex_init(lock, name);
 }
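rt_mutex_enqueue() pairs each rb-tree insert with a cached leftmost pointer: an insert that never descends right is the new minimum and updates the cache, so the top waiter is always one dereference away; on removal, rt_mutex_dequeue() advances the cache with rb_next() before erasing. A stripped-down userspace model of just the caching rule, using a plain unbalanced binary search tree instead of the kernel's red-black tree:

#include <assert.h>
#include <stddef.h>

struct node { int key; struct node *left, *right; };
struct tree { struct node *root, *leftmost; };

static void enqueue(struct tree *t, struct node *n)
{
	struct node **link = &t->root;
	int leftmost = 1;

	while (*link) {
		if (n->key < (*link)->key) {
			link = &(*link)->left;
		} else {
			link = &(*link)->right;
			leftmost = 0;	/* descended right: not the minimum */
		}
	}
	if (leftmost)
		t->leftmost = n;	/* new minimum: update the cache */
	*link = n;
}

int main(void)
{
	struct tree t = { NULL, NULL };
	struct node a = { 30, NULL, NULL };	/* think: prio-30 waiter */
	struct node b = { 10, NULL, NULL };	/* higher prio (lower key) */
	struct node c = { 20, NULL, NULL };

	enqueue(&t, &a);
	enqueue(&t, &b);
	enqueue(&t, &c);
	assert(t.leftmost == &b);	/* O(1) "top waiter", no tree walk */
	return 0;
}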
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 53a66c85261b..7431a9c86f35 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -40,13 +40,13 @@ extern void schedule_rt_mutex_test(struct rt_mutex *lock);
  * This is the control structure for tasks blocked on a rt_mutex,
  * which is allocated on the kernel stack on of the blocked task.
  *
- * @list_entry:		pi node to enqueue into the mutex waiters list
- * @pi_list_entry:	pi node to enqueue into the mutex owner waiters list
+ * @tree_entry:		pi node to enqueue into the mutex waiters tree
+ * @pi_tree_entry:	pi node to enqueue into the mutex owner waiters tree
  * @task:		task reference to the blocked task
  */
 struct rt_mutex_waiter {
-	struct plist_node	list_entry;
-	struct plist_node	pi_list_entry;
+	struct rb_node		tree_entry;
+	struct rb_node		pi_tree_entry;
 	struct task_struct	*task;
 	struct rt_mutex		*lock;
 #ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -54,14 +54,15 @@ struct rt_mutex_waiter {
 	struct pid		*deadlock_task_pid;
 	struct rt_mutex		*deadlock_lock;
 #endif
+	int prio;
 };
 
 /*
- * Various helpers to access the waiters-plist:
+ * Various helpers to access the waiters-tree:
  */
 static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
 {
-	return !plist_head_empty(&lock->wait_list);
+	return !RB_EMPTY_ROOT(&lock->waiters);
 }
 
 static inline struct rt_mutex_waiter *
@@ -69,8 +70,8 @@ rt_mutex_top_waiter(struct rt_mutex *lock)
 {
 	struct rt_mutex_waiter *w;
 
-	w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
-			      list_entry);
+	w = rb_entry(lock->waiters_leftmost, struct rt_mutex_waiter,
+		     tree_entry);
 	BUG_ON(w->lock != lock);
 
 	return w;
@@ -78,14 +79,14 @@ rt_mutex_top_waiter(struct rt_mutex *lock)
 
 static inline int task_has_pi_waiters(struct task_struct *p)
 {
-	return !plist_head_empty(&p->pi_waiters);
+	return !RB_EMPTY_ROOT(&p->pi_waiters);
 }
 
 static inline struct rt_mutex_waiter *
task_top_pi_waiter(struct task_struct *p)
 {
-	return plist_first_entry(&p->pi_waiters, struct rt_mutex_waiter,
-				 pi_list_entry);
+	return rb_entry(p->pi_waiters_leftmost, struct rt_mutex_waiter,
+			pi_tree_entry);
 }
 
 /*
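With the cached leftmost pointer, rt_mutex_top_waiter() and task_top_pi_waiter() are O(1): they read the cached rb_node and map it back to the enclosing waiter with rb_entry(), which is container_of() underneath. A minimal userspace model of that pointer arithmetic, using model types rather than the kernel's:

#include <assert.h>
#include <stddef.h>

struct rb_node_model { int color; };	/* placeholder for struct rb_node */

struct waiter_model {
	int prio;
	struct rb_node_model tree_entry;
};

/* What rb_entry()/container_of() expand to: subtract the member offset
 * from the member's address to recover the enclosing structure. */
#define model_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct waiter_model w = { .prio = 5 };
	struct rb_node_model *leftmost = &w.tree_entry;	/* the cached node */

	struct waiter_model *top =
		model_entry(leftmost, struct waiter_model, tree_entry);
	assert(top == &w && top->prio == 5);
	return 0;
}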
