Diffstat (limited to 'kernel/rtmutex.c')
-rw-r--r--  kernel/rtmutex.c | 318
1 file changed, 113 insertions(+), 205 deletions(-)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index a9604815786..ab449117aaf 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -20,41 +20,34 @@
 /*
  * lock->owner state tracking:
  *
- * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
- * are used to keep track of the "owner is pending" and "lock has
- * waiters" state.
+ * lock->owner holds the task_struct pointer of the owner. Bit 0
+ * is used to keep track of the "lock has waiters" state.
  *
- * owner        bit1    bit0
- * NULL         0       0       lock is free (fast acquire possible)
- * NULL         0       1       invalid state
- * NULL         1       0       Transitional State*
- * NULL         1       1       invalid state
- * taskpointer  0       0       lock is held (fast release possible)
- * taskpointer  0       1       task is pending owner
- * taskpointer  1       0       lock is held and has waiters
- * taskpointer  1       1       task is pending owner and lock has more waiters
- *
- * Pending ownership is assigned to the top (highest priority)
- * waiter of the lock, when the lock is released. The thread is woken
- * up and can now take the lock. Until the lock is taken (bit 0
- * cleared) a competing higher priority thread can steal the lock
- * which puts the woken up thread back on the waiters list.
+ * owner        bit0
+ * NULL         0       lock is free (fast acquire possible)
+ * NULL         1       lock is free and has waiters and the top waiter
+ *                      is going to take the lock*
+ * taskpointer  0       lock is held (fast release possible)
+ * taskpointer  1       lock is held and has waiters**
  *
  * The fast atomic compare exchange based acquire and release is only
- * possible when bit 0 and 1 of lock->owner are 0.
+ * possible when bit 0 of lock->owner is 0.
+ *
+ * (*) It also can be a transitional state when grabbing the lock
+ * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
+ * we need to set the bit0 before looking at the lock, and the owner may be
+ * NULL in this small time, hence this can be a transitional state.
  *
- * (*) There's a small time where the owner can be NULL and the
- * "lock has waiters" bit is set. This can happen when grabbing the lock.
- * To prevent a cmpxchg of the owner releasing the lock, we need to set this
- * bit before looking at the lock, hence the reason this is a transitional
- * state.
+ * (**) There is a small time when bit 0 is set but there are no
+ * waiters. This can happen when grabbing the lock in the slow path.
+ * To prevent a cmpxchg of the owner releasing the lock, we need to
+ * set this bit before looking at the lock.
  */
 
 static void
-rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
-                   unsigned long mask)
+rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
 {
-        unsigned long val = (unsigned long)owner | mask;
+        unsigned long val = (unsigned long)owner;
 
         if (rt_mutex_has_waiters(lock))
                 val |= RT_MUTEX_HAS_WAITERS;
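
The single "has waiters" bit is what keeps the lock-free fast path possible: acquire and release may cmpxchg lock->owner only while bit 0 is clear. Below is a minimal user-space sketch of that scheme, with C11 atomics standing in for the kernel's cmpxchg(); the type and helper names (sketch_rt_mutex, fast_acquire, fast_release) are hypothetical, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define RT_MUTEX_HAS_WAITERS    1UL     /* bit 0 of lock->owner */

struct sketch_rt_mutex {
        _Atomic uintptr_t owner;        /* task pointer | waiters bit */
};

/* Fast acquire: only legal while the owner is NULL and bit 0 is clear. */
static bool fast_acquire(struct sketch_rt_mutex *lock, uintptr_t task)
{
        uintptr_t expected = 0;         /* NULL owner, no waiters bit */

        return atomic_compare_exchange_strong(&lock->owner, &expected, task);
}

/* Fast release: fails as soon as a waiter has set bit 0. */
static bool fast_release(struct sketch_rt_mutex *lock, uintptr_t task)
{
        uintptr_t expected = task;      /* held by us, no waiters bit */

        return atomic_compare_exchange_strong(&lock->owner, &expected, 0);
}

When either compare-exchange fails, the caller must fall back to the slow path under lock->wait_lock, which is what the rest of this patch reworks.
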
@@ -203,15 +196,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
          * reached or the state of the chain has changed while we
          * dropped the locks.
          */
-        if (!waiter || !waiter->task)
+        if (!waiter)
                 goto out_unlock_pi;
 
         /*
          * Check the orig_waiter state. After we dropped the locks,
-         * the previous owner of the lock might have released the lock
-         * and made us the pending owner:
+         * the previous owner of the lock might have released the lock.
          */
-        if (orig_waiter && !orig_waiter->task)
+        if (orig_waiter && !rt_mutex_owner(orig_lock))
                 goto out_unlock_pi;
 
         /*
@@ -254,6 +246,17 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 
         /* Release the task */
         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+        if (!rt_mutex_owner(lock)) {
+                /*
+                 * If the requeue above changed the top waiter, then we need
+                 * to wake the new top waiter up to try to get the lock.
+                 */
+
+                if (top_waiter != rt_mutex_top_waiter(lock))
+                        wake_up_process(rt_mutex_top_waiter(lock)->task);
+                raw_spin_unlock(&lock->wait_lock);
+                goto out_put_task;
+        }
         put_task_struct(task);
 
         /* Grab the next task */
@@ -296,78 +299,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 }
 
 /*
- * Optimization: check if we can steal the lock from the
- * assigned pending owner [which might not have taken the
- * lock yet]:
- */
-static inline int try_to_steal_lock(struct rt_mutex *lock,
-                                    struct task_struct *task)
-{
-        struct task_struct *pendowner = rt_mutex_owner(lock);
-        struct rt_mutex_waiter *next;
-        unsigned long flags;
-
-        if (!rt_mutex_owner_pending(lock))
-                return 0;
-
-        if (pendowner == task)
-                return 1;
-
-        raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
-        if (task->prio >= pendowner->prio) {
-                raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
-                return 0;
-        }
-
-        /*
-         * Check if a waiter is enqueued on the pending owners
-         * pi_waiters list. Remove it and readjust pending owners
-         * priority.
-         */
-        if (likely(!rt_mutex_has_waiters(lock))) {
-                raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
-                return 1;
-        }
-
-        /* No chain handling, pending owner is not blocked on anything: */
-        next = rt_mutex_top_waiter(lock);
-        plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
-        __rt_mutex_adjust_prio(pendowner);
-        raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
-
-        /*
-         * We are going to steal the lock and a waiter was
-         * enqueued on the pending owners pi_waiters queue. So
-         * we have to enqueue this waiter into
-         * task->pi_waiters list. This covers the case,
-         * where task is boosted because it holds another
-         * lock and gets unboosted because the booster is
-         * interrupted, so we would delay a waiter with higher
-         * priority as task->normal_prio.
-         *
-         * Note: in the rare case of a SCHED_OTHER task changing
-         * its priority and thus stealing the lock, next->task
-         * might be task:
-         */
-        if (likely(next->task != task)) {
-                raw_spin_lock_irqsave(&task->pi_lock, flags);
-                plist_add(&next->pi_list_entry, &task->pi_waiters);
-                __rt_mutex_adjust_prio(task);
-                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-        }
-        return 1;
-}
-
-/*
  * Try to take an rt-mutex
  *
- * This fails
- * - when the lock has a real owner
- * - when a different pending owner exists and has higher priority than current
- *
  * Must be called with lock->wait_lock held.
+ *
+ * @lock:   the lock to be acquired.
+ * @task:   the task which wants to acquire the lock
+ * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
  */
-static int try_to_take_rt_mutex(struct rt_mutex *lock)
+static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+                                struct rt_mutex_waiter *waiter)
 {
         /*
          * We have to be careful here if the atomic speedups are
@@ -390,15 +331,52 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock)
          */
         mark_rt_mutex_waiters(lock);
 
-        if (rt_mutex_owner(lock) && !try_to_steal_lock(lock, current))
+        if (rt_mutex_owner(lock))
                 return 0;
 
+        /*
+         * It will get the lock because of one of these conditions:
+         * 1) there is no waiter
+         * 2) higher priority than waiters
+         * 3) it is top waiter
+         */
+        if (rt_mutex_has_waiters(lock)) {
+                if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
+                        if (!waiter || waiter != rt_mutex_top_waiter(lock))
+                                return 0;
+                }
+        }
+
+        if (waiter || rt_mutex_has_waiters(lock)) {
+                unsigned long flags;
+                struct rt_mutex_waiter *top;
+
+                raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+                /* remove the queued waiter. */
+                if (waiter) {
+                        plist_del(&waiter->list_entry, &lock->wait_list);
+                        task->pi_blocked_on = NULL;
+                }
+
+                /*
+                 * We have to enqueue the top waiter(if it exists) into
+                 * task->pi_waiters list.
+                 */
+                if (rt_mutex_has_waiters(lock)) {
+                        top = rt_mutex_top_waiter(lock);
+                        top->pi_list_entry.prio = top->list_entry.prio;
+                        plist_add(&top->pi_list_entry, &task->pi_waiters);
+                }
+                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+        }
+
         /* We got the lock. */
         debug_rt_mutex_lock(lock);
 
-        rt_mutex_set_owner(lock, current, 0);
+        rt_mutex_set_owner(lock, task);
 
-        rt_mutex_deadlock_account_lock(lock, current);
+        rt_mutex_deadlock_account_lock(lock, task);
 
         return 1;
 }
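
The three numbered conditions above collapse into one predicate over the lock state. A standalone sketch follows (the function name is made up for illustration; recall that plist priorities compare numerically, so a lower value means higher priority):

#include <stdbool.h>

/* Hypothetical helper mirroring the checks in try_to_take_rt_mutex(). */
static bool may_take_lock(bool lock_has_waiters, int task_prio,
                          int top_waiter_prio, bool task_is_top_waiter)
{
        if (!lock_has_waiters)
                return true;            /* 1) there is no waiter */
        if (task_prio < top_waiter_prio)
                return true;            /* 2) higher priority than waiters */
        return task_is_top_waiter;      /* 3) it is the top waiter */
}

This is what replaces stealing the lock from a "pending owner": a task now either out-ranks the top waiter or must be the top waiter itself.
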
@@ -436,6 +414,9 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
         raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
+        if (!owner)
+                return 0;
+
         if (waiter == rt_mutex_top_waiter(lock)) {
                 raw_spin_lock_irqsave(&owner->pi_lock, flags);
                 plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
@@ -472,21 +453,18 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 /*
  * Wake up the next waiter on the lock.
  *
- * Remove the top waiter from the current tasks waiter list and from
- * the lock waiter list. Set it as pending owner. Then wake it up.
+ * Remove the top waiter from the current tasks waiter list and wake it up.
  *
  * Called with lock->wait_lock held.
  */
 static void wakeup_next_waiter(struct rt_mutex *lock)
 {
         struct rt_mutex_waiter *waiter;
-        struct task_struct *pendowner;
         unsigned long flags;
 
         raw_spin_lock_irqsave(&current->pi_lock, flags);
 
         waiter = rt_mutex_top_waiter(lock);
-        plist_del(&waiter->list_entry, &lock->wait_list);
 
         /*
          * Remove it from current->pi_waiters. We do not adjust a
@@ -495,43 +473,19 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
          * lock->wait_lock.
          */
         plist_del(&waiter->pi_list_entry, &current->pi_waiters);
-        pendowner = waiter->task;
-        waiter->task = NULL;
 
-        rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
+        rt_mutex_set_owner(lock, NULL);
 
         raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
-        /*
-         * Clear the pi_blocked_on variable and enqueue a possible
-         * waiter into the pi_waiters list of the pending owner. This
-         * prevents that in case the pending owner gets unboosted a
-         * waiter with higher priority than pending-owner->normal_prio
-         * is blocked on the unboosted (pending) owner.
-         */
-        raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
-
-        WARN_ON(!pendowner->pi_blocked_on);
-        WARN_ON(pendowner->pi_blocked_on != waiter);
-        WARN_ON(pendowner->pi_blocked_on->lock != lock);
-
-        pendowner->pi_blocked_on = NULL;
-
-        if (rt_mutex_has_waiters(lock)) {
-                struct rt_mutex_waiter *next;
-
-                next = rt_mutex_top_waiter(lock);
-                plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
-        }
-        raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
-
-        wake_up_process(pendowner);
+        wake_up_process(waiter->task);
 }
 
 /*
- * Remove a waiter from a lock
+ * Remove a waiter from a lock and give up
  *
- * Must be called with lock->wait_lock held
+ * Must be called with lock->wait_lock held and
+ * have just failed to try_to_take_rt_mutex().
  */
 static void remove_waiter(struct rt_mutex *lock,
                           struct rt_mutex_waiter *waiter)
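
With the pending-owner machinery gone, a contended release simply publishes the "free and has waiters" transitional state from the table at the top of the file and wakes the top waiter, which then has to win try_to_take_rt_mutex() like everyone else. Continuing the earlier user-space sketch (wake_top_waiter() is a stub standing in for wake_up_process() on the top waiter's task):

/* Stub; models wake_up_process(rt_mutex_top_waiter(lock)->task). */
static void wake_top_waiter(struct sketch_rt_mutex *lock)
{
        (void)lock;
}

static void sketch_release_contended(struct sketch_rt_mutex *lock)
{
        /* NULL owner + waiters bit: "top waiter is going to take the lock". */
        atomic_store(&lock->owner, RT_MUTEX_HAS_WAITERS);
        wake_top_waiter(lock);
}
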
@@ -543,11 +497,13 @@ static void remove_waiter(struct rt_mutex *lock,
 
         raw_spin_lock_irqsave(&current->pi_lock, flags);
         plist_del(&waiter->list_entry, &lock->wait_list);
-        waiter->task = NULL;
         current->pi_blocked_on = NULL;
         raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
-        if (first && owner != current) {
+        if (!owner)
+                return;
+
+        if (first) {
 
                 raw_spin_lock_irqsave(&owner->pi_lock, flags);
 
@@ -614,21 +570,19 @@ void rt_mutex_adjust_pi(struct task_struct *task)
  *                      or TASK_UNINTERRUPTIBLE)
  * @timeout:            the pre-initialized and started timer, or NULL for none
  * @waiter:             the pre-initialized rt_mutex_waiter
- * @detect_deadlock:    passed to task_blocks_on_rt_mutex
  *
  * lock->wait_lock must be held by the caller.
  */
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
                     struct hrtimer_sleeper *timeout,
-                    struct rt_mutex_waiter *waiter,
-                    int detect_deadlock)
+                    struct rt_mutex_waiter *waiter)
 {
         int ret = 0;
 
         for (;;) {
                 /* Try to acquire the lock: */
-                if (try_to_take_rt_mutex(lock))
+                if (try_to_take_rt_mutex(lock, current, waiter))
                         break;
 
                 /*
@@ -645,39 +599,11 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
                         break;
                 }
 
-                /*
-                 * waiter->task is NULL the first time we come here and
-                 * when we have been woken up by the previous owner
-                 * but the lock got stolen by a higher prio task.
-                 */
-                if (!waiter->task) {
-                        ret = task_blocks_on_rt_mutex(lock, waiter, current,
-                                                      detect_deadlock);
-                        /*
-                         * If we got woken up by the owner then start loop
-                         * all over without going into schedule to try
-                         * to get the lock now:
-                         */
-                        if (unlikely(!waiter->task)) {
-                                /*
-                                 * Reset the return value. We might
-                                 * have returned with -EDEADLK and the
-                                 * owner released the lock while we
-                                 * were walking the pi chain.
-                                 */
-                                ret = 0;
-                                continue;
-                        }
-                        if (unlikely(ret))
-                                break;
-                }
-
                 raw_spin_unlock(&lock->wait_lock);
 
                 debug_rt_mutex_print_deadlock(waiter);
 
-                if (waiter->task)
-                        schedule_rt_mutex(lock);
+                schedule_rt_mutex(lock);
 
                 raw_spin_lock(&lock->wait_lock);
                 set_current_state(state);
@@ -698,12 +624,11 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
         int ret = 0;
 
         debug_rt_mutex_init_waiter(&waiter);
-        waiter.task = NULL;
 
         raw_spin_lock(&lock->wait_lock);
 
         /* Try to acquire the lock again: */
-        if (try_to_take_rt_mutex(lock)) {
+        if (try_to_take_rt_mutex(lock, current, NULL)) {
                 raw_spin_unlock(&lock->wait_lock);
                 return 0;
         }
@@ -717,12 +642,14 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                         timeout->task = NULL;
         }
 
-        ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
-                                  detect_deadlock);
+        ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
+
+        if (likely(!ret))
+                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
 
         set_current_state(TASK_RUNNING);
 
-        if (unlikely(waiter.task))
+        if (unlikely(ret))
                 remove_waiter(lock, &waiter);
 
         /*
@@ -737,14 +664,6 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
         if (unlikely(timeout))
                 hrtimer_cancel(&timeout->timer);
 
-        /*
-         * Readjust priority, when we did not get the lock. We might
-         * have been the pending owner and boosted. Since we did not
-         * take the lock, the PI boost has to go.
-         */
-        if (unlikely(ret))
-                rt_mutex_adjust_prio(current);
-
         debug_rt_mutex_free_waiter(&waiter);
 
         return ret;
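
Taken together, the hunks above flatten the slow path: the waiter is enqueued exactly once before the wait loop, the loop only tries the lock and schedules, and the waiter is dequeued only on failure, so the waiter.task sentinel and the post-failure rt_mutex_adjust_prio() call disappear. A control-flow sketch under those assumptions (every helper here is a stand-in, not a kernel function):

/* Each stub models the kernel call named in its comment. */
static int block_on_lock(void) { return 0; }    /* task_blocks_on_rt_mutex() */
static int wait_loop(void)     { return 0; }    /* __rt_mutex_slowlock() */
static void give_up(void)      { }              /* remove_waiter() */

static int slowlock_shape(void)
{
        int ret = block_on_lock();      /* enqueue the waiter exactly once */

        if (ret == 0)
                ret = wait_loop();      /* loop: try_to_take, else schedule */
        if (ret != 0)
                give_up();              /* dequeue only if we never got it */
        return ret;
}
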
@@ -762,7 +681,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
 
         if (likely(rt_mutex_owner(lock) != current)) {
 
-                ret = try_to_take_rt_mutex(lock);
+                ret = try_to_take_rt_mutex(lock, current, NULL);
                 /*
                  * try_to_take_rt_mutex() sets the lock waiters
                  * bit unconditionally. Clean this up.
@@ -992,7 +911,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 {
         __rt_mutex_init(lock, NULL);
         debug_rt_mutex_proxy_lock(lock, proxy_owner);
-        rt_mutex_set_owner(lock, proxy_owner, 0);
+        rt_mutex_set_owner(lock, proxy_owner);
         rt_mutex_deadlock_account_lock(lock, proxy_owner);
 }
 
@@ -1008,7 +927,7 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                            struct task_struct *proxy_owner)
 {
         debug_rt_mutex_proxy_unlock(lock);
-        rt_mutex_set_owner(lock, NULL, 0);
+        rt_mutex_set_owner(lock, NULL);
         rt_mutex_deadlock_account_unlock(proxy_owner);
 }
 
@@ -1034,20 +953,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 
         raw_spin_lock(&lock->wait_lock);
 
-        mark_rt_mutex_waiters(lock);
-
-        if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
-                /* We got the lock for task. */
-                debug_rt_mutex_lock(lock);
-                rt_mutex_set_owner(lock, task, 0);
+        if (try_to_take_rt_mutex(lock, task, NULL)) {
                 raw_spin_unlock(&lock->wait_lock);
-                rt_mutex_deadlock_account_lock(lock, task);
                 return 1;
         }
 
         ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
 
-        if (ret && !waiter->task) {
+        if (ret && !rt_mutex_owner(lock)) {
                 /*
                  * Reset the return value. We might have
                  * returned with -EDEADLK and the owner
@@ -1056,6 +969,10 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                  */
                 ret = 0;
         }
+
+        if (unlikely(ret))
+                remove_waiter(lock, waiter);
+
         raw_spin_unlock(&lock->wait_lock);
 
         debug_rt_mutex_print_deadlock(waiter);
@@ -1110,12 +1027,11 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 
         set_current_state(TASK_INTERRUPTIBLE);
 
-        ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter,
-                                  detect_deadlock);
+        ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
 
         set_current_state(TASK_RUNNING);
 
-        if (unlikely(waiter->task))
+        if (unlikely(ret))
                 remove_waiter(lock, waiter);
 
         /*
@@ -1126,13 +1042,5 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 
         raw_spin_unlock(&lock->wait_lock);
 
-        /*
-         * Readjust priority, when we did not get the lock. We might have been
-         * the pending owner and boosted. Since we did not take the lock, the
-         * PI boost has to go.
-         */
-        if (unlikely(ret))
-                rt_mutex_adjust_prio(current);
-
         return ret;
 }
