Diffstat (limited to 'kernel/rtmutex.c')
 -rw-r--r--  kernel/rtmutex.c | 57
 1 files changed, 28 insertions, 29 deletions
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 45d61016da57..d2ef13b485e7 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -157,12 +157,11 @@ int max_lock_depth = 1024;
  * Decreases task's usage by one - may thus free the task.
  * Returns 0 or -EDEADLK.
  */
-static int rt_mutex_adjust_prio_chain(task_t *task,
+static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 				      int deadlock_detect,
 				      struct rt_mutex *orig_lock,
 				      struct rt_mutex_waiter *orig_waiter,
-				      struct task_struct *top_task
-				      __IP_DECL__)
+				      struct task_struct *top_task)
 {
 	struct rt_mutex *lock;
 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
@@ -283,6 +282,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
 	spin_unlock_irqrestore(&task->pi_lock, flags);
  out_put_task:
 	put_task_struct(task);
+
 	return ret;
 }
 
@@ -357,7 +357,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock)
  *
  * Must be called with lock->wait_lock held.
  */
-static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
+static int try_to_take_rt_mutex(struct rt_mutex *lock)
 {
 	/*
 	 * We have to be careful here if the atomic speedups are
@@ -384,7 +384,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
 		return 0;
 
 	/* We got the lock. */
-	debug_rt_mutex_lock(lock __IP__);
+	debug_rt_mutex_lock(lock);
 
 	rt_mutex_set_owner(lock, current, 0);
 
@@ -402,13 +402,12 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
  */
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 				   struct rt_mutex_waiter *waiter,
-				   int detect_deadlock
-				   __IP_DECL__)
+				   int detect_deadlock)
 {
+	struct task_struct *owner = rt_mutex_owner(lock);
 	struct rt_mutex_waiter *top_waiter = waiter;
-	task_t *owner = rt_mutex_owner(lock);
-	int boost = 0, res;
 	unsigned long flags;
+	int boost = 0, res;
 
 	spin_lock_irqsave(&current->pi_lock, flags);
 	__rt_mutex_adjust_prio(current);
@@ -454,7 +453,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	spin_unlock(&lock->wait_lock);
 
 	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
-					 current __IP__);
+					 current);
 
 	spin_lock(&lock->wait_lock);
 
@@ -526,12 +525,12 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
  * Must be called with lock->wait_lock held
  */
 static void remove_waiter(struct rt_mutex *lock,
-			  struct rt_mutex_waiter *waiter __IP_DECL__)
+			  struct rt_mutex_waiter *waiter)
 {
 	int first = (waiter == rt_mutex_top_waiter(lock));
-	int boost = 0;
-	task_t *owner = rt_mutex_owner(lock);
+	struct task_struct *owner = rt_mutex_owner(lock);
 	unsigned long flags;
+	int boost = 0;
 
 	spin_lock_irqsave(&current->pi_lock, flags);
 	plist_del(&waiter->list_entry, &lock->wait_list);
@@ -568,7 +567,7 @@ static void remove_waiter(struct rt_mutex *lock,
 
 	spin_unlock(&lock->wait_lock);
 
-	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__);
+	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
 
 	spin_lock(&lock->wait_lock);
 }
@@ -595,7 +594,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 	get_task_struct(task);
 	spin_unlock_irqrestore(&task->pi_lock, flags);
 
-	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
+	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
 }
 
 /*
@@ -604,7 +603,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
-		  int detect_deadlock __IP_DECL__)
+		  int detect_deadlock)
 {
 	struct rt_mutex_waiter waiter;
 	int ret = 0;
@@ -615,7 +614,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	spin_lock(&lock->wait_lock);
 
 	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock __IP__)) {
+	if (try_to_take_rt_mutex(lock)) {
 		spin_unlock(&lock->wait_lock);
 		return 0;
 	}
@@ -629,7 +628,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 	for (;;) {
 		/* Try to acquire the lock: */
-		if (try_to_take_rt_mutex(lock __IP__))
+		if (try_to_take_rt_mutex(lock))
 			break;
 
 		/*
@@ -653,7 +652,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		 */
 		if (!waiter.task) {
 			ret = task_blocks_on_rt_mutex(lock, &waiter,
-						      detect_deadlock __IP__);
+						      detect_deadlock);
 			/*
 			 * If we got woken up by the owner then start loop
 			 * all over without going into schedule to try
@@ -680,7 +679,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	set_current_state(TASK_RUNNING);
 
 	if (unlikely(waiter.task))
-		remove_waiter(lock, &waiter __IP__);
+		remove_waiter(lock, &waiter);
 
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit
@@ -711,7 +710,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 * Slow path try-lock function:
 */
 static inline int
-rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
+rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
 	int ret = 0;
 
@@ -719,7 +718,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
 
 	if (likely(rt_mutex_owner(lock) != current)) {
 
-		ret = try_to_take_rt_mutex(lock __IP__);
+		ret = try_to_take_rt_mutex(lock);
 		/*
 		 * try_to_take_rt_mutex() sets the lock waiters
 		 * bit unconditionally. Clean this up.
@@ -769,13 +768,13 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
 		 int detect_deadlock,
 		 int (*slowfn)(struct rt_mutex *lock, int state,
 			       struct hrtimer_sleeper *timeout,
-			       int detect_deadlock __IP_DECL__))
+			       int detect_deadlock))
 {
 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, NULL, detect_deadlock __RET_IP__);
+		return slowfn(lock, state, NULL, detect_deadlock);
 }
 
 static inline int
@@ -783,24 +782,24 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
 		struct hrtimer_sleeper *timeout, int detect_deadlock,
 		int (*slowfn)(struct rt_mutex *lock, int state,
 			      struct hrtimer_sleeper *timeout,
-			      int detect_deadlock __IP_DECL__))
+			      int detect_deadlock))
 {
 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, timeout, detect_deadlock __RET_IP__);
+		return slowfn(lock, state, timeout, detect_deadlock);
 }
 
 static inline int
 rt_mutex_fasttrylock(struct rt_mutex *lock,
-		     int (*slowfn)(struct rt_mutex *lock __IP_DECL__))
+		     int (*slowfn)(struct rt_mutex *lock))
 {
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 1;
 	}
-	return slowfn(lock __RET_IP__);
+	return slowfn(lock);
 }
 
 static inline void
| @@ -948,7 +947,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock, | |||
| 948 | struct task_struct *proxy_owner) | 947 | struct task_struct *proxy_owner) |
| 949 | { | 948 | { |
| 950 | __rt_mutex_init(lock, NULL); | 949 | __rt_mutex_init(lock, NULL); |
| 951 | debug_rt_mutex_proxy_lock(lock, proxy_owner __RET_IP__); | 950 | debug_rt_mutex_proxy_lock(lock, proxy_owner); |
| 952 | rt_mutex_set_owner(lock, proxy_owner, 0); | 951 | rt_mutex_set_owner(lock, proxy_owner, 0); |
| 953 | rt_mutex_deadlock_account_lock(lock, proxy_owner); | 952 | rt_mutex_deadlock_account_lock(lock, proxy_owner); |
| 954 | } | 953 | } |
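
Note on the pattern being removed: every __IP_DECL__ in a prototype above added a trailing instruction-pointer parameter when rt-mutex debugging was compiled in, __IP__ forwarded that parameter to callees, and __RET_IP__ captured the call site at the outermost entry point. The macros are defined outside this file; a minimal sketch of the convention, reconstructed from the 2.6.17-era mutex debug headers (the #ifdef placement and comments here are assumptions, not part of this diff):

    /* Sketch of the removed convention -- reconstruction, not kernel/rtmutex.c. */
    #ifdef CONFIG_DEBUG_RT_MUTEXES
    /* Debug build: thread the caller's IP through the whole lock path. */
    # define __IP_DECL__	, unsigned long ip
    # define __IP__		, ip
    # define __RET_IP__	, (unsigned long)__builtin_return_address(0)
    #else
    /* Non-debug build: the extra argument compiles away entirely. */
    # define __IP_DECL__
    # define __IP__
    # define __RET_IP__
    #endif

With the macros gone, every prototype and call site shrinks by one conditional argument, as the hunks above show; the debug code is presumably left to capture the call site itself (e.g. via __builtin_return_address(0) inside the debug functions) when it needs one. The same diff also retires the deprecated task_t typedef, historically just "typedef struct task_struct task_t;", in favor of spelling out struct task_struct.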
