diff options
author | Andi Kleen <ak@suse.de> | 2007-10-15 11:00:14 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2007-10-15 11:00:14 -0400 |
commit | 8cbbe86dfcfd68ad69916164bdc838d9e09adca8 (patch) | |
tree | 32e949b7a0fac548d7668f01f5d53b727c885502 /kernel | |
parent | 3a5c359a58c39801d838c508f127bdb228af28b0 (diff) |
sched: cleanup: refactor common code of sleep_on / wait_for_completion
Refactor common code of sleep_on / wait_for_completion
These functions were largely cut'n'pasted. This moves
the common code into single helpers instead. Advantage
is about 1k less code on x86-64 and 91 lines of code removed.
It adds one function call to the non-timeout version of
the functions; I don't expect this to be measurable.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 188 |
1 file changed, 49 insertions, 139 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 4c15b1726196..db88b5655aca 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -3697,206 +3697,116 @@ void fastcall complete_all(struct completion *x) | |||
3697 | } | 3697 | } |
3698 | EXPORT_SYMBOL(complete_all); | 3698 | EXPORT_SYMBOL(complete_all); |
3699 | 3699 | ||
3700 | void fastcall __sched wait_for_completion(struct completion *x) | 3700 | static inline long __sched |
3701 | do_wait_for_common(struct completion *x, long timeout, int state) | ||
3701 | { | 3702 | { |
3702 | might_sleep(); | ||
3703 | |||
3704 | spin_lock_irq(&x->wait.lock); | ||
3705 | if (!x->done) { | 3703 | if (!x->done) { |
3706 | DECLARE_WAITQUEUE(wait, current); | 3704 | DECLARE_WAITQUEUE(wait, current); |
3707 | 3705 | ||
3708 | wait.flags |= WQ_FLAG_EXCLUSIVE; | 3706 | wait.flags |= WQ_FLAG_EXCLUSIVE; |
3709 | __add_wait_queue_tail(&x->wait, &wait); | 3707 | __add_wait_queue_tail(&x->wait, &wait); |
3710 | do { | 3708 | do { |
3711 | __set_current_state(TASK_UNINTERRUPTIBLE); | 3709 | if (state == TASK_INTERRUPTIBLE && |
3712 | spin_unlock_irq(&x->wait.lock); | 3710 | signal_pending(current)) { |
3713 | schedule(); | 3711 | __remove_wait_queue(&x->wait, &wait); |
3714 | spin_lock_irq(&x->wait.lock); | 3712 | return -ERESTARTSYS; |
3715 | } while (!x->done); | 3713 | } |
3716 | __remove_wait_queue(&x->wait, &wait); | 3714 | __set_current_state(state); |
3717 | } | ||
3718 | x->done--; | ||
3719 | spin_unlock_irq(&x->wait.lock); | ||
3720 | } | ||
3721 | EXPORT_SYMBOL(wait_for_completion); | ||
3722 | |||
3723 | unsigned long fastcall __sched | ||
3724 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) | ||
3725 | { | ||
3726 | might_sleep(); | ||
3727 | |||
3728 | spin_lock_irq(&x->wait.lock); | ||
3729 | if (!x->done) { | ||
3730 | DECLARE_WAITQUEUE(wait, current); | ||
3731 | |||
3732 | wait.flags |= WQ_FLAG_EXCLUSIVE; | ||
3733 | __add_wait_queue_tail(&x->wait, &wait); | ||
3734 | do { | ||
3735 | __set_current_state(TASK_UNINTERRUPTIBLE); | ||
3736 | spin_unlock_irq(&x->wait.lock); | 3715 | spin_unlock_irq(&x->wait.lock); |
3737 | timeout = schedule_timeout(timeout); | 3716 | timeout = schedule_timeout(timeout); |
3738 | spin_lock_irq(&x->wait.lock); | 3717 | spin_lock_irq(&x->wait.lock); |
3739 | if (!timeout) { | 3718 | if (!timeout) { |
3740 | __remove_wait_queue(&x->wait, &wait); | 3719 | __remove_wait_queue(&x->wait, &wait); |
3741 | goto out; | 3720 | return timeout; |
3742 | } | 3721 | } |
3743 | } while (!x->done); | 3722 | } while (!x->done); |
3744 | __remove_wait_queue(&x->wait, &wait); | 3723 | __remove_wait_queue(&x->wait, &wait); |
3745 | } | 3724 | } |
3746 | x->done--; | 3725 | x->done--; |
3747 | out: | ||
3748 | spin_unlock_irq(&x->wait.lock); | ||
3749 | return timeout; | 3726 | return timeout; |
3750 | } | 3727 | } |
3751 | EXPORT_SYMBOL(wait_for_completion_timeout); | ||
3752 | 3728 | ||
3753 | int fastcall __sched wait_for_completion_interruptible(struct completion *x) | 3729 | static long __sched |
3730 | wait_for_common(struct completion *x, long timeout, int state) | ||
3754 | { | 3731 | { |
3755 | int ret = 0; | ||
3756 | |||
3757 | might_sleep(); | 3732 | might_sleep(); |
3758 | 3733 | ||
3759 | spin_lock_irq(&x->wait.lock); | 3734 | spin_lock_irq(&x->wait.lock); |
3760 | if (!x->done) { | 3735 | timeout = do_wait_for_common(x, timeout, state); |
3761 | DECLARE_WAITQUEUE(wait, current); | ||
3762 | |||
3763 | wait.flags |= WQ_FLAG_EXCLUSIVE; | ||
3764 | __add_wait_queue_tail(&x->wait, &wait); | ||
3765 | do { | ||
3766 | if (signal_pending(current)) { | ||
3767 | ret = -ERESTARTSYS; | ||
3768 | __remove_wait_queue(&x->wait, &wait); | ||
3769 | goto out; | ||
3770 | } | ||
3771 | __set_current_state(TASK_INTERRUPTIBLE); | ||
3772 | spin_unlock_irq(&x->wait.lock); | ||
3773 | schedule(); | ||
3774 | spin_lock_irq(&x->wait.lock); | ||
3775 | } while (!x->done); | ||
3776 | __remove_wait_queue(&x->wait, &wait); | ||
3777 | } | ||
3778 | x->done--; | ||
3779 | out: | ||
3780 | spin_unlock_irq(&x->wait.lock); | 3736 | spin_unlock_irq(&x->wait.lock); |
3737 | return timeout; | ||
3738 | } | ||
3781 | 3739 | ||
3782 | return ret; | 3740 | void fastcall __sched wait_for_completion(struct completion *x) |
3741 | { | ||
3742 | wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); | ||
3783 | } | 3743 | } |
3784 | EXPORT_SYMBOL(wait_for_completion_interruptible); | 3744 | EXPORT_SYMBOL(wait_for_completion); |
3785 | 3745 | ||
3786 | unsigned long fastcall __sched | 3746 | unsigned long fastcall __sched |
3787 | wait_for_completion_interruptible_timeout(struct completion *x, | 3747 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) |
3788 | unsigned long timeout) | ||
3789 | { | 3748 | { |
3790 | might_sleep(); | 3749 | return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); |
3791 | |||
3792 | spin_lock_irq(&x->wait.lock); | ||
3793 | if (!x->done) { | ||
3794 | DECLARE_WAITQUEUE(wait, current); | ||
3795 | |||
3796 | wait.flags |= WQ_FLAG_EXCLUSIVE; | ||
3797 | __add_wait_queue_tail(&x->wait, &wait); | ||
3798 | do { | ||
3799 | if (signal_pending(current)) { | ||
3800 | timeout = -ERESTARTSYS; | ||
3801 | __remove_wait_queue(&x->wait, &wait); | ||
3802 | goto out; | ||
3803 | } | ||
3804 | __set_current_state(TASK_INTERRUPTIBLE); | ||
3805 | spin_unlock_irq(&x->wait.lock); | ||
3806 | timeout = schedule_timeout(timeout); | ||
3807 | spin_lock_irq(&x->wait.lock); | ||
3808 | if (!timeout) { | ||
3809 | __remove_wait_queue(&x->wait, &wait); | ||
3810 | goto out; | ||
3811 | } | ||
3812 | } while (!x->done); | ||
3813 | __remove_wait_queue(&x->wait, &wait); | ||
3814 | } | ||
3815 | x->done--; | ||
3816 | out: | ||
3817 | spin_unlock_irq(&x->wait.lock); | ||
3818 | return timeout; | ||
3819 | } | 3750 | } |
3820 | EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); | 3751 | EXPORT_SYMBOL(wait_for_completion_timeout); |
3821 | 3752 | ||
3822 | static inline void | 3753 | int __sched wait_for_completion_interruptible(struct completion *x) |
3823 | sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags) | ||
3824 | { | 3754 | { |
3825 | spin_lock_irqsave(&q->lock, *flags); | 3755 | return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); |
3826 | __add_wait_queue(q, wait); | ||
3827 | spin_unlock(&q->lock); | ||
3828 | } | 3756 | } |
3757 | EXPORT_SYMBOL(wait_for_completion_interruptible); | ||
3829 | 3758 | ||
3830 | static inline void | 3759 | unsigned long fastcall __sched |
3831 | sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags) | 3760 | wait_for_completion_interruptible_timeout(struct completion *x, |
3761 | unsigned long timeout) | ||
3832 | { | 3762 | { |
3833 | spin_lock_irq(&q->lock); | 3763 | return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); |
3834 | __remove_wait_queue(q, wait); | ||
3835 | spin_unlock_irqrestore(&q->lock, *flags); | ||
3836 | } | 3764 | } |
3765 | EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); | ||
3837 | 3766 | ||
3838 | void __sched interruptible_sleep_on(wait_queue_head_t *q) | 3767 | static long __sched |
3768 | sleep_on_common(wait_queue_head_t *q, int state, long timeout) | ||
3839 | { | 3769 | { |
3840 | unsigned long flags; | 3770 | unsigned long flags; |
3841 | wait_queue_t wait; | 3771 | wait_queue_t wait; |
3842 | 3772 | ||
3843 | init_waitqueue_entry(&wait, current); | 3773 | init_waitqueue_entry(&wait, current); |
3844 | 3774 | ||
3845 | current->state = TASK_INTERRUPTIBLE; | 3775 | __set_current_state(state); |
3846 | 3776 | ||
3847 | sleep_on_head(q, &wait, &flags); | 3777 | spin_lock_irqsave(&q->lock, flags); |
3848 | schedule(); | 3778 | __add_wait_queue(q, &wait); |
3849 | sleep_on_tail(q, &wait, &flags); | 3779 | spin_unlock(&q->lock); |
3780 | timeout = schedule_timeout(timeout); | ||
3781 | spin_lock_irq(&q->lock); | ||
3782 | __remove_wait_queue(q, &wait); | ||
3783 | spin_unlock_irqrestore(&q->lock, flags); | ||
3784 | |||
3785 | return timeout; | ||
3786 | } | ||
3787 | |||
3788 | void __sched interruptible_sleep_on(wait_queue_head_t *q) | ||
3789 | { | ||
3790 | sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); | ||
3850 | } | 3791 | } |
3851 | EXPORT_SYMBOL(interruptible_sleep_on); | 3792 | EXPORT_SYMBOL(interruptible_sleep_on); |
3852 | 3793 | ||
3853 | long __sched | 3794 | long __sched |
3854 | interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) | 3795 | interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout) |
3855 | { | 3796 | { |
3856 | unsigned long flags; | 3797 | return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout); |
3857 | wait_queue_t wait; | ||
3858 | |||
3859 | init_waitqueue_entry(&wait, current); | ||
3860 | |||
3861 | current->state = TASK_INTERRUPTIBLE; | ||
3862 | |||
3863 | sleep_on_head(q, &wait, &flags); | ||
3864 | timeout = schedule_timeout(timeout); | ||
3865 | sleep_on_tail(q, &wait, &flags); | ||
3866 | |||
3867 | return timeout; | ||
3868 | } | 3798 | } |
3869 | EXPORT_SYMBOL(interruptible_sleep_on_timeout); | 3799 | EXPORT_SYMBOL(interruptible_sleep_on_timeout); |
3870 | 3800 | ||
3871 | void __sched sleep_on(wait_queue_head_t *q) | 3801 | void __sched sleep_on(wait_queue_head_t *q) |
3872 | { | 3802 | { |
3873 | unsigned long flags; | 3803 | sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); |
3874 | wait_queue_t wait; | ||
3875 | |||
3876 | init_waitqueue_entry(&wait, current); | ||
3877 | |||
3878 | current->state = TASK_UNINTERRUPTIBLE; | ||
3879 | |||
3880 | sleep_on_head(q, &wait, &flags); | ||
3881 | schedule(); | ||
3882 | sleep_on_tail(q, &wait, &flags); | ||
3883 | } | 3804 | } |
3884 | EXPORT_SYMBOL(sleep_on); | 3805 | EXPORT_SYMBOL(sleep_on); |
3885 | 3806 | ||
3886 | long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) | 3807 | long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout) |
3887 | { | 3808 | { |
3888 | unsigned long flags; | 3809 | return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout); |
3889 | wait_queue_t wait; | ||
3890 | |||
3891 | init_waitqueue_entry(&wait, current); | ||
3892 | |||
3893 | current->state = TASK_UNINTERRUPTIBLE; | ||
3894 | |||
3895 | sleep_on_head(q, &wait, &flags); | ||
3896 | timeout = schedule_timeout(timeout); | ||
3897 | sleep_on_tail(q, &wait, &flags); | ||
3898 | |||
3899 | return timeout; | ||
3900 | } | 3810 | } |
3901 | EXPORT_SYMBOL(sleep_on_timeout); | 3811 | EXPORT_SYMBOL(sleep_on_timeout); |
3902 | 3812 | ||