author		Andi Kleen <ak@suse.de>		2007-10-15 11:00:14 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:14 -0400
commit		8cbbe86dfcfd68ad69916164bdc838d9e09adca8 (patch)
tree		32e949b7a0fac548d7668f01f5d53b727c885502 /kernel
parent		3a5c359a58c39801d838c508f127bdb228af28b0 (diff)
sched: cleanup: refactor common code of sleep_on / wait_for_completion
Refactor common code of sleep_on / wait_for_completion.

These functions were largely cut'n'pasted. This moves the common code
into single helpers instead. The advantage is about 1k less code on
x86-64 and 91 lines of code removed.

It adds one function call to the non-timeout version of these
functions; I don't expect this to be measurable.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
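The refactoring pattern is easy to see in miniature. What follows is a
hedged userspace sketch, not kernel code: toy_completion, toy_wait_common
and the other names are hypothetical. Several cut'n'pasted wait variants
collapse into one helper that takes the wakeup mode as a parameter,
mirroring do_wait_for_common() / wait_for_common() in the patch below.
Build with: cc -pthread toy.c

/*
 * Userspace analogue of a completion: a counter guarded by a mutex,
 * with a condition variable standing in for the wait queue.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

/* The single common helper: one body instead of a copy per variant. */
static int toy_wait_common(struct toy_completion *x, bool interruptible,
			   const bool *sig_pending)
{
	int ret = 0;

	pthread_mutex_lock(&x->lock);
	while (!x->done) {
		if (interruptible && sig_pending && *sig_pending) {
			ret = -1;	/* stand-in for -ERESTARTSYS */
			goto out;
		}
		pthread_cond_wait(&x->cond, &x->lock);
	}
	x->done--;	/* consume one completion, like x->done-- in the patch */
out:
	pthread_mutex_unlock(&x->lock);
	return ret;
}

/* The former copy-and-paste variants shrink to one-line wrappers. */
static void toy_wait(struct toy_completion *x)
{
	toy_wait_common(x, false, NULL);
}

static int toy_wait_interruptible(struct toy_completion *x, const bool *sig)
{
	return toy_wait_common(x, true, sig);
}

int main(void)
{
	struct toy_completion c = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
	};
	bool sig = false;

	/* Complete twice first, so both wrappers return immediately. */
	pthread_mutex_lock(&c.lock);
	c.done += 2;
	pthread_cond_broadcast(&c.cond);
	pthread_mutex_unlock(&c.lock);

	toy_wait(&c);
	if (toy_wait_interruptible(&c, &sig) == 0)
		puts("both completions consumed");
	return 0;
}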
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	188
1 file changed, 49 insertions(+), 139 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4c15b1726196..db88b5655aca 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3697,206 +3697,116 @@ void fastcall complete_all(struct completion *x)
 }
 EXPORT_SYMBOL(complete_all);
 
-void fastcall __sched wait_for_completion(struct completion *x)
+static inline long __sched
+do_wait_for_common(struct completion *x, long timeout, int state)
 {
-	might_sleep();
-
-	spin_lock_irq(&x->wait.lock);
 	if (!x->done) {
 		DECLARE_WAITQUEUE(wait, current);
 
 		wait.flags |= WQ_FLAG_EXCLUSIVE;
 		__add_wait_queue_tail(&x->wait, &wait);
 		do {
-			__set_current_state(TASK_UNINTERRUPTIBLE);
-			spin_unlock_irq(&x->wait.lock);
-			schedule();
-			spin_lock_irq(&x->wait.lock);
-		} while (!x->done);
-		__remove_wait_queue(&x->wait, &wait);
-	}
-	x->done--;
-	spin_unlock_irq(&x->wait.lock);
-}
-EXPORT_SYMBOL(wait_for_completion);
-
-unsigned long fastcall __sched
-wait_for_completion_timeout(struct completion *x, unsigned long timeout)
-{
-	might_sleep();
-
-	spin_lock_irq(&x->wait.lock);
-	if (!x->done) {
-		DECLARE_WAITQUEUE(wait, current);
-
-		wait.flags |= WQ_FLAG_EXCLUSIVE;
-		__add_wait_queue_tail(&x->wait, &wait);
-		do {
-			__set_current_state(TASK_UNINTERRUPTIBLE);
+			if (state == TASK_INTERRUPTIBLE &&
+			    signal_pending(current)) {
+				__remove_wait_queue(&x->wait, &wait);
+				return -ERESTARTSYS;
+			}
+			__set_current_state(state);
 			spin_unlock_irq(&x->wait.lock);
 			timeout = schedule_timeout(timeout);
 			spin_lock_irq(&x->wait.lock);
 			if (!timeout) {
 				__remove_wait_queue(&x->wait, &wait);
-				goto out;
+				return timeout;
 			}
 		} while (!x->done);
 		__remove_wait_queue(&x->wait, &wait);
 	}
 	x->done--;
-out:
-	spin_unlock_irq(&x->wait.lock);
 	return timeout;
 }
-EXPORT_SYMBOL(wait_for_completion_timeout);
 
-int fastcall __sched wait_for_completion_interruptible(struct completion *x)
+static long __sched
+wait_for_common(struct completion *x, long timeout, int state)
 {
-	int ret = 0;
-
 	might_sleep();
 
 	spin_lock_irq(&x->wait.lock);
-	if (!x->done) {
-		DECLARE_WAITQUEUE(wait, current);
-
-		wait.flags |= WQ_FLAG_EXCLUSIVE;
-		__add_wait_queue_tail(&x->wait, &wait);
-		do {
-			if (signal_pending(current)) {
-				ret = -ERESTARTSYS;
-				__remove_wait_queue(&x->wait, &wait);
-				goto out;
-			}
-			__set_current_state(TASK_INTERRUPTIBLE);
-			spin_unlock_irq(&x->wait.lock);
-			schedule();
-			spin_lock_irq(&x->wait.lock);
-		} while (!x->done);
-		__remove_wait_queue(&x->wait, &wait);
-	}
-	x->done--;
-out:
+	timeout = do_wait_for_common(x, timeout, state);
 	spin_unlock_irq(&x->wait.lock);
+	return timeout;
+}
 
-	return ret;
+void fastcall __sched wait_for_completion(struct completion *x)
+{
+	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
 }
-EXPORT_SYMBOL(wait_for_completion_interruptible);
+EXPORT_SYMBOL(wait_for_completion);
 
 unsigned long fastcall __sched
-wait_for_completion_interruptible_timeout(struct completion *x,
-					  unsigned long timeout)
+wait_for_completion_timeout(struct completion *x, unsigned long timeout)
 {
-	might_sleep();
-
-	spin_lock_irq(&x->wait.lock);
-	if (!x->done) {
-		DECLARE_WAITQUEUE(wait, current);
-
-		wait.flags |= WQ_FLAG_EXCLUSIVE;
-		__add_wait_queue_tail(&x->wait, &wait);
-		do {
-			if (signal_pending(current)) {
-				timeout = -ERESTARTSYS;
-				__remove_wait_queue(&x->wait, &wait);
-				goto out;
-			}
-			__set_current_state(TASK_INTERRUPTIBLE);
-			spin_unlock_irq(&x->wait.lock);
-			timeout = schedule_timeout(timeout);
-			spin_lock_irq(&x->wait.lock);
-			if (!timeout) {
-				__remove_wait_queue(&x->wait, &wait);
-				goto out;
-			}
-		} while (!x->done);
-		__remove_wait_queue(&x->wait, &wait);
-	}
-	x->done--;
-out:
-	spin_unlock_irq(&x->wait.lock);
-	return timeout;
+	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
 }
-EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+EXPORT_SYMBOL(wait_for_completion_timeout);
 
-static inline void
-sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+int __sched wait_for_completion_interruptible(struct completion *x)
 {
-	spin_lock_irqsave(&q->lock, *flags);
-	__add_wait_queue(q, wait);
-	spin_unlock(&q->lock);
+	return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
 }
+EXPORT_SYMBOL(wait_for_completion_interruptible);
 
-static inline void
-sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+unsigned long fastcall __sched
+wait_for_completion_interruptible_timeout(struct completion *x,
+					  unsigned long timeout)
 {
-	spin_lock_irq(&q->lock);
-	__remove_wait_queue(q, wait);
-	spin_unlock_irqrestore(&q->lock, *flags);
+	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
 }
+EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
-void __sched interruptible_sleep_on(wait_queue_head_t *q)
+static long __sched
+sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
 	unsigned long flags;
 	wait_queue_t wait;
 
 	init_waitqueue_entry(&wait, current);
 
-	current->state = TASK_INTERRUPTIBLE;
+	__set_current_state(state);
 
-	sleep_on_head(q, &wait, &flags);
-	schedule();
-	sleep_on_tail(q, &wait, &flags);
+	spin_lock_irqsave(&q->lock, flags);
+	__add_wait_queue(q, &wait);
+	spin_unlock(&q->lock);
+	timeout = schedule_timeout(timeout);
+	spin_lock_irq(&q->lock);
+	__remove_wait_queue(q, &wait);
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return timeout;
+}
+
+void __sched interruptible_sleep_on(wait_queue_head_t *q)
+{
+	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(interruptible_sleep_on);
 
 long __sched
 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-	unsigned long flags;
-	wait_queue_t wait;
-
-	init_waitqueue_entry(&wait, current);
-
-	current->state = TASK_INTERRUPTIBLE;
-
-	sleep_on_head(q, &wait, &flags);
-	timeout = schedule_timeout(timeout);
-	sleep_on_tail(q, &wait, &flags);
-
-	return timeout;
+	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
 }
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 
 void __sched sleep_on(wait_queue_head_t *q)
 {
-	unsigned long flags;
-	wait_queue_t wait;
-
-	init_waitqueue_entry(&wait, current);
-
-	current->state = TASK_UNINTERRUPTIBLE;
-
-	sleep_on_head(q, &wait, &flags);
-	schedule();
-	sleep_on_tail(q, &wait, &flags);
+	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(sleep_on);
 
 long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-	unsigned long flags;
-	wait_queue_t wait;
-
-	init_waitqueue_entry(&wait, current);
-
-	current->state = TASK_UNINTERRUPTIBLE;
-
-	sleep_on_head(q, &wait, &flags);
-	timeout = schedule_timeout(timeout);
-	sleep_on_tail(q, &wait, &flags);
-
-	return timeout;
+	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
 }
 EXPORT_SYMBOL(sleep_on_timeout);
 
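For reference, the exported API and its semantics are unchanged by this
refactor, so callers are untouched. A minimal illustrative sketch of the
caller's view (a hypothetical example, not from this patch; my_done and
my_wait are assumed names), with comments noting which wait_for_common()
call each wrapper now maps to:

/* Illustrative caller, not part of this patch. */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/param.h>	/* HZ */

static DECLARE_COMPLETION(my_done);	/* hypothetical completion */

static int my_wait(void)
{
	/* now wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE) */
	if (wait_for_completion_interruptible(&my_done) == -ERESTARTSYS)
		return -ERESTARTSYS;	/* interrupted by a signal */

	/*
	 * now wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
	 * returns 0 if the timeout (here one second) elapsed first
	 */
	if (!wait_for_completion_timeout(&my_done, HZ))
		return -ETIMEDOUT;

	return 0;
}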