| author | Peter Zijlstra <peterz@infradead.org> | 2013-10-04 11:24:35 -0400 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@kernel.org> | 2013-11-06 01:49:18 -0500 |
| commit | b4145872f7049e429718b40b86e1b46659988398 (patch) | |
| tree | 8bc11b45388ea3d520c678e03c8ff6f99810f84e /kernel | |
| parent | 7a6354e241d8fbc145836ac24e47630f12754536 (diff) | |
sched: Move wait code from core.c to wait.c
For some reason only the wait part of the wait API lives in
kernel/sched/wait.c while the wake part still lives in kernel/sched/core.c;
amend this.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-ftycee88naznulqk7ei5mbci@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
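
For readers unfamiliar with the split the message describes: the "wait part" is the sleeping side of the waitqueue API (add_wait_queue(), prepare_to_wait() and friends, already in kernel/sched/wait.c), while the "wake part" is the __wake_up*() family that wake_up() and its variants expand to, which this patch moves over from core.c. A minimal sketch of how the two halves pair up in a caller; the wq, data_ready, producer and consumer names are illustrative, not taken from this commit:

```c
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);    /* illustrative waitqueue */
static bool data_ready;                 /* illustrative condition */

static void consumer(void)
{
        /* "wait part": sleep until the condition is true (wait.c helpers). */
        wait_event(wq, data_ready);
}

static void producer(void)
{
        data_ready = true;
        /* "wake part": wake_up() expands to __wake_up(), moved by this patch. */
        wake_up(&wq);
}
```

Real code would serialise updates of the condition (a lock or appropriate barriers); the sketch only shows which half of the API lives where.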
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/core.c | 107 |
| -rw-r--r-- | kernel/sched/wait.c | 103 |
2 files changed, 105 insertions, 105 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 450a34b2a637..91b28454c218 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2688,109 +2688,6 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
 }
 EXPORT_SYMBOL(default_wake_function);
 
-/*
- * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
- * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
- * number) then we wake all the non-exclusive tasks and one exclusive task.
- *
- * There are circumstances in which we can try to wake a task which has already
- * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
- * zero in this (rare) case, and we handle it by continuing to scan the queue.
- */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-                        int nr_exclusive, int wake_flags, void *key)
-{
-        wait_queue_t *curr, *next;
-
-        list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
-                unsigned flags = curr->flags;
-
-                if (curr->func(curr, mode, wake_flags, key) &&
-                                (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
-                        break;
-        }
-}
-
-/**
- * __wake_up - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
- * @mode: which threads
- * @nr_exclusive: how many wake-one or wake-many threads to wake up
- * @key: is directly passed to the wakeup function
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void __wake_up(wait_queue_head_t *q, unsigned int mode,
-                        int nr_exclusive, void *key)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&q->lock, flags);
-        __wake_up_common(q, mode, nr_exclusive, 0, key);
-        spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(__wake_up);
-
-/*
- * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
- */
-void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
-{
-        __wake_up_common(q, mode, nr, 0, NULL);
-}
-EXPORT_SYMBOL_GPL(__wake_up_locked);
-
-void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
-{
-        __wake_up_common(q, mode, 1, 0, key);
-}
-EXPORT_SYMBOL_GPL(__wake_up_locked_key);
-
-/**
- * __wake_up_sync_key - wake up threads blocked on a waitqueue.
- * @q: the waitqueue
- * @mode: which threads
- * @nr_exclusive: how many wake-one or wake-many threads to wake up
- * @key: opaque value to be passed to wakeup targets
- *
- * The sync wakeup differs that the waker knows that it will schedule
- * away soon, so while the target thread will be woken up, it will not
- * be migrated to another CPU - ie. the two threads are 'synchronized'
- * with each other. This can prevent needless bouncing between CPUs.
- *
- * On UP it can prevent extra preemption.
- *
- * It may be assumed that this function implies a write memory barrier before
- * changing the task state if and only if any tasks are woken up.
- */
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
-                        int nr_exclusive, void *key)
-{
-        unsigned long flags;
-        int wake_flags = WF_SYNC;
-
-        if (unlikely(!q))
-                return;
-
-        if (unlikely(nr_exclusive != 1))
-                wake_flags = 0;
-
-        spin_lock_irqsave(&q->lock, flags);
-        __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
-        spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL_GPL(__wake_up_sync_key);
-
-/*
- * __wake_up_sync - see __wake_up_sync_key()
- */
-void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
-{
-        __wake_up_sync_key(q, mode, nr_exclusive, NULL);
-}
-EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
-
 /**
  * complete: - signals a single thread waiting on this completion
  * @x: holds the state of this particular completion
@@ -2809,7 +2706,7 @@ void complete(struct completion *x)
 
         spin_lock_irqsave(&x->wait.lock, flags);
         x->done++;
-        __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
+        __wake_up_locked(&x->wait, TASK_NORMAL, 1);
         spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete);
@@ -2829,7 +2726,7 @@ void complete_all(struct completion *x)
 
         spin_lock_irqsave(&x->wait.lock, flags);
         x->done += UINT_MAX/2;
-        __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
+        __wake_up_locked(&x->wait, TASK_NORMAL, 0);
         spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);
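
The only change to code that stays in core.c is in complete() and complete_all(): they now call __wake_up_locked() instead of __wake_up_common(), since the latter becomes static in wait.c. The callers already hold x->wait.lock, so the locked variant is equivalent (nr=1 keeps complete() waking a single exclusive waiter, nr=0 keeps complete_all() waking everyone). For reference, a minimal sketch of the completion API those two functions back; the done and worker names are illustrative:

```c
#include <linux/completion.h>
#include <linux/kthread.h>

static DECLARE_COMPLETION(done);        /* illustrative completion */

static int worker(void *data)
{
        /* ... produce a result ... */
        complete(&done);                /* wakes one waiter via __wake_up_locked() */
        return 0;
}

static void run_and_wait(void)
{
        kthread_run(worker, NULL, "illustrative-worker");
        wait_for_completion(&done);     /* sleeps until complete() above */
}
```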
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index de21c6305a44..7d50f794e248 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -53,6 +53,109 @@ EXPORT_SYMBOL(remove_wait_queue);
 
 
 /*
+ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
+ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
+ * number) then we wake all the non-exclusive tasks and one exclusive task.
+ *
+ * There are circumstances in which we can try to wake a task which has already
+ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
+ * zero in this (rare) case, and we handle it by continuing to scan the queue.
+ */
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+                        int nr_exclusive, int wake_flags, void *key)
+{
+        wait_queue_t *curr, *next;
+
+        list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
+                unsigned flags = curr->flags;
+
+                if (curr->func(curr, mode, wake_flags, key) &&
+                                (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
+                        break;
+        }
+}
+
+/**
+ * __wake_up - wake up threads blocked on a waitqueue.
+ * @q: the waitqueue
+ * @mode: which threads
+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: is directly passed to the wakeup function
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
+ */
+void __wake_up(wait_queue_head_t *q, unsigned int mode,
+                        int nr_exclusive, void *key)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&q->lock, flags);
+        __wake_up_common(q, mode, nr_exclusive, 0, key);
+        spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(__wake_up);
+
+/*
+ * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
+ */
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
+{
+        __wake_up_common(q, mode, nr, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(__wake_up_locked);
+
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+{
+        __wake_up_common(q, mode, 1, 0, key);
+}
+EXPORT_SYMBOL_GPL(__wake_up_locked_key);
+
+/**
+ * __wake_up_sync_key - wake up threads blocked on a waitqueue.
+ * @q: the waitqueue
+ * @mode: which threads
+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: opaque value to be passed to wakeup targets
+ *
+ * The sync wakeup differs that the waker knows that it will schedule
+ * away soon, so while the target thread will be woken up, it will not
+ * be migrated to another CPU - ie. the two threads are 'synchronized'
+ * with each other. This can prevent needless bouncing between CPUs.
+ *
+ * On UP it can prevent extra preemption.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
+ */
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
+                        int nr_exclusive, void *key)
+{
+        unsigned long flags;
+        int wake_flags = 1; /* XXX WF_SYNC */
+
+        if (unlikely(!q))
+                return;
+
+        if (unlikely(nr_exclusive != 1))
+                wake_flags = 0;
+
+        spin_lock_irqsave(&q->lock, flags);
+        __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
+        spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__wake_up_sync_key);
+
+/*
+ * __wake_up_sync - see __wake_up_sync_key()
+ */
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+{
+        __wake_up_sync_key(q, mode, nr_exclusive, NULL);
+}
+EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
+
+/*
  * Note: we use "set_current_state()" _after_ the wait-queue add,
  * because we need a memory barrier there on SMP, so that any
  * wake-function that tests for the wait-queue being active
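
The trailing context lines belong to the comment above the prepare_to_wait()-style helpers in wait.c: the task is added to the waitqueue first and set_current_state() runs afterwards, so its memory barrier orders the state change against a concurrent waker scanning the queue. A minimal sketch of the classic open-coded wait loop that relies on that ordering; wq and condition are illustrative placeholders, not from this file:

```c
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);     /* illustrative */
static bool condition;                  /* illustrative */

static void wait_for_condition(void)
{
        DEFINE_WAIT(wait);

        for (;;) {
                /* Queues us on &wq, then sets the task state; the implied
                 * barrier keeps that order visible to a concurrent wake_up(&wq). */
                prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
                if (condition)
                        break;
                schedule();
        }
        finish_wait(&wq, &wait);
}
```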