author		Peter Zijlstra <peterz@infradead.org>	2013-10-04 11:24:35 -0400
committer	Ingo Molnar <mingo@kernel.org>		2013-11-06 01:49:18 -0500
commit		b4145872f7049e429718b40b86e1b46659988398 (patch)
tree		8bc11b45388ea3d520c678e03c8ff6f99810f84e /kernel/sched/wait.c
parent		7a6354e241d8fbc145836ac24e47630f12754536 (diff)
sched: Move wait code from core.c to wait.c
For some reason only the wait part of the wait API lives in
kernel/sched/wait.c and the wake part still lives in kernel/sched/core.c;
amend this.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-ftycee88naznulqk7ei5mbci@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
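To make the split concrete: the sleep half of the API (the wait_event*() family) already lived in kernel/sched/wait.c, while the wake half (the wake_up*() macros, which funnel into __wake_up() in the diff below) sat in core.c. A minimal driver-style sketch of how the two halves pair up; my_waitq, my_data_ready, and both functions are illustrative names, not part of this patch:

	#include <linux/wait.h>
	#include <linux/sched.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
	static int my_data_ready;

	/* Sleep half: blocks until my_data_ready is non-zero or a signal
	 * arrives; this code was already in kernel/sched/wait.c. */
	static int my_consumer(void)
	{
		return wait_event_interruptible(my_waitq, my_data_ready);
	}

	/* Wake half: this is what the patch moves out of kernel/sched/core.c.
	 * wake_up() expands to __wake_up(&my_waitq, TASK_NORMAL, 1, NULL). */
	static void my_producer(void)
	{
		my_data_ready = 1;
		wake_up(&my_waitq);
	}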
Diffstat (limited to 'kernel/sched/wait.c')
-rw-r--r--	kernel/sched/wait.c	103
1 file changed, 103 insertions, 0 deletions
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index de21c6305a44..7d50f794e248 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -53,6 +53,109 @@ EXPORT_SYMBOL(remove_wait_queue);
 
 
 /*
+ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
+ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
+ * number) then we wake all the non-exclusive tasks and one exclusive task.
+ *
+ * There are circumstances in which we can try to wake a task which has already
+ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
+ * zero in this (rare) case, and we handle it by continuing to scan the queue.
+ */
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+			int nr_exclusive, int wake_flags, void *key)
+{
+	wait_queue_t *curr, *next;
+
+	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
+		unsigned flags = curr->flags;
+
+		if (curr->func(curr, mode, wake_flags, key) &&
+				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
+			break;
+	}
+}
+
+/**
+ * __wake_up - wake up threads blocked on a waitqueue.
+ * @q: the waitqueue
+ * @mode: which threads
+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: is directly passed to the wakeup function
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
+ */
+void __wake_up(wait_queue_head_t *q, unsigned int mode,
+			int nr_exclusive, void *key)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__wake_up_common(q, mode, nr_exclusive, 0, key);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(__wake_up);
+
+/*
+ * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
+ */
+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
+{
+	__wake_up_common(q, mode, nr, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(__wake_up_locked);
+
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+{
+	__wake_up_common(q, mode, 1, 0, key);
+}
+EXPORT_SYMBOL_GPL(__wake_up_locked_key);
+
+/**
+ * __wake_up_sync_key - wake up threads blocked on a waitqueue.
+ * @q: the waitqueue
+ * @mode: which threads
+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: opaque value to be passed to wakeup targets
+ *
+ * The sync wakeup differs that the waker knows that it will schedule
+ * away soon, so while the target thread will be woken up, it will not
+ * be migrated to another CPU - ie. the two threads are 'synchronized'
+ * with each other. This can prevent needless bouncing between CPUs.
+ *
+ * On UP it can prevent extra preemption.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
+ */
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
+			int nr_exclusive, void *key)
+{
+	unsigned long flags;
+	int wake_flags = 1; /* XXX WF_SYNC */
+
+	if (unlikely(!q))
+		return;
+
+	if (unlikely(nr_exclusive != 1))
+		wake_flags = 0;
+
+	spin_lock_irqsave(&q->lock, flags);
+	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL_GPL(__wake_up_sync_key);
+
+/*
+ * __wake_up_sync - see __wake_up_sync_key()
+ */
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+{
+	__wake_up_sync_key(q, mode, nr_exclusive, NULL);
+}
+EXPORT_SYMBOL_GPL(__wake_up_sync);	/* For internal use only */
+
+/*
  * Note: we use "set_current_state()" _after_ the wait-queue add,
  * because we need a memory barrier there on SMP, so that any
  * wake-function that tests for the wait-queue being active
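The nr_exclusive logic in __wake_up_common() above is what makes wake-one semantics work: waiters queued with WQ_FLAG_EXCLUSIVE stop the scan once enough of them have been woken. A hedged sketch of how a caller opts in, using the existing prepare_to_wait_exclusive()/finish_wait() helpers; the wait-queue, flag, and worker names are hypothetical, not from this patch:

	#include <linux/wait.h>
	#include <linux/sched.h>

	static DECLARE_WAIT_QUEUE_HEAD(pool_waitq);
	static int work_available;

	/* Each worker queues itself as an exclusive waiter, so a plain
	 * wake_up() (nr_exclusive == 1) releases exactly one worker
	 * instead of the whole pool (the classic thundering herd). */
	static void worker_wait_for_work(void)
	{
		DEFINE_WAIT(wait);

		for (;;) {
			/* Adds 'wait' to the queue with WQ_FLAG_EXCLUSIVE set. */
			prepare_to_wait_exclusive(&pool_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			if (work_available)
				break;
			schedule();
		}
		finish_wait(&pool_waitq, &wait);
	}

With this in place, wake_up_nr(&pool_waitq, 2) would release two exclusive waiters instead of one, plus any non-exclusive waiters queued ahead of them.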
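Similarly, the wake_flags = 1 /* XXX WF_SYNC */ path in __wake_up_sync_key() is the hint a waker gives when it knows it will block shortly itself. A sketch of the typical producer/consumer hand-off that benefits; again, all names here are illustrative:

	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(handoff_waitq);
	static int item_ready;

	/* The producer is about to sleep itself (say, waiting for buffer
	 * space), so the sync hint tells the scheduler the woken consumer
	 * can take over this CPU rather than being migrated to another. */
	static void producer_handoff(void)
	{
		item_ready = 1;
		wake_up_interruptible_sync(&handoff_waitq);
		/* ...producer blocks shortly after... */
	}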