author		Peter Zijlstra <peterz@infradead.org>	2013-10-31 13:07:08 -0400
committer	Ingo Molnar <mingo@kernel.org>		2013-11-06 01:49:16 -0500
commit		7a6354e241d8fbc145836ac24e47630f12754536 (patch)
tree		90e4bb2c4affd4a3ee4c758769c85d8770c8e857 /kernel/sched
parent		7d716456a0ee4e9bd63be9234f886d20382ac950 (diff)

sched: Move wait.c into kernel/sched/
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-5q5yqvdaen0rmapwloeaotx3@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
 kernel/sched/Makefile |   1
 kernel/sched/wait.c   | 401
 2 files changed, 402 insertions, 0 deletions
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 54adcf35f495..f8d3f4baa1a1 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -12,6 +12,7 @@ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif
 
 obj-y += core.o proc.o clock.o cputime.o idle_task.o fair.o rt.o stop_task.o
+obj-y += wait.o
 obj-$(CONFIG_SMP) += cpupri.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
new file mode 100644
index 000000000000..de21c6305a44
--- /dev/null
+++ b/kernel/sched/wait.c
@@ -0,0 +1,401 @@
/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>

void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key)
{
        spin_lock_init(&q->lock);
        lockdep_set_class_and_name(&q->lock, key, name);
        INIT_LIST_HEAD(&q->task_list);
}

EXPORT_SYMBOL(__init_waitqueue_head);

void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue_tail(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __remove_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);


/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        wait->flags |= WQ_FLAG_EXCLUSIVE;
        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue_tail(q, wait);
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
        unsigned long flags;

        if (signal_pending_state(state, current))
                return -ERESTARTSYS;

        wait->private = current;
        wait->func = autoremove_wake_function;

        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list)) {
                if (wait->flags & WQ_FLAG_EXCLUSIVE)
                        __add_wait_queue_tail(q, wait);
                else
                        __add_wait_queue(q, wait);
        }
        set_current_state(state);
        spin_unlock_irqrestore(&q->lock, flags);

        return 0;
}
EXPORT_SYMBOL(prepare_to_wait_event);

/**
 * finish_wait - clean up after waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 */
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        /*
         * We can check for list emptiness outside the lock
         * IFF:
         *  - we use the "careful" check that verifies both
         *    the next and prev pointers, so that there cannot
         *    be any half-pending updates in progress on other
         *    CPUs that we haven't seen yet (and that might
         *    still change the stack area), and
         *  - all other users take the lock (i.e. we can only
         *    have _one_ other CPU that looks at or modifies
         *    the list).
         */
        if (!list_empty_careful(&wait->task_list)) {
                spin_lock_irqsave(&q->lock, flags);
                list_del_init(&wait->task_list);
                spin_unlock_irqrestore(&q->lock, flags);
        }
}
EXPORT_SYMBOL(finish_wait);
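
/*
 * Example (illustration only, not part of this patch): the canonical
 * open-coded wait loop that prepare_to_wait()/finish_wait() support.
 * "my_wq" and "condition" are hypothetical names.
 *
 *      DEFINE_WAIT(wait);
 *
 *      for (;;) {
 *              prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *              if (condition)
 *                      break;
 *              schedule();
 *      }
 *      finish_wait(&my_wq, &wait);
 */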

/**
 * abort_exclusive_wait - abort exclusive waiting in a queue
 * @q: waitqueue waited on
 * @wait: wait descriptor
 * @mode: runstate of the waiter to be woken
 * @key: key to identify a wait bit queue or %NULL
 *
 * Sets current thread back to running state and removes
 * the wait descriptor from the given waitqueue if still
 * queued.
 *
 * Wakes up the next waiter if the caller is concurrently
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
 * aborts and is woken up concurrently and no one wakes up
 * the next waiter.
 */
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
                        unsigned int mode, void *key)
{
        unsigned long flags;

        __set_current_state(TASK_RUNNING);
        spin_lock_irqsave(&q->lock, flags);
        if (!list_empty(&wait->task_list))
                list_del_init(&wait->task_list);
        else if (waitqueue_active(q))
                __wake_up_locked_key(q, mode, key);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(abort_exclusive_wait);

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        int ret = default_wake_function(wait, mode, sync, key);

        if (ret)
                list_del_init(&wait->task_list);
        return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);

        if (wait_bit->key.flags != key->flags ||
                        wait_bit->key.bit_nr != key->bit_nr ||
                        test_bit(key->bit_nr, key->flags))
                return 0;
        else
                return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the "action" functions passed to __wait_on_bit() and
 * __wait_on_bit_lock() may return nonzero codes; a nonzero return
 * halts the wait and is propagated to the caller.
 */
int __sched
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags))
                        ret = (*action)(q->key.flags);
        } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
        finish_wait(wq, &q->wait);
        return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched out_of_line_wait_on_bit(void *word, int bit,
                                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

int __sched
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                        int (*action)(void *), unsigned mode)
{
        do {
                int ret;

                prepare_to_wait_exclusive(wq, &q->wait, mode);
                if (!test_bit(q->key.bit_nr, q->key.flags))
                        continue;
                ret = action(q->key.flags);
                if (!ret)
                        continue;
                abort_exclusive_wait(wq, &q->wait, mode, &q->key);
                return ret;
        } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
        finish_wait(wq, &q->wait);
        return 0;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
                                        int (*action)(void *), unsigned mode)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
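
/*
 * Example (illustration only, not part of this patch): callers normally
 * reach these through the wait_on_bit() wrappers in <linux/wait.h>.
 * A hypothetical waiter sleeping until MY_BIT_NR clears in obj->flags:
 *
 *      static int my_bit_wait(void *word)
 *      {
 *              schedule();
 *              return 0;
 *      }
 *
 *      wait_on_bit(&obj->flags, MY_BIT_NR, my_bit_wait,
 *                  TASK_UNINTERRUPTIBLE);
 */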

void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
        struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
        if (waitqueue_active(wq))
                __wake_up(wq, TASK_NORMAL, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be issued prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void wake_up_bit(void *word, int bit)
{
        __wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
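
/*
 * Example (illustration only, not part of this patch): the waker-side
 * pairing the comment above describes, for a hypothetical obj->flags:
 *
 *      clear_bit(MY_BIT_NR, &obj->flags);
 *      smp_mb__after_clear_bit();
 *      wake_up_bit(&obj->flags, MY_BIT_NR);
 */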

wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
        const int shift = BITS_PER_LONG == 32 ? 5 : 6;
        const struct zone *zone = page_zone(virt_to_page(word));
        unsigned long val = (unsigned long)word << shift | bit;

        return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);

/*
 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
 * index (we're keying off bit -1, but that would produce a horrible hash
 * value).
 */
static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
{
        if (BITS_PER_LONG == 64) {
                unsigned long q = (unsigned long)p;
                return bit_waitqueue((void *)(q & ~1), q & 1);
        }
        return bit_waitqueue(p, 0);
}

static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
                                  void *arg)
{
        struct wait_bit_key *key = arg;
        struct wait_bit_queue *wait_bit
                = container_of(wait, struct wait_bit_queue, wait);
        atomic_t *val = key->flags;

        if (wait_bit->key.flags != key->flags ||
            wait_bit->key.bit_nr != key->bit_nr ||
            atomic_read(val) != 0)
                return 0;
        return autoremove_wake_function(wait, mode, sync, key);
}

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the "action" function passed to __wait_on_atomic_t() may
 * return nonzero codes; a nonzero return halts the wait and is
 * propagated to the caller.
 */
static __sched
int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
                       int (*action)(atomic_t *), unsigned mode)
{
        atomic_t *val;
        int ret = 0;

        do {
                prepare_to_wait(wq, &q->wait, mode);
                val = q->key.flags;
                if (atomic_read(val) == 0)
                        break;
                ret = (*action)(val);
        } while (!ret && atomic_read(val) != 0);
        finish_wait(wq, &q->wait);
        return ret;
}

#define DEFINE_WAIT_ATOMIC_T(name, p)                                   \
        struct wait_bit_queue name = {                                  \
                .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),              \
                .wait = {                                               \
                        .private        = current,                      \
                        .func           = wake_atomic_t_function,       \
                        .task_list      =                               \
                                LIST_HEAD_INIT((name).wait.task_list),  \
                },                                                      \
        }

__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
                                         unsigned mode)
{
        wait_queue_head_t *wq = atomic_t_waitqueue(p);
        DEFINE_WAIT_ATOMIC_T(wait, p);

        return __wait_on_atomic_t(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);

/**
 * wake_up_atomic_t - wake up a waiter on an atomic_t
 * @p: the atomic_t being waited on, a kernel virtual address
 *
 * Wake up anyone waiting for the atomic_t to go to zero.
 *
 * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
 * check is done by the waiter's wake function, not by the waker itself).
 */
void wake_up_atomic_t(atomic_t *p)
{
        __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
}
EXPORT_SYMBOL(wake_up_atomic_t);
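
/*
 * Example (illustration only, not part of this patch): a refcount-style
 * pairing via the wait_on_atomic_t() wrapper in <linux/wait.h>, waiting
 * for a hypothetical atomic_t "obj->users" to drop to zero:
 *
 *      wait_on_atomic_t(&obj->users, my_atomic_wait, TASK_UNINTERRUPTIBLE);
 *
 * where my_atomic_wait() just calls schedule() and returns 0; and on the
 * release side:
 *
 *      if (atomic_dec_and_test(&obj->users))
 *              wake_up_atomic_t(&obj->users);
 */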