author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/linux/wait.h
Linux-2.6.12-rc2 (v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it.

Let it rip!
Diffstat (limited to 'include/linux/wait.h')
-rw-r--r--	include/linux/wait.h	460
1 file changed, 460 insertions(+), 0 deletions(-)
diff --git a/include/linux/wait.h b/include/linux/wait.h
new file mode 100644
index 000000000000..17c874a8eb3f
--- /dev/null
+++ b/include/linux/wait.h
@@ -0,0 +1,460 @@
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#define WNOHANG		0x00000001
#define WUNTRACED	0x00000002
#define WSTOPPED	WUNTRACED
#define WEXITED		0x00000004
#define WCONTINUED	0x00000008
#define WNOWAIT		0x01000000	/* Don't reap, just poll status.  */

#define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
#define __WALL		0x40000000	/* Wait on all children, regardless of type */
#define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */

/* First argument to waitid: */
#define P_ALL		0
#define P_PID		1
#define P_PGID		2

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/current.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	struct task_struct * task;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;


/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.task		= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= SPIN_LOCK_UNLOCKED,				\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

static inline void init_waitqueue_head(wait_queue_head_t *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->task_list);
}
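/*
 * Usage sketch (illustrative only; global_wq and struct my_device are
 * hypothetical): a wait queue head is either declared statically with
 * DECLARE_WAIT_QUEUE_HEAD() or embedded in an object and initialised at
 * run time with init_waitqueue_head().  Kept under #if 0; example only.
 */
#if 0
static DECLARE_WAIT_QUEUE_HEAD(global_wq);	/* static initialisation */

struct my_device {
	wait_queue_head_t wq;			/* per-object wait queue */
};

static void my_device_init(struct my_device *dev)
{
	init_waitqueue_head(&dev->wq);		/* dynamic initialisation */
}
#endif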

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->task = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					wait_queue_func_t func)
{
	q->flags = 0;
	q->task = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

/*
 * Used to distinguish between sync and async io wait context:
 * sync i/o typically specifies a NULL wait queue entry or a wait
 * queue entry bound to a task (current task) to wake up.
 * aio specifies a wait queue entry with an async notification
 * callback routine, not associated with any task.
 */
#define is_sync_wait(wait)	(!(wait) || ((wait)->task))

extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
						wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
						wait_queue_t *old)
{
	list_del(&old->task_list);
}
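/*
 * Usage sketch (illustrative only; my_wake_cb is a hypothetical callback):
 * an asynchronous waiter, as described in the is_sync_wait() comment
 * above, registers a wake function instead of a task.  The callback is
 * invoked from __wake_up() with the wait queue head's spinlock held.
 */
#if 0
static int my_wake_cb(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	/* Kick off deferred completion work here instead of waking a task. */
	return 1;	/* non-zero: wakeup delivered (counts for exclusive waiters) */
}

static void register_async_waiter(wait_queue_head_t *q, wait_queue_t *wait)
{
	init_waitqueue_func_entry(wait, my_wake_cb);	/* ->task stays NULL */
	add_wait_queue(q, wait);			/* non-exclusive entry */
}
#endif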

void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
int FASTCALL(__wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
int FASTCALL(__wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
void FASTCALL(wake_up_bit(void *, int));
int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));

#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)

#define __wait_event(wq, condition)					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
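/*
 * Usage sketch (illustrative only; my_wq/my_flag are hypothetical): a
 * consumer sleeps in wait_event() until a producer changes the condition
 * and then calls wake_up(), in that order, as the comment above requires.
 * A real caller also needs <linux/sched.h> for schedule() and current.
 */
#if 0
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_flag;

static void consumer(void)
{
	wait_event(my_wq, my_flag != 0);	/* TASK_UNINTERRUPTIBLE sleep */
}

static void producer(void)
{
	my_flag = 1;		/* change the condition first ...	*/
	wake_up(&my_wq);	/* ... then wake the waiters		*/
}
#endif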

#define __wait_event_timeout(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		ret = schedule_timeout(ret);				\
		if (!ret)						\
			break;						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_timeout(wq, condition, __ret);		\
	__ret;								\
})

#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})
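/*
 * Usage sketch (illustrative only; my_wq/my_cond are hypothetical):
 * wait_event_interruptible() returns -ERESTARTSYS when a signal ends
 * the sleep early, which callers typically propagate so the system
 * call can be restarted.
 */
#if 0
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_cond;

static int wait_for_data(void)
{
	int err = wait_event_interruptible(my_wq, my_cond != 0);

	if (err)
		return err;	/* -ERESTARTSYS: interrupted by a signal */
	return 0;		/* condition became true */
}
#endif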

#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})
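/*
 * Usage sketch (illustrative only; my_wq/my_cond are hypothetical): the
 * interruptible timeout variant has three outcomes, all of which are
 * usually handled explicitly: remaining jiffies (success), 0 (timeout)
 * and -ERESTARTSYS (signal).
 */
#if 0
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_cond;

static int wait_for_data_timeout(void)
{
	long left = wait_event_interruptible_timeout(my_wq, my_cond != 0, HZ);

	if (left < 0)
		return left;		/* -ERESTARTSYS: signal arrived	  */
	if (left == 0)
		return -ETIMEDOUT;	/* a full HZ jiffies passed	  */
	return 0;			/* done with 'left' jiffies spare */
}
#endif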

#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait_exclusive(&wq, &__wait,			\
					TASK_INTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret;								\
})

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
						   wait_queue_t * wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void remove_wait_queue_locked(wait_queue_head_t *q,
					    wait_queue_t * wait)
{
	__remove_wait_queue(q, wait);
}

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy. DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces during 2.7.
 */
extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
				      signed long timeout));
extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
						    signed long timeout));

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void FASTCALL(prepare_to_wait(wait_queue_head_t *q,
				wait_queue_t *wait, int state));
void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
					wait_queue_t *wait, int state));
void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT(name)						\
	wait_queue_t name = {						\
		.task		= current,				\
		.func		= autoremove_wake_function,		\
		.task_list	= {	.next = &(name).task_list,	\
					.prev = &(name).task_list,	\
				},					\
	}
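/*
 * Usage sketch (illustrative only; my_wq/my_cond are hypothetical): the
 * prepare_to_wait()/finish_wait() pair is what the wait_event*() macros
 * above are built from, and can be open-coded when the loop needs extra
 * steps (for example dropping a lock around schedule()).
 */
#if 0
static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_cond;

static int wait_open_coded(void)
{
	DEFINE_WAIT(wait);
	int err = 0;

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (my_cond)
			break;
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	finish_wait(&my_wq, &wait);
	return err;
}
#endif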

#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.task		= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->task = current;					\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
	} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
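/*
 * Usage sketch (illustrative only; my_flags/MY_BIT/my_sleep are
 * hypothetical): the @action callback decides how to sleep; a minimal
 * one just calls schedule() and returns 0 to keep waiting (a non-zero
 * return aborts the wait and is passed back to the caller).  The waker
 * clears the bit and then calls wake_up_bit() on the same word/bit.
 */
#if 0
static unsigned long my_flags;
#define MY_BIT	0

static int my_sleep(void *word)
{
	schedule();
	return 0;
}

static void bit_waiter(void)
{
	/* Returns once MY_BIT is clear (immediately if it already is). */
	wait_on_bit(&my_flags, MY_BIT, my_sleep, TASK_UNINTERRUPTIBLE);
}

static void bit_waker(void)
{
	clear_bit(MY_BIT, &my_flags);
	smp_mb__after_clear_bit();	/* order the clear before the wake */
	wake_up_bit(&my_flags, MY_BIT);
}
#endif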

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
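/*
 * Usage sketch (illustrative only; my_flags/MY_LOCK_BIT/my_sleep are
 * hypothetical): wait_on_bit_lock() acts like a bit-based lock: it
 * returns only once this caller has atomically set the bit, and the
 * owner later clears it and wakes the next waiter.
 */
#if 0
static unsigned long my_flags;
#define MY_LOCK_BIT	0

static int my_sleep(void *word)
{
	schedule();		/* same kind of action as in the sketch above */
	return 0;
}

static void my_bit_lock(void)
{
	wait_on_bit_lock(&my_flags, MY_LOCK_BIT, my_sleep, TASK_UNINTERRUPTIBLE);
}

static void my_bit_unlock(void)
{
	clear_bit(MY_LOCK_BIT, &my_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&my_flags, MY_LOCK_BIT);
}
#endif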

#endif /* __KERNEL__ */

#endif