path: root/include/linux/wait.h
Diffstat (limited to 'include/linux/wait.h')
-rw-r--r--  include/linux/wait.h  374
1 file changed, 141 insertions, 233 deletions
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a67fc1635592..61939ba30aa0 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -1,7 +1,8 @@
 #ifndef _LINUX_WAIT_H
 #define _LINUX_WAIT_H
-
-
+/*
+ * Linux wait queue related types and methods
+ */
 #include <linux/list.h>
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
@@ -13,27 +14,27 @@ typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, v
 int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
 
 struct __wait_queue {
-        unsigned int flags;
+        unsigned int            flags;
 #define WQ_FLAG_EXCLUSIVE 0x01
-        void *private;
-        wait_queue_func_t func;
-        struct list_head task_list;
+        void                    *private;
+        wait_queue_func_t       func;
+        struct list_head        task_list;
 };
 
 struct wait_bit_key {
-        void *flags;
-        int bit_nr;
+        void                    *flags;
+        int                     bit_nr;
 #define WAIT_ATOMIC_T_BIT_NR -1
 };
 
 struct wait_bit_queue {
-        struct wait_bit_key key;
-        wait_queue_t wait;
+        struct wait_bit_key     key;
+        wait_queue_t            wait;
 };
 
 struct __wait_queue_head {
-        spinlock_t lock;
-        struct list_head task_list;
+        spinlock_t              lock;
+        struct list_head        task_list;
 };
 typedef struct __wait_queue_head wait_queue_head_t;
 
@@ -84,17 +85,17 @@ extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct
 
 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
 {
-        q->flags = 0;
-        q->private = p;
-        q->func = default_wake_function;
+        q->flags        = 0;
+        q->private      = p;
+        q->func         = default_wake_function;
 }
 
-static inline void init_waitqueue_func_entry(wait_queue_t *q,
-                                        wait_queue_func_t func)
+static inline void
+init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
 {
-        q->flags = 0;
-        q->private = NULL;
-        q->func = func;
+        q->flags        = 0;
+        q->private      = NULL;
+        q->func         = func;
 }
 
 static inline int waitqueue_active(wait_queue_head_t *q)
@@ -114,8 +115,8 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
 /*
  * Used for wake-one threads:
  */
-static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
-                                              wait_queue_t *wait)
+static inline void
+__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
 {
         wait->flags |= WQ_FLAG_EXCLUSIVE;
         __add_wait_queue(q, wait);
@@ -127,23 +128,22 @@ static inline void __add_wait_queue_tail(wait_queue_head_t *head,
         list_add_tail(&new->task_list, &head->task_list);
 }
 
-static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
-                                              wait_queue_t *wait)
+static inline void
+__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
 {
         wait->flags |= WQ_FLAG_EXCLUSIVE;
         __add_wait_queue_tail(q, wait);
 }
 
-static inline void __remove_wait_queue(wait_queue_head_t *head,
-                                        wait_queue_t *old)
+static inline void
+__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
 {
         list_del(&old->task_list);
 }
 
 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
-                        void *key);
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_bit(wait_queue_head_t *, void *, int);
@@ -170,27 +170,64 @@ wait_queue_head_t *bit_waitqueue(void *, int);
 /*
  * Wakeup macros to be used to report events to the targets.
  */
 #define wake_up_poll(x, m) \
         __wake_up(x, TASK_NORMAL, 1, (void *) (m))
 #define wake_up_locked_poll(x, m) \
         __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
 #define wake_up_interruptible_poll(x, m) \
         __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
 #define wake_up_interruptible_sync_poll(x, m) \
         __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
 
-#define __wait_event(wq, condition) \
-do { \
-        DEFINE_WAIT(__wait); \
+#define ___wait_cond_timeout(condition) \
+({ \
+        bool __cond = (condition); \
+        if (__cond && !__ret) \
+                __ret = 1; \
+        __cond || !__ret; \
+})
+
+#define ___wait_is_interruptible(state) \
+        (!__builtin_constant_p(state) || \
+                state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
+
+#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
+({ \
+        __label__ __out; \
+        wait_queue_t __wait; \
+        long __ret = ret; \
+ \
+        INIT_LIST_HEAD(&__wait.task_list); \
+        if (exclusive) \
+                __wait.flags = WQ_FLAG_EXCLUSIVE; \
+        else \
+                __wait.flags = 0; \
  \
         for (;;) { \
-                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
+                long __int = prepare_to_wait_event(&wq, &__wait, state);\
+ \
                 if (condition) \
                         break; \
-                schedule(); \
+ \
+                if (___wait_is_interruptible(state) && __int) { \
+                        __ret = __int; \
+                        if (exclusive) { \
+                                abort_exclusive_wait(&wq, &__wait, \
+                                                     state, NULL); \
+                                goto __out; \
+                        } \
+                        break; \
+                } \
+ \
+                cmd; \
         } \
         finish_wait(&wq, &__wait); \
-} while (0)
+__out:  __ret; \
+})
+
+#define __wait_event(wq, condition) \
+        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+                            schedule())
 
 /**
  * wait_event - sleep until a condition gets true
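
The macro added above is the core of this rework: every wait flavour in the rest of the file now only picks the task state, the exclusive flag, the initial return value and the per-iteration command. As an illustration of how cheaply a new flavour can be composed from these helpers, the sketch below builds a killable wait with a timeout. This macro pair is not part of the patch; the names and their callers are hypothetical, the shape simply mirrors __wait_event_timeout()/wait_event_timeout() as converted in the hunks that follow.

/*
 * Hypothetical example, not in this diff: a killable wait with a timeout,
 * composed from ___wait_event() and ___wait_cond_timeout() introduced above.
 */
#define __wait_event_killable_timeout(wq, condition, timeout) \
        ___wait_event(wq, ___wait_cond_timeout(condition), \
                      TASK_KILLABLE, 0, timeout, \
                      __ret = schedule_timeout(__ret))

#define wait_event_killable_timeout(wq, condition, timeout) \
({ \
        long __ret = timeout; \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_killable_timeout(wq, \
                                                condition, timeout); \
        __ret; \
})
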
@@ -204,29 +241,17 @@ do { \
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
  */
 #define wait_event(wq, condition) \
 do { \
         if (condition) \
                 break; \
         __wait_event(wq, condition); \
 } while (0)
 
-#define __wait_event_timeout(wq, condition, ret) \
-do { \
-        DEFINE_WAIT(__wait); \
- \
-        for (;;) { \
-                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
-                if (condition) \
-                        break; \
-                ret = schedule_timeout(ret); \
-                if (!ret) \
-                        break; \
-        } \
-        if (!ret && (condition)) \
-                ret = 1; \
-        finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_timeout(wq, condition, timeout) \
+        ___wait_event(wq, ___wait_cond_timeout(condition), \
+                      TASK_UNINTERRUPTIBLE, 0, timeout, \
+                      __ret = schedule_timeout(__ret))
 
 /**
  * wait_event_timeout - sleep until a condition gets true or a timeout elapses
@@ -248,28 +273,14 @@ do { \
 #define wait_event_timeout(wq, condition, timeout) \
 ({ \
         long __ret = timeout; \
-        if (!(condition)) \
-                __wait_event_timeout(wq, condition, __ret); \
+        if (!___wait_cond_timeout(condition)) \
+                __ret = __wait_event_timeout(wq, condition, timeout); \
         __ret; \
 })
 
-#define __wait_event_interruptible(wq, condition, ret) \
-do { \
-        DEFINE_WAIT(__wait); \
- \
-        for (;;) { \
-                prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
-                if (condition) \
-                        break; \
-                if (!signal_pending(current)) { \
-                        schedule(); \
-                        continue; \
-                } \
-                ret = -ERESTARTSYS; \
-                break; \
-        } \
-        finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_interruptible(wq, condition) \
+        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+                      schedule())
 
 /**
  * wait_event_interruptible - sleep until a condition gets true
@@ -290,31 +301,14 @@ do { \
 ({ \
         int __ret = 0; \
         if (!(condition)) \
-                __wait_event_interruptible(wq, condition, __ret); \
+                __ret = __wait_event_interruptible(wq, condition); \
         __ret; \
 })
 
-#define __wait_event_interruptible_timeout(wq, condition, ret) \
-do { \
-        DEFINE_WAIT(__wait); \
- \
-        for (;;) { \
-                prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
-                if (condition) \
-                        break; \
-                if (!signal_pending(current)) { \
-                        ret = schedule_timeout(ret); \
-                        if (!ret) \
-                                break; \
-                        continue; \
-                } \
-                ret = -ERESTARTSYS; \
-                break; \
-        } \
-        if (!ret && (condition)) \
-                ret = 1; \
-        finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_interruptible_timeout(wq, condition, timeout) \
+        ___wait_event(wq, ___wait_cond_timeout(condition), \
+                      TASK_INTERRUPTIBLE, 0, timeout, \
+                      __ret = schedule_timeout(__ret))
 
 /**
  * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
@@ -337,15 +331,15 @@ do { \
 #define wait_event_interruptible_timeout(wq, condition, timeout) \
 ({ \
         long __ret = timeout; \
-        if (!(condition)) \
-                __wait_event_interruptible_timeout(wq, condition, __ret); \
+        if (!___wait_cond_timeout(condition)) \
+                __ret = __wait_event_interruptible_timeout(wq, \
+                                                condition, timeout); \
         __ret; \
 })
 
 #define __wait_event_hrtimeout(wq, condition, timeout, state) \
 ({ \
         int __ret = 0; \
-        DEFINE_WAIT(__wait); \
         struct hrtimer_sleeper __t; \
  \
         hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
@@ -356,25 +350,15 @@ do { \
                                current->timer_slack_ns, \
                                HRTIMER_MODE_REL); \
  \
-        for (;;) { \
-                prepare_to_wait(&wq, &__wait, state); \
-                if (condition) \
-                        break; \
-                if (state == TASK_INTERRUPTIBLE && \
-                    signal_pending(current)) { \
-                        __ret = -ERESTARTSYS; \
-                        break; \
-                } \
+        __ret = ___wait_event(wq, condition, state, 0, 0, \
                 if (!__t.task) { \
                         __ret = -ETIME; \
                         break; \
                 } \
-                schedule(); \
-        } \
+                schedule()); \
  \
         hrtimer_cancel(&__t.timer); \
         destroy_hrtimer_on_stack(&__t.timer); \
-        finish_wait(&wq, &__wait); \
         __ret; \
 })
 
@@ -428,33 +412,15 @@ do { \
         __ret; \
 })
 
-#define __wait_event_interruptible_exclusive(wq, condition, ret) \
-do { \
-        DEFINE_WAIT(__wait); \
- \
-        for (;;) { \
-                prepare_to_wait_exclusive(&wq, &__wait, \
-                                        TASK_INTERRUPTIBLE); \
-                if (condition) { \
-                        finish_wait(&wq, &__wait); \
-                        break; \
-                } \
-                if (!signal_pending(current)) { \
-                        schedule(); \
-                        continue; \
-                } \
-                ret = -ERESTARTSYS; \
-                abort_exclusive_wait(&wq, &__wait, \
-                                     TASK_INTERRUPTIBLE, NULL); \
-                break; \
-        } \
-} while (0)
+#define __wait_event_interruptible_exclusive(wq, condition) \
+        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
+                      schedule())
 
 #define wait_event_interruptible_exclusive(wq, condition) \
 ({ \
         int __ret = 0; \
         if (!(condition)) \
-                __wait_event_interruptible_exclusive(wq, condition, __ret);\
+                __ret = __wait_event_interruptible_exclusive(wq, condition);\
         __ret; \
 })
 
@@ -606,24 +572,8 @@ do { \
          ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
 
 
-
-#define __wait_event_killable(wq, condition, ret) \
-do { \
-        DEFINE_WAIT(__wait); \
- \
-        for (;;) { \
-                prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
-                if (condition) \
-                        break; \
-                if (!fatal_signal_pending(current)) { \
-                        schedule(); \
-                        continue; \
-                } \
-                ret = -ERESTARTSYS; \
-                break; \
-        } \
-        finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_killable(wq, condition) \
+        ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
 
 /**
  * wait_event_killable - sleep until a condition gets true
@@ -644,26 +594,17 @@ do { \
 ({ \
         int __ret = 0; \
         if (!(condition)) \
-                __wait_event_killable(wq, condition, __ret); \
+                __ret = __wait_event_killable(wq, condition); \
         __ret; \
 })
 
 
 #define __wait_event_lock_irq(wq, condition, lock, cmd) \
-do { \
-        DEFINE_WAIT(__wait); \
- \
-        for (;;) { \
-                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
-                if (condition) \
-                        break; \
-                spin_unlock_irq(&lock); \
-                cmd; \
-                schedule(); \
-                spin_lock_irq(&lock); \
-        } \
-        finish_wait(&wq, &__wait); \
-} while (0)
+        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+                            spin_unlock_irq(&lock); \
+                            cmd; \
+                            schedule(); \
+                            spin_lock_irq(&lock))
 
 /**
  * wait_event_lock_irq_cmd - sleep until a condition gets true. The
@@ -723,26 +664,12 @@ do { \
 } while (0)
 
 
-#define __wait_event_interruptible_lock_irq(wq, condition, \
-                                            lock, ret, cmd) \
-do { \
-        DEFINE_WAIT(__wait); \
- \
-        for (;;) { \
-                prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
-                if (condition) \
-                        break; \
-                if (signal_pending(current)) { \
-                        ret = -ERESTARTSYS; \
-                        break; \
-                } \
-                spin_unlock_irq(&lock); \
-                cmd; \
-                schedule(); \
-                spin_lock_irq(&lock); \
-        } \
-        finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
+        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+                      spin_unlock_irq(&lock); \
+                      cmd; \
+                      schedule(); \
+                      spin_lock_irq(&lock))
 
 /**
  * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
@@ -772,10 +699,9 @@ do { \
 #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
 ({ \
         int __ret = 0; \
- \
         if (!(condition)) \
-                __wait_event_interruptible_lock_irq(wq, condition, \
-                                                    lock, __ret, cmd); \
+                __ret = __wait_event_interruptible_lock_irq(wq, \
+                                                condition, lock, cmd); \
         __ret; \
 })
 
@@ -804,39 +730,24 @@ do { \
 #define wait_event_interruptible_lock_irq(wq, condition, lock) \
 ({ \
         int __ret = 0; \
- \
         if (!(condition)) \
-                __wait_event_interruptible_lock_irq(wq, condition, \
-                                                    lock, __ret, ); \
+                __ret = __wait_event_interruptible_lock_irq(wq, \
+                                                condition, lock,); \
         __ret; \
 })
 
 #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
-                                                    lock, ret) \
-do { \
-        DEFINE_WAIT(__wait); \
- \
-        for (;;) { \
-                prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
-                if (condition) \
-                        break; \
-                if (signal_pending(current)) { \
-                        ret = -ERESTARTSYS; \
-                        break; \
-                } \
-                spin_unlock_irq(&lock); \
-                ret = schedule_timeout(ret); \
-                spin_lock_irq(&lock); \
-                if (!ret) \
-                        break; \
-        } \
-        finish_wait(&wq, &__wait); \
-} while (0)
+                                                    lock, timeout) \
+        ___wait_event(wq, ___wait_cond_timeout(condition), \
+                      TASK_INTERRUPTIBLE, 0, timeout, \
+                      spin_unlock_irq(&lock); \
+                      __ret = schedule_timeout(__ret); \
+                      spin_lock_irq(&lock));
 
 /**
- * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
- *      The condition is checked under the lock. This is expected
- *      to be called with the lock taken.
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
+ *      true or a timeout elapses. The condition is checked under
+ *      the lock. This is expected to be called with the lock taken.
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @lock: a locked spinlock_t, which will be released before schedule()
@@ -860,11 +771,10 @@ do { \
 #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
                                                   timeout) \
 ({ \
-        int __ret = timeout; \
- \
-        if (!(condition)) \
-                __wait_event_interruptible_lock_irq_timeout( \
-                                wq, condition, lock, __ret); \
+        long __ret = timeout; \
+        if (!___wait_cond_timeout(condition)) \
+                __ret = __wait_event_interruptible_lock_irq_timeout( \
+                                wq, condition, lock, timeout); \
         __ret; \
 })
 
@@ -875,20 +785,18 @@ do { \
  * We plan to remove these interfaces.
  */
 extern void sleep_on(wait_queue_head_t *q);
-extern long sleep_on_timeout(wait_queue_head_t *q,
-                                      signed long timeout);
+extern long sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
 extern void interruptible_sleep_on(wait_queue_head_t *q);
-extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
-                                      signed long timeout);
+extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
 
 /*
  * Waitqueues which are removed from the waitqueue_head at wakeup time
  */
 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
+long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
-void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
-                        unsigned int mode, void *key);
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 
@@ -934,8 +842,8 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
  * One uses wait_on_bit() where one is waiting for the bit to clear,
  * but has no intention of setting it.
  */
-static inline int wait_on_bit(void *word, int bit,
-                                int (*action)(void *), unsigned mode)
+static inline int
+wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
 {
         if (!test_bit(bit, word))
                 return 0;
@@ -958,8 +866,8 @@ static inline int wait_on_bit(void *word, int bit,
  * One uses wait_on_bit_lock() where one is waiting for the bit to
  * clear with the intention of setting it, and when done, clearing it.
  */
-static inline int wait_on_bit_lock(void *word, int bit,
-                                int (*action)(void *), unsigned mode)
+static inline int
+wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
 {
         if (!test_and_set_bit(bit, word))
                 return 0;
@@ -983,5 +891,5 @@ int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
                 return 0;
         return out_of_line_wait_on_atomic_t(val, action, mode);
 }
 
-#endif
+#endif /* _LINUX_WAIT_H */
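
For readers coming to this header from the driver side, a minimal usage sketch follows (illustrative only, not part of the diff). The device structure and function names below are made up; only the wait.h calls and their documented return-value semantics are real API. After this patch, such callers compile unchanged but sleep through prepare_to_wait_event()/___wait_event() internally.

/*
 * Illustrative only: hypothetical driver-side use of the wait.h interfaces
 * reworked above. "demo_dev", "demo_read_wait" and "demo_data_arrived" are
 * invented names; init_waitqueue_head(&dev->wq) is assumed to have been
 * called at setup time.
 */
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct demo_dev {
        wait_queue_head_t wq;   /* waiters sleep here */
        bool data_ready;        /* condition checked by wait_event*() */
};

static int demo_read_wait(struct demo_dev *dev)
{
        long ret;

        /*
         * Sleep in TASK_INTERRUPTIBLE until data_ready is true, a signal
         * arrives, or one second passes.
         */
        ret = wait_event_interruptible_timeout(dev->wq, dev->data_ready, HZ);
        if (ret == 0)
                return -ETIMEDOUT;      /* timed out, condition still false */
        if (ret < 0)
                return ret;             /* -ERESTARTSYS: interrupted by a signal */
        return 0;                       /* condition became true */
}

/*
 * Wake-up side: per the kernel-doc above, set the condition first,
 * then wake the queue.
 */
static void demo_data_arrived(struct demo_dev *dev)
{
        dev->data_ready = true;
        wake_up_interruptible(&dev->wq);
}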