Diffstat (limited to 'include/linux/wait.h')
-rw-r--r--  include/linux/wait.h  | 393
1 file changed, 163 insertions(+), 230 deletions(-)
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a67fc1635592..eaa00b10abaa 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -1,7 +1,8 @@
 #ifndef _LINUX_WAIT_H
 #define _LINUX_WAIT_H
-
-
+/*
+ * Linux wait queue related types and methods
+ */
 #include <linux/list.h>
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
@@ -13,27 +14,27 @@ typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, v
 int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
 
 struct __wait_queue {
-	unsigned int flags;
+	unsigned int		flags;
 #define WQ_FLAG_EXCLUSIVE	0x01
-	void *private;
-	wait_queue_func_t func;
-	struct list_head task_list;
+	void			*private;
+	wait_queue_func_t	func;
+	struct list_head	task_list;
 };
 
 struct wait_bit_key {
-	void *flags;
-	int bit_nr;
-#define WAIT_ATOMIC_T_BIT_NR -1
+	void			*flags;
+	int			bit_nr;
+#define WAIT_ATOMIC_T_BIT_NR	-1
 };
 
 struct wait_bit_queue {
-	struct wait_bit_key key;
-	wait_queue_t wait;
+	struct wait_bit_key	key;
+	wait_queue_t		wait;
 };
 
 struct __wait_queue_head {
-	spinlock_t lock;
-	struct list_head task_list;
+	spinlock_t		lock;
+	struct list_head	task_list;
 };
 typedef struct __wait_queue_head wait_queue_head_t;
 
@@ -84,17 +85,17 @@ extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct
 
 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
 {
-	q->flags = 0;
-	q->private = p;
-	q->func = default_wake_function;
+	q->flags	= 0;
+	q->private	= p;
+	q->func		= default_wake_function;
 }
 
-static inline void init_waitqueue_func_entry(wait_queue_t *q,
-					wait_queue_func_t func)
+static inline void
+init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
 {
-	q->flags = 0;
-	q->private = NULL;
-	q->func = func;
+	q->flags	= 0;
+	q->private	= NULL;
+	q->func		= func;
 }
 
 static inline int waitqueue_active(wait_queue_head_t *q)
@@ -114,8 +115,8 @@ static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
 /*
  * Used for wake-one threads:
  */
-static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
-					      wait_queue_t *wait)
+static inline void
+__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	wait->flags |= WQ_FLAG_EXCLUSIVE;
 	__add_wait_queue(q, wait);
@@ -127,23 +128,22 @@ static inline void __add_wait_queue_tail(wait_queue_head_t *head,
 	list_add_tail(&new->task_list, &head->task_list);
 }
 
-static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
-						   wait_queue_t *wait)
+static inline void
+__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
 {
 	wait->flags |= WQ_FLAG_EXCLUSIVE;
 	__add_wait_queue_tail(q, wait);
 }
 
-static inline void __remove_wait_queue(wait_queue_head_t *head,
-					wait_queue_t *old)
+static inline void
+__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
 {
 	list_del(&old->task_list);
 }
 
 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
-void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
-			void *key);
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_bit(wait_queue_head_t *, void *, int);
@@ -170,27 +170,64 @@ wait_queue_head_t *bit_waitqueue(void *, int);
 /*
  * Wakeup macros to be used to report events to the targets.
  */
 #define wake_up_poll(x, m) \
 	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
 #define wake_up_locked_poll(x, m) \
 	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
 #define wake_up_interruptible_poll(x, m) \
 	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
 #define wake_up_interruptible_sync_poll(x, m) \
 	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
 
-#define __wait_event(wq, condition) \
-do { \
-	DEFINE_WAIT(__wait); \
+#define ___wait_cond_timeout(condition) \
+({ \
+	bool __cond = (condition); \
+	if (__cond && !__ret) \
+		__ret = 1; \
+	__cond || !__ret; \
+})
+
+#define ___wait_is_interruptible(state) \
+	(!__builtin_constant_p(state) || \
+		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
+
+#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
+({ \
+	__label__ __out; \
+	wait_queue_t __wait; \
+	long __ret = ret; \
+	\
+	INIT_LIST_HEAD(&__wait.task_list); \
+	if (exclusive) \
+		__wait.flags = WQ_FLAG_EXCLUSIVE; \
+	else \
+		__wait.flags = 0; \
 	\
 	for (;;) { \
-		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
+		long __int = prepare_to_wait_event(&wq, &__wait, state);\
+		\
 		if (condition) \
 			break; \
-		schedule(); \
+		\
+		if (___wait_is_interruptible(state) && __int) { \
+			__ret = __int; \
+			if (exclusive) { \
+				abort_exclusive_wait(&wq, &__wait, \
+						     state, NULL); \
+				goto __out; \
+			} \
+			break; \
+		} \
+		\
+		cmd; \
 	} \
 	finish_wait(&wq, &__wait); \
-} while (0)
+__out:	__ret; \
+})
+
+#define __wait_event(wq, condition) \
+	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+			    schedule())
 
 /**
  * wait_event - sleep until a condition gets true
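The three helpers added above, ___wait_cond_timeout(), ___wait_is_interruptible() and ___wait_event(), centralize the wait loop that every wait_event*() macro used to open-code. For orientation, here is a minimal, hypothetical caller of the resulting API; the names my_wq and my_done are illustrative only and are not part of this patch:

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_done;

static void my_consumer(void)
{
	/* Sleeps in TASK_UNINTERRUPTIBLE until my_done is non-zero; the
	 * loop, prepare_to_wait_event() and finish_wait() calls are all
	 * supplied by ___wait_event() above. */
	wait_event(my_wq, my_done != 0);
}

static void my_producer(void)
{
	my_done = 1;		/* change the condition first ... */
	wake_up(&my_wq);	/* ... then wake the waiters */
}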
@@ -204,29 +241,17 @@ do { \
  * wake_up() has to be called after changing any variable that could
  * change the result of the wait condition.
  */
 #define wait_event(wq, condition) \
 do { \
 	if (condition) \
 		break; \
 	__wait_event(wq, condition); \
 } while (0)
 
-#define __wait_event_timeout(wq, condition, ret) \
-do { \
-	DEFINE_WAIT(__wait); \
-	\
-	for (;;) { \
-		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
-		if (condition) \
-			break; \
-		ret = schedule_timeout(ret); \
-		if (!ret) \
-			break; \
-	} \
-	if (!ret && (condition)) \
-		ret = 1; \
-	finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_timeout(wq, condition, timeout) \
+	___wait_event(wq, ___wait_cond_timeout(condition), \
+		      TASK_UNINTERRUPTIBLE, 0, timeout, \
+		      __ret = schedule_timeout(__ret))
 
 /**
  * wait_event_timeout - sleep until a condition gets true or a timeout elapses
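The timeout variant keeps its old return convention: 0 if the timeout elapsed with the condition still false, otherwise at least 1 (the remaining jiffies), which is exactly what ___wait_cond_timeout() arranges. A hypothetical use, reusing the illustrative my_wq/my_done pair from above:

	long remaining = wait_event_timeout(my_wq, my_done != 0, HZ / 2);

	if (!remaining)
		pr_warn("my_done not set within half a second\n");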
@@ -248,29 +273,40 @@ do { \
 #define wait_event_timeout(wq, condition, timeout) \
 ({ \
 	long __ret = timeout; \
-	if (!(condition)) \
-		__wait_event_timeout(wq, condition, __ret); \
+	if (!___wait_cond_timeout(condition)) \
+		__ret = __wait_event_timeout(wq, condition, timeout); \
 	__ret; \
 })
 
-#define __wait_event_interruptible(wq, condition, ret) \
+#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
+	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+			    cmd1; schedule(); cmd2)
+
+/**
+ * wait_event_cmd - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * cmd1: the command will be executed before sleep
+ * cmd2: the command will be executed after sleep
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define wait_event_cmd(wq, condition, cmd1, cmd2) \
 do { \
-	DEFINE_WAIT(__wait); \
-	\
-	for (;;) { \
-		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
-		if (condition) \
-			break; \
-		if (!signal_pending(current)) { \
-			schedule(); \
-			continue; \
-		} \
-		ret = -ERESTARTSYS; \
+	if (condition) \
 		break; \
-	} \
-	finish_wait(&wq, &__wait); \
+	__wait_event_cmd(wq, condition, cmd1, cmd2); \
 } while (0)
 
+#define __wait_event_interruptible(wq, condition) \
+	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+		      schedule())
+
 /**
  * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
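wait_event_cmd(), added above, lets the caller run arbitrary statements around the schedule() call, typically to drop and re-take a lock while sleeping. A sketch under the assumption that my_lock is a mutex the caller already holds:

	wait_event_cmd(my_wq, my_done != 0,
		       mutex_unlock(&my_lock),	/* cmd1: runs before schedule() */
		       mutex_lock(&my_lock));	/* cmd2: runs after waking up */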
@@ -290,31 +326,14 @@ do { \
 ({ \
 	int __ret = 0; \
 	if (!(condition)) \
-		__wait_event_interruptible(wq, condition, __ret); \
+		__ret = __wait_event_interruptible(wq, condition); \
 	__ret; \
 })
 
-#define __wait_event_interruptible_timeout(wq, condition, ret) \
-do { \
-	DEFINE_WAIT(__wait); \
-	\
-	for (;;) { \
-		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
-		if (condition) \
-			break; \
-		if (!signal_pending(current)) { \
-			ret = schedule_timeout(ret); \
-			if (!ret) \
-				break; \
-			continue; \
-		} \
-		ret = -ERESTARTSYS; \
-		break; \
-	} \
-	if (!ret && (condition)) \
-		ret = 1; \
-	finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_interruptible_timeout(wq, condition, timeout) \
+	___wait_event(wq, ___wait_cond_timeout(condition), \
+		      TASK_INTERRUPTIBLE, 0, timeout, \
+		      __ret = schedule_timeout(__ret))
 
 /**
  * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
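wait_event_interruptible() now receives its -ERESTARTSYS from prepare_to_wait_event() through ___wait_event(); callers check the return value as before. Illustrative usage with the hypothetical my_wq/my_done pair:

	int err = wait_event_interruptible(my_wq, my_done != 0);

	if (err)
		return err;	/* -ERESTARTSYS: interrupted by a signal */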
@@ -337,15 +356,15 @@ do { \
 #define wait_event_interruptible_timeout(wq, condition, timeout) \
 ({ \
 	long __ret = timeout; \
-	if (!(condition)) \
-		__wait_event_interruptible_timeout(wq, condition, __ret); \
+	if (!___wait_cond_timeout(condition)) \
+		__ret = __wait_event_interruptible_timeout(wq, \
+						condition, timeout); \
 	__ret; \
 })
 
 #define __wait_event_hrtimeout(wq, condition, timeout, state) \
 ({ \
 	int __ret = 0; \
-	DEFINE_WAIT(__wait); \
 	struct hrtimer_sleeper __t; \
 	\
 	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
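wait_event_interruptible_timeout() combines both conventions: 0 on timeout, -ERESTARTSYS when a signal arrived, and the remaining jiffies (at least 1) when the condition came true in time. Hypothetical usage:

	long ret = wait_event_interruptible_timeout(my_wq, my_done != 0, HZ);

	if (ret == 0)
		pr_warn("timed out\n");
	else if (ret < 0)
		return ret;	/* interrupted by a signal */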
@@ -356,25 +375,15 @@ do { \
 			current->timer_slack_ns, \
 			HRTIMER_MODE_REL); \
 	\
-	for (;;) { \
-		prepare_to_wait(&wq, &__wait, state); \
-		if (condition) \
-			break; \
-		if (state == TASK_INTERRUPTIBLE && \
-		    signal_pending(current)) { \
-			__ret = -ERESTARTSYS; \
-			break; \
-		} \
+	__ret = ___wait_event(wq, condition, state, 0, 0, \
 		if (!__t.task) { \
 			__ret = -ETIME; \
 			break; \
 		} \
-		schedule(); \
-	} \
+		schedule()); \
 	\
 	hrtimer_cancel(&__t.timer); \
 	destroy_hrtimer_on_stack(&__t.timer); \
-	finish_wait(&wq, &__wait); \
 	__ret; \
 })
 
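The hrtimer-based variant takes a ktime_t budget and reports an expired timer as -ETIME (the !__t.task branch folded into the cmd argument above). A hedged example with an illustrative 500 microsecond limit:

	int ret = wait_event_hrtimeout(my_wq, my_done != 0,
				       ktime_set(0, 500 * NSEC_PER_USEC));

	if (ret == -ETIME)
		pr_warn("hrtimer expired before my_done was set\n");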
@@ -428,33 +437,15 @@ do { \
 	__ret; \
 })
 
-#define __wait_event_interruptible_exclusive(wq, condition, ret) \
-do { \
-	DEFINE_WAIT(__wait); \
-	\
-	for (;;) { \
-		prepare_to_wait_exclusive(&wq, &__wait, \
-					TASK_INTERRUPTIBLE); \
-		if (condition) { \
-			finish_wait(&wq, &__wait); \
-			break; \
-		} \
-		if (!signal_pending(current)) { \
-			schedule(); \
-			continue; \
-		} \
-		ret = -ERESTARTSYS; \
-		abort_exclusive_wait(&wq, &__wait, \
-				TASK_INTERRUPTIBLE, NULL); \
-		break; \
-	} \
-} while (0)
+#define __wait_event_interruptible_exclusive(wq, condition) \
+	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
+		      schedule())
 
 #define wait_event_interruptible_exclusive(wq, condition) \
 ({ \
 	int __ret = 0; \
 	if (!(condition)) \
-		__wait_event_interruptible_exclusive(wq, condition, __ret);\
+		__ret = __wait_event_interruptible_exclusive(wq, condition);\
 	__ret; \
 })
 
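Exclusive waiters are queued with WQ_FLAG_EXCLUSIVE, so wake_up() wakes at most one of them; passing exclusive=1 to ___wait_event() also routes the signal case through abort_exclusive_wait() so that a pending wakeup is not lost. Illustrative usage:

	int err = wait_event_interruptible_exclusive(my_wq, my_done != 0);

	if (err)
		return err;	/* -ERESTARTSYS */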
@@ -606,24 +597,8 @@ do { \
 	? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
 
 
-
-#define __wait_event_killable(wq, condition, ret) \
-do { \
-	DEFINE_WAIT(__wait); \
-	\
-	for (;;) { \
-		prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
-		if (condition) \
-			break; \
-		if (!fatal_signal_pending(current)) { \
-			schedule(); \
-			continue; \
-		} \
-		ret = -ERESTARTSYS; \
-		break; \
-	} \
-	finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_killable(wq, condition) \
+	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
 
 /**
  * wait_event_killable - sleep until a condition gets true
@@ -644,26 +619,17 @@ do { \
 ({ \
 	int __ret = 0; \
 	if (!(condition)) \
-		__wait_event_killable(wq, condition, __ret); \
+		__ret = __wait_event_killable(wq, condition); \
 	__ret; \
 })
 
 
 #define __wait_event_lock_irq(wq, condition, lock, cmd) \
-do { \
-	DEFINE_WAIT(__wait); \
-	\
-	for (;;) { \
-		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
-		if (condition) \
-			break; \
-		spin_unlock_irq(&lock); \
-		cmd; \
-		schedule(); \
-		spin_lock_irq(&lock); \
-	} \
-	finish_wait(&wq, &__wait); \
-} while (0)
+	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+			    spin_unlock_irq(&lock); \
+			    cmd; \
+			    schedule(); \
+			    spin_lock_irq(&lock))
 
 /**
  * wait_event_lock_irq_cmd - sleep until a condition gets true. The
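wait_event_killable() sleeps in TASK_KILLABLE, so only fatal signals interrupt the wait. Illustrative usage:

	int err = wait_event_killable(my_wq, my_done != 0);

	if (err)
		return err;	/* -ERESTARTSYS: a fatal signal is pending */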
@@ -723,26 +689,12 @@ do { \
 } while (0)
 
 
-#define __wait_event_interruptible_lock_irq(wq, condition, \
-					    lock, ret, cmd) \
-do { \
-	DEFINE_WAIT(__wait); \
-	\
-	for (;;) { \
-		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
-		if (condition) \
-			break; \
-		if (signal_pending(current)) { \
-			ret = -ERESTARTSYS; \
-			break; \
-		} \
-		spin_unlock_irq(&lock); \
-		cmd; \
-		schedule(); \
-		spin_lock_irq(&lock); \
-	} \
-	finish_wait(&wq, &__wait); \
-} while (0)
+#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
+	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
+		      spin_unlock_irq(&lock); \
+		      cmd; \
+		      schedule(); \
+		      spin_lock_irq(&lock))
 
 /**
  * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
@@ -772,10 +724,9 @@ do { \
 #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
 ({ \
 	int __ret = 0; \
-	\
 	if (!(condition)) \
-		__wait_event_interruptible_lock_irq(wq, condition, \
-						    lock, __ret, cmd); \
+		__ret = __wait_event_interruptible_lock_irq(wq, \
+						condition, lock, cmd); \
 	__ret; \
 })
 
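The *_lock_irq variants evaluate @condition with a spinlock held and drop it only around schedule(); on return the lock is held again, whether the wait succeeded or was interrupted. A sketch assuming my_lock is a spinlock_t protecting my_done, and my_kick_hw() is a hypothetical helper run after the lock is dropped and before sleeping:

	int err;

	spin_lock_irq(&my_lock);
	err = wait_event_interruptible_lock_irq_cmd(my_wq, my_done != 0,
						    my_lock, my_kick_hw());
	/* my_lock is held again here; err is 0 or -ERESTARTSYS */
	spin_unlock_irq(&my_lock);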
@@ -804,39 +755,24 @@ do { \
 #define wait_event_interruptible_lock_irq(wq, condition, lock) \
 ({ \
 	int __ret = 0; \
-	\
 	if (!(condition)) \
-		__wait_event_interruptible_lock_irq(wq, condition, \
-						    lock, __ret, ); \
+		__ret = __wait_event_interruptible_lock_irq(wq, \
+						condition, lock,); \
 	__ret; \
 })
 
 #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
-						     lock, ret) \
-do { \
-	DEFINE_WAIT(__wait); \
-	\
-	for (;;) { \
-		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
-		if (condition) \
-			break; \
-		if (signal_pending(current)) { \
-			ret = -ERESTARTSYS; \
-			break; \
-		} \
-		spin_unlock_irq(&lock); \
-		ret = schedule_timeout(ret); \
-		spin_lock_irq(&lock); \
-		if (!ret) \
-			break; \
-	} \
-	finish_wait(&wq, &__wait); \
-} while (0)
+						     lock, timeout) \
+	___wait_event(wq, ___wait_cond_timeout(condition), \
+		      TASK_INTERRUPTIBLE, 0, timeout, \
+		      spin_unlock_irq(&lock); \
+		      __ret = schedule_timeout(__ret); \
+		      spin_lock_irq(&lock));
 
 /**
- * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
- * The condition is checked under the lock. This is expected
- * to be called with the lock taken.
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
+ *		true or a timeout elapses. The condition is checked under
+ *		the lock. This is expected to be called with the lock taken.
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @lock: a locked spinlock_t, which will be released before schedule()
@@ -860,11 +796,10 @@ do { \
 #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
 						  timeout) \
 ({ \
-	int __ret = timeout; \
-	\
-	if (!(condition)) \
-		__wait_event_interruptible_lock_irq_timeout( \
-				wq, condition, lock, __ret); \
+	long __ret = timeout; \
+	if (!___wait_cond_timeout(condition)) \
+		__ret = __wait_event_interruptible_lock_irq_timeout( \
+				wq, condition, lock, timeout); \
 	__ret; \
 })
 
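The timeout form switches its internal __ret to long, matching schedule_timeout(): 0 means the timeout elapsed, a negative value means a signal arrived, and a positive value is the time left. Hypothetical usage, again with my_lock held on entry:

	long t;

	spin_lock_irq(&my_lock);
	t = wait_event_interruptible_lock_irq_timeout(my_wq, my_done != 0,
						      my_lock, HZ);
	spin_unlock_irq(&my_lock);

	/* t == 0: timed out; t < 0: interrupted; t >= 1: jiffies left over */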
@@ -875,20 +810,18 @@ do { \
  * We plan to remove these interfaces.
  */
 extern void sleep_on(wait_queue_head_t *q);
-extern long sleep_on_timeout(wait_queue_head_t *q,
-			     signed long timeout);
+extern long sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
 extern void interruptible_sleep_on(wait_queue_head_t *q);
-extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
-					   signed long timeout);
+extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
 
 /*
  * Waitqueues which are removed from the waitqueue_head at wakeup time
  */
 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
+long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
-void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
-			unsigned int mode, void *key);
+void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 
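prepare_to_wait_event(), declared above, is the helper the reworked macros call: it performs the prepare step and returns -ERESTARTSYS instead of queueing when a signal is already pending for the given state. Code that cannot use the wait_event*() macros can still open-code the classic loop; an illustrative sketch:

	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (my_done)
			break;
		if (signal_pending(current))
			break;
		schedule();
	}
	finish_wait(&my_wq, &wait);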
@@ -934,8 +867,8 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
  * One uses wait_on_bit() where one is waiting for the bit to clear,
  * but has no intention of setting it.
  */
-static inline int wait_on_bit(void *word, int bit,
-				int (*action)(void *), unsigned mode)
+static inline int
+wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
 {
 	if (!test_bit(bit, word))
 		return 0;
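wait_on_bit() waits for a bit in a word to clear, delegating the actual sleep to @action, which returns 0 to keep waiting or an error code to abort. A hypothetical waiter/waker pair; my_flags, MY_BIT_BUSY, my_bit_wait(), my_waiter() and my_waker() are illustrative names only:

static unsigned long my_flags;
#define MY_BIT_BUSY	0

static int my_bit_wait(void *word)
{
	schedule();	/* uninterruptible sleep between checks */
	return 0;
}

static void my_waiter(void)
{
	wait_on_bit(&my_flags, MY_BIT_BUSY, my_bit_wait, TASK_UNINTERRUPTIBLE);
}

static void my_waker(void)
{
	clear_bit(MY_BIT_BUSY, &my_flags);
	smp_mb__after_clear_bit();	/* order the clear before the wakeup */
	wake_up_bit(&my_flags, MY_BIT_BUSY);
}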
@@ -958,8 +891,8 @@ static inline int wait_on_bit(void *word, int bit,
  * One uses wait_on_bit_lock() where one is waiting for the bit to
  * clear with the intention of setting it, and when done, clearing it.
  */
-static inline int wait_on_bit_lock(void *word, int bit,
-				int (*action)(void *), unsigned mode)
+static inline int
+wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
 {
 	if (!test_and_set_bit(bit, word))
 		return 0;
@@ -983,5 +916,5 @@ int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
 		return 0;
 	return out_of_line_wait_on_atomic_t(val, action, mode);
 }
 
-#endif
+#endif /* _LINUX_WAIT_H */