aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2013-10-02 05:22:33 -0400
committerIngo Molnar <mingo@kernel.org>2013-10-04 04:16:25 -0400
commit35a2af94c7ce7130ca292c68b1d27fcfdb648f6b (patch)
tree10b28670fb387984751de104913db9408ade8741
parentebdc195f2ec68576876216081035293e37318e86 (diff)
sched/wait: Make the __wait_event*() interface more friendly
Change all __wait_event*() implementations to match the corresponding wait_event*() signature for convenience. In particular this does away with the weird 'ret' logic. Since there are __wait_event*() users this requires we update them too. Reviewed-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/20131002092529.042563462@infradead.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/mips/kernel/rtlx.c19
-rw-r--r--include/linux/tty.h10
-rw-r--r--include/linux/wait.h113
-rw-r--r--net/irda/af_irda.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c7
5 files changed, 73 insertions, 81 deletions
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index d763f11e35e2..2c12ea1668d1 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -172,8 +172,9 @@ int rtlx_open(int index, int can_sleep)
172 if (rtlx == NULL) { 172 if (rtlx == NULL) {
173 if( (p = vpe_get_shared(tclimit)) == NULL) { 173 if( (p = vpe_get_shared(tclimit)) == NULL) {
174 if (can_sleep) { 174 if (can_sleep) {
175 __wait_event_interruptible(channel_wqs[index].lx_queue, 175 ret = __wait_event_interruptible(
176 (p = vpe_get_shared(tclimit)), ret); 176 channel_wqs[index].lx_queue,
177 (p = vpe_get_shared(tclimit)));
177 if (ret) 178 if (ret)
178 goto out_fail; 179 goto out_fail;
179 } else { 180 } else {
@@ -263,11 +264,10 @@ unsigned int rtlx_read_poll(int index, int can_sleep)
263 /* data available to read? */ 264 /* data available to read? */
264 if (chan->lx_read == chan->lx_write) { 265 if (chan->lx_read == chan->lx_write) {
265 if (can_sleep) { 266 if (can_sleep) {
266 int ret = 0; 267 int ret = __wait_event_interruptible(
267 268 channel_wqs[index].lx_queue,
268 __wait_event_interruptible(channel_wqs[index].lx_queue,
269 (chan->lx_read != chan->lx_write) || 269 (chan->lx_read != chan->lx_write) ||
270 sp_stopping, ret); 270 sp_stopping);
271 if (ret) 271 if (ret)
272 return ret; 272 return ret;
273 273
@@ -440,14 +440,13 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
440 440
441 /* any space left... */ 441 /* any space left... */
442 if (!rtlx_write_poll(minor)) { 442 if (!rtlx_write_poll(minor)) {
443 int ret = 0; 443 int ret;
444 444
445 if (file->f_flags & O_NONBLOCK) 445 if (file->f_flags & O_NONBLOCK)
446 return -EAGAIN; 446 return -EAGAIN;
447 447
448 __wait_event_interruptible(channel_wqs[minor].rt_queue, 448 ret = __wait_event_interruptible(channel_wqs[minor].rt_queue,
449 rtlx_write_poll(minor), 449 rtlx_write_poll(minor));
450 ret);
451 if (ret) 450 if (ret)
452 return ret; 451 return ret;
453 } 452 }
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 6e803291028f..633cac77f9f9 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -672,14 +672,14 @@ static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
672#define wait_event_interruptible_tty(tty, wq, condition) \ 672#define wait_event_interruptible_tty(tty, wq, condition) \
673({ \ 673({ \
674 int __ret = 0; \ 674 int __ret = 0; \
675 if (!(condition)) { \ 675 if (!(condition)) \
676 __wait_event_interruptible_tty(tty, wq, condition, __ret); \ 676 __ret = __wait_event_interruptible_tty(tty, wq, \
677 } \ 677 condition); \
678 __ret; \ 678 __ret; \
679}) 679})
680 680
681#define __wait_event_interruptible_tty(tty, wq, condition, ret) \ 681#define __wait_event_interruptible_tty(tty, wq, condition) \
682 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, ret, \ 682 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
683 tty_unlock(tty); \ 683 tty_unlock(tty); \
684 schedule(); \ 684 schedule(); \
685 tty_lock(tty)) 685 tty_lock(tty))
diff --git a/include/linux/wait.h b/include/linux/wait.h
index c065e8af9749..bd4bd7b479b6 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -179,24 +179,23 @@ wait_queue_head_t *bit_waitqueue(void *, int);
179#define wake_up_interruptible_sync_poll(x, m) \ 179#define wake_up_interruptible_sync_poll(x, m) \
180 __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m)) 180 __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
181 181
182#define ___wait_cond_timeout(condition, ret) \ 182#define ___wait_cond_timeout(condition) \
183({ \ 183({ \
184 bool __cond = (condition); \ 184 bool __cond = (condition); \
185 if (__cond && !ret) \ 185 if (__cond && !__ret) \
186 ret = 1; \ 186 __ret = 1; \
187 __cond || !ret; \ 187 __cond || !__ret; \
188}) 188})
189 189
190#define ___wait_signal_pending(state) \ 190#define ___wait_signal_pending(state) \
191 ((state == TASK_INTERRUPTIBLE && signal_pending(current)) || \ 191 ((state == TASK_INTERRUPTIBLE && signal_pending(current)) || \
192 (state == TASK_KILLABLE && fatal_signal_pending(current))) 192 (state == TASK_KILLABLE && fatal_signal_pending(current)))
193 193
194#define ___wait_nop_ret int ret __always_unused
195
196#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \ 194#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
197do { \ 195({ \
198 __label__ __out; \ 196 __label__ __out; \
199 DEFINE_WAIT(__wait); \ 197 DEFINE_WAIT(__wait); \
198 long __ret = ret; \
200 \ 199 \
201 for (;;) { \ 200 for (;;) { \
202 if (exclusive) \ 201 if (exclusive) \
@@ -208,7 +207,7 @@ do { \
208 break; \ 207 break; \
209 \ 208 \
210 if (___wait_signal_pending(state)) { \ 209 if (___wait_signal_pending(state)) { \
211 ret = -ERESTARTSYS; \ 210 __ret = -ERESTARTSYS; \
212 if (exclusive) { \ 211 if (exclusive) { \
213 abort_exclusive_wait(&wq, &__wait, \ 212 abort_exclusive_wait(&wq, &__wait, \
214 state, NULL); \ 213 state, NULL); \
@@ -220,12 +219,12 @@ do { \
220 cmd; \ 219 cmd; \
221 } \ 220 } \
222 finish_wait(&wq, &__wait); \ 221 finish_wait(&wq, &__wait); \
223__out: ; \ 222__out: __ret; \
224} while (0) 223})
225 224
226#define __wait_event(wq, condition) \ 225#define __wait_event(wq, condition) \
227 ___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ 226 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
228 ___wait_nop_ret, schedule()) 227 schedule())
229 228
230/** 229/**
231 * wait_event - sleep until a condition gets true 230 * wait_event - sleep until a condition gets true
@@ -246,10 +245,10 @@ do { \
246 __wait_event(wq, condition); \ 245 __wait_event(wq, condition); \
247} while (0) 246} while (0)
248 247
249#define __wait_event_timeout(wq, condition, ret) \ 248#define __wait_event_timeout(wq, condition, timeout) \
250 ___wait_event(wq, ___wait_cond_timeout(condition, ret), \ 249 ___wait_event(wq, ___wait_cond_timeout(condition), \
251 TASK_UNINTERRUPTIBLE, 0, ret, \ 250 TASK_UNINTERRUPTIBLE, 0, timeout, \
252 ret = schedule_timeout(ret)) 251 __ret = schedule_timeout(__ret))
253 252
254/** 253/**
255 * wait_event_timeout - sleep until a condition gets true or a timeout elapses 254 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
@@ -272,12 +271,12 @@ do { \
272({ \ 271({ \
273 long __ret = timeout; \ 272 long __ret = timeout; \
274 if (!(condition)) \ 273 if (!(condition)) \
275 __wait_event_timeout(wq, condition, __ret); \ 274 __ret = __wait_event_timeout(wq, condition, timeout); \
276 __ret; \ 275 __ret; \
277}) 276})
278 277
279#define __wait_event_interruptible(wq, condition, ret) \ 278#define __wait_event_interruptible(wq, condition) \
280 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, ret, \ 279 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
281 schedule()) 280 schedule())
282 281
283/** 282/**
@@ -299,14 +298,14 @@ do { \
299({ \ 298({ \
300 int __ret = 0; \ 299 int __ret = 0; \
301 if (!(condition)) \ 300 if (!(condition)) \
302 __wait_event_interruptible(wq, condition, __ret); \ 301 __ret = __wait_event_interruptible(wq, condition); \
303 __ret; \ 302 __ret; \
304}) 303})
305 304
306#define __wait_event_interruptible_timeout(wq, condition, ret) \ 305#define __wait_event_interruptible_timeout(wq, condition, timeout) \
307 ___wait_event(wq, ___wait_cond_timeout(condition, ret), \ 306 ___wait_event(wq, ___wait_cond_timeout(condition), \
308 TASK_INTERRUPTIBLE, 0, ret, \ 307 TASK_INTERRUPTIBLE, 0, timeout, \
309 ret = schedule_timeout(ret)) 308 __ret = schedule_timeout(__ret))
310 309
311/** 310/**
312 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses 311 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
@@ -330,7 +329,8 @@ do { \
330({ \ 329({ \
331 long __ret = timeout; \ 330 long __ret = timeout; \
332 if (!(condition)) \ 331 if (!(condition)) \
333 __wait_event_interruptible_timeout(wq, condition, __ret); \ 332 __ret = __wait_event_interruptible_timeout(wq, \
333 condition, timeout); \
334 __ret; \ 334 __ret; \
335}) 335})
336 336
@@ -347,7 +347,7 @@ do { \
347 current->timer_slack_ns, \ 347 current->timer_slack_ns, \
348 HRTIMER_MODE_REL); \ 348 HRTIMER_MODE_REL); \
349 \ 349 \
350 ___wait_event(wq, condition, state, 0, __ret, \ 350 __ret = ___wait_event(wq, condition, state, 0, 0, \
351 if (!__t.task) { \ 351 if (!__t.task) { \
352 __ret = -ETIME; \ 352 __ret = -ETIME; \
353 break; \ 353 break; \
@@ -409,15 +409,15 @@ do { \
409 __ret; \ 409 __ret; \
410}) 410})
411 411
412#define __wait_event_interruptible_exclusive(wq, condition, ret) \ 412#define __wait_event_interruptible_exclusive(wq, condition) \
413 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, ret, \ 413 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
414 schedule()) 414 schedule())
415 415
416#define wait_event_interruptible_exclusive(wq, condition) \ 416#define wait_event_interruptible_exclusive(wq, condition) \
417({ \ 417({ \
418 int __ret = 0; \ 418 int __ret = 0; \
419 if (!(condition)) \ 419 if (!(condition)) \
420 __wait_event_interruptible_exclusive(wq, condition, __ret);\ 420 __ret = __wait_event_interruptible_exclusive(wq, condition);\
421 __ret; \ 421 __ret; \
422}) 422})
423 423
@@ -570,8 +570,8 @@ do { \
570 570
571 571
572 572
573#define __wait_event_killable(wq, condition, ret) \ 573#define __wait_event_killable(wq, condition) \
574 ___wait_event(wq, condition, TASK_KILLABLE, 0, ret, schedule()) 574 ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
575 575
576/** 576/**
577 * wait_event_killable - sleep until a condition gets true 577 * wait_event_killable - sleep until a condition gets true
@@ -592,18 +592,17 @@ do { \
592({ \ 592({ \
593 int __ret = 0; \ 593 int __ret = 0; \
594 if (!(condition)) \ 594 if (!(condition)) \
595 __wait_event_killable(wq, condition, __ret); \ 595 __ret = __wait_event_killable(wq, condition); \
596 __ret; \ 596 __ret; \
597}) 597})
598 598
599 599
600#define __wait_event_lock_irq(wq, condition, lock, cmd) \ 600#define __wait_event_lock_irq(wq, condition, lock, cmd) \
601 ___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ 601 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
602 ___wait_nop_ret, \ 602 spin_unlock_irq(&lock); \
603 spin_unlock_irq(&lock); \ 603 cmd; \
604 cmd; \ 604 schedule(); \
605 schedule(); \ 605 spin_lock_irq(&lock))
606 spin_lock_irq(&lock))
607 606
608/** 607/**
609 * wait_event_lock_irq_cmd - sleep until a condition gets true. The 608 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
@@ -663,11 +662,11 @@ do { \
663} while (0) 662} while (0)
664 663
665 664
666#define __wait_event_interruptible_lock_irq(wq, condition, lock, ret, cmd) \ 665#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
667 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, ret, \ 666 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
668 spin_unlock_irq(&lock); \ 667 spin_unlock_irq(&lock); \
669 cmd; \ 668 cmd; \
670 schedule(); \ 669 schedule(); \
671 spin_lock_irq(&lock)) 670 spin_lock_irq(&lock))
672 671
673/** 672/**
@@ -698,10 +697,9 @@ do { \
698#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \ 697#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
699({ \ 698({ \
700 int __ret = 0; \ 699 int __ret = 0; \
701 \
702 if (!(condition)) \ 700 if (!(condition)) \
703 __wait_event_interruptible_lock_irq(wq, condition, \ 701 __ret = __wait_event_interruptible_lock_irq(wq, \
704 lock, __ret, cmd); \ 702 condition, lock, cmd); \
705 __ret; \ 703 __ret; \
706}) 704})
707 705
@@ -730,18 +728,18 @@ do { \
730#define wait_event_interruptible_lock_irq(wq, condition, lock) \ 728#define wait_event_interruptible_lock_irq(wq, condition, lock) \
731({ \ 729({ \
732 int __ret = 0; \ 730 int __ret = 0; \
733 \
734 if (!(condition)) \ 731 if (!(condition)) \
735 __wait_event_interruptible_lock_irq(wq, condition, \ 732 __ret = __wait_event_interruptible_lock_irq(wq, \
736 lock, __ret, ); \ 733 condition, lock,); \
737 __ret; \ 734 __ret; \
738}) 735})
739 736
740#define __wait_event_interruptible_lock_irq_timeout(wq, condition, lock, ret) \ 737#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
741 ___wait_event(wq, ___wait_cond_timeout(condition, ret), \ 738 lock, timeout) \
742 TASK_INTERRUPTIBLE, 0, ret, \ 739 ___wait_event(wq, ___wait_cond_timeout(condition), \
743 spin_unlock_irq(&lock); \ 740 TASK_INTERRUPTIBLE, 0, timeout, \
744 ret = schedule_timeout(ret); \ 741 spin_unlock_irq(&lock); \
742 __ret = schedule_timeout(__ret); \
745 spin_lock_irq(&lock)); 743 spin_lock_irq(&lock));
746 744
747/** 745/**
@@ -771,11 +769,10 @@ do { \
771#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \ 769#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
772 timeout) \ 770 timeout) \
773({ \ 771({ \
774 int __ret = timeout; \ 772 long __ret = timeout; \
775 \
776 if (!(condition)) \ 773 if (!(condition)) \
777 __wait_event_interruptible_lock_irq_timeout( \ 774 __ret = __wait_event_interruptible_lock_irq_timeout( \
778 wq, condition, lock, __ret); \ 775 wq, condition, lock, timeout); \
779 __ret; \ 776 __ret; \
780}) 777})
781 778
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 0578d4fa00a9..0f676908d15b 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2563,9 +2563,8 @@ bed:
2563 jiffies + msecs_to_jiffies(val)); 2563 jiffies + msecs_to_jiffies(val));
2564 2564
2565 /* Wait for IR-LMP to call us back */ 2565 /* Wait for IR-LMP to call us back */
2566 __wait_event_interruptible(self->query_wait, 2566 err = __wait_event_interruptible(self->query_wait,
2567 (self->cachedaddr != 0 || self->errno == -ETIME), 2567 (self->cachedaddr != 0 || self->errno == -ETIME));
2568 err);
2569 2568
2570 /* If watchdog is still activated, kill it! */ 2569 /* If watchdog is still activated, kill it! */
2571 del_timer(&(self->watchdog)); 2570 del_timer(&(self->watchdog));
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index f4484719f3e6..f63c2388f38d 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1637,12 +1637,9 @@ static int sync_thread_master(void *data)
1637 continue; 1637 continue;
1638 } 1638 }
1639 while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) { 1639 while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) {
1640 int ret = 0; 1640 int ret = __wait_event_interruptible(*sk_sleep(sk),
1641
1642 __wait_event_interruptible(*sk_sleep(sk),
1643 sock_writeable(sk) || 1641 sock_writeable(sk) ||
1644 kthread_should_stop(), 1642 kthread_should_stop());
1645 ret);
1646 if (unlikely(kthread_should_stop())) 1643 if (unlikely(kthread_should_stop()))
1647 goto done; 1644 goto done;
1648 } 1645 }