author		Linus Torvalds <torvalds@linux-foundation.org>	2017-03-07 18:33:14 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-03-08 13:36:03 -0500
commit		bd0f9b356d00aa241ced36fb075a07041c28d3b8
tree		3184549164dad820211b93695cfc5f6cf7fecee5
parent		ec3b93ae0bf4742e9cbb40e1964129926c1464e0
sched/headers: fix up header file dependency on <linux/sched/signal.h>
The scheduler header file split and cleanups ended up exposing a few
nasty header file dependencies, and in particular it showed how we in
<linux/wait.h> ended up depending on "signal_pending()", which now comes
from <linux/sched/signal.h>.
That's a very subtle and annoying dependency, which already caused a
semantic merge conflict (see commit e58bc927835a "Pull overlayfs updates
from Miklos Szeredi", which added that fixup in the merge commit).
It turns out that we can avoid this dependency _and_ improve code
generation by moving the guts of the fairly nasty helper #define
__wait_event_interruptible_locked() to out-of-line code. The code that
includes the signal_pending() check is all in the slow-path where we
actually go to sleep waiting for the event anyway, so using a helper
function is the right thing to do.
Using a helper function is also what we already did for the non-locked
versions, see the "__wait_event*()" macros and the "prepare_to_wait*()"
set of helper functions.
We might want to try to unify all these macro games, we have a _lot_ of
subtly different wait-event loops. But this is the minimal patch to fix
the annoying header dependency.
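As a reduced illustration of why the macro form forces the dependency (a sketch with hypothetical names, not code from the patch): a macro body is pasted into every expansion site, so every file that expands it must have signal_pending() declared, while an out-of-line helper needs the declaration only in the one .c file that defines it.

	/* Sketch only, hypothetical names. The macro body is copied into
	 * every caller, so each including file needs signal_pending() in
	 * scope, i.e. <linux/sched/signal.h> pulled in somehow. */
	#define check_signal_macro() \
		(signal_pending(current) ? -ERESTARTSYS : 0)

	/* The helper confines that need to one translation unit: the
	 * header carries only this declaration, and only the .c file
	 * defining check_signal_helper() includes <linux/sched/signal.h>. */
	extern int check_signal_helper(void);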
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 include/linux/wait.h | 31
 kernel/sched/wait.c  | 39
 2 files changed, 49 insertions(+), 21 deletions(-)
diff --git a/include/linux/wait.h b/include/linux/wait.h
index aacb1282d19a..db076ca7f11d 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -620,30 +620,19 @@ do {									\
 	__ret;								\
 })
 
+extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *);
+extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
 
-#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
+#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
 ({									\
-	int __ret = 0;							\
+	int __ret;							\
 	DEFINE_WAIT(__wait);						\
 	if (exclusive)							\
 		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
 	do {								\
-		if (likely(list_empty(&__wait.task_list)))		\
-			__add_wait_queue_tail(&(wq), &__wait);		\
-		set_current_state(TASK_INTERRUPTIBLE);			\
-		if (signal_pending(current)) {				\
-			__ret = -ERESTARTSYS;				\
+		__ret = fn(&(wq), &__wait);				\
+		if (__ret)						\
 			break;						\
-		}							\
-		if (irq)						\
-			spin_unlock_irq(&(wq).lock);			\
-		else							\
-			spin_unlock(&(wq).lock);			\
-		schedule();						\
-		if (irq)						\
-			spin_lock_irq(&(wq).lock);			\
-		else							\
-			spin_lock(&(wq).lock);				\
 	} while (!(condition));						\
 	__remove_wait_queue(&(wq), &__wait);				\
 	__set_current_state(TASK_RUNNING);				\
@@ -676,7 +665,7 @@ do {									\
  */
 #define wait_event_interruptible_locked(wq, condition)			\
 	((condition)							\
-	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
+	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
 
 /**
  * wait_event_interruptible_locked_irq - sleep until a condition gets true
@@ -703,7 +692,7 @@ do {									\
  */
 #define wait_event_interruptible_locked_irq(wq, condition)		\
 	((condition)							\
-	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
+	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
 
 /**
  * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
@@ -734,7 +723,7 @@ do {									\
  */
 #define wait_event_interruptible_exclusive_locked(wq, condition)	\
 	((condition)							\
-	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
+	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
 
 /**
  * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
@@ -765,7 +754,7 @@ do {									\
  */
 #define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
 	((condition)							\
-	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
+	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
 
 
 #define __wait_event_killable(wq, condition)				\
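For context on the convention these macros rely on: the *_locked variants are entered with the waitqueue spinlock already held, and the waker must take the same lock, which is what makes the condition test race-free. A minimal sketch of a caller pair, with a made-up my_dev structure and data_ready condition:

/* Hypothetical waiter: dev->wq.lock must be held around the macro;
 * do_wait_intr() drops and re-takes it only across schedule(). */
static int my_dev_wait_for_data(struct my_dev *dev)
{
	int err;

	spin_lock(&dev->wq.lock);
	err = wait_event_interruptible_locked(dev->wq, dev->data_ready);
	spin_unlock(&dev->wq.lock);
	return err;	/* 0, or -ERESTARTSYS if a signal arrived */
}

/* Hypothetical waker: holds the same lock, so setting the condition
 * and waking cannot race with the waiter's check. */
static void my_dev_push_data(struct my_dev *dev)
{
	spin_lock(&dev->wq.lock);
	dev->data_ready = true;
	wake_up_locked(&dev->wq);
	spin_unlock(&dev->wq.lock);
}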
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 4d2ea6f25568..b8c84c6dee64 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -242,6 +242,45 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait_event);
 
+/*
+ * Note! These two wait functions are entered with the
+ * wait-queue lock held (and interrupts off in the _irq
+ * case), so there is no race with testing the wakeup
+ * condition in the caller before they add the wait
+ * entry to the wake queue.
+ */
+int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
+{
+	if (likely(list_empty(&wait->task_list)))
+		__add_wait_queue_tail(wq, wait);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (signal_pending(current))
+		return -ERESTARTSYS;
+
+	spin_unlock(&wq->lock);
+	schedule();
+	spin_lock(&wq->lock);
+	return 0;
+}
+EXPORT_SYMBOL(do_wait_intr);
+
+int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait)
+{
+	if (likely(list_empty(&wait->task_list)))
+		__add_wait_queue_tail(wq, wait);
+
+	set_current_state(TASK_INTERRUPTIBLE);
+	if (signal_pending(current))
+		return -ERESTARTSYS;
+
+	spin_unlock_irq(&wq->lock);
+	schedule();
+	spin_lock_irq(&wq->lock);
+	return 0;
+}
+EXPORT_SYMBOL(do_wait_intr_irq);
+
 /**
  * finish_wait - clean up after waiting in a queue
  * @q: waitqueue waited on
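With the helpers in place, the slow path of wait_event_interruptible_locked(wq, cond) expands to roughly the following (an approximation for illustration: backslash continuations removed and the exclusive == 0 branch folded away):

int __ret;
DEFINE_WAIT(__wait);
do {
	/* do_wait_intr(): queue __wait if needed, set TASK_INTERRUPTIBLE,
	 * return -ERESTARTSYS on a pending signal, otherwise drop the
	 * lock, schedule(), and re-take the lock. */
	__ret = do_wait_intr(&wq, &__wait);
	if (__ret)
		break;
} while (!(cond));
__remove_wait_queue(&wq, &__wait);
__set_current_state(TASK_RUNNING);

Passing the helper itself as the macro's fn argument replaces the old irq flag and its four spin_lock/spin_unlock branches, so every expansion site now inlines a single function call instead of the whole sleep sequence, and the signal_pending() reference leaves <linux/wait.h> entirely.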