diff options
author | Oleg Nesterov <oleg@redhat.com> | 2013-10-07 12:18:24 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2013-10-16 08:22:18 -0400 |
commit | c2d816443ef305aba8eaf0bf368f4d3d87494f06 (patch) | |
tree | 0331463c4ea621c1467e83894a9cebf3a91cb136 | |
parent | 8922915b38cd8b72f8e5af614b95be71d1d299d4 (diff) |
sched/wait: Introduce prepare_to_wait_event()
Add the new helper, prepare_to_wait_event() which should only be used
by ___wait_event().
prepare_to_wait_event() returns -ERESTARTSYS if signal_pending_state()
is true, otherwise it does prepare_to_wait/exclusive. This allows us to
uninline the signal-pending checks in wait_event*() macros.
Also, it can initialize wait->private/func. We do not care if they were
already initialized; the values are the same. This also shaves a couple
of insns from the inlined code.
This obviously makes the prepare_*() path a little bit slower, but we are
likely going to sleep anyway, so I think it makes sense to shrink .text:
text data bss dec hex filename
===================================================
before: 5126092 2959248 10117120 18202460 115bf5c vmlinux
after: 5124618 2955152 10117120 18196890 115a99a vmlinux
on my build.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20131007161824.GA29757@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | include/linux/wait.h | 24 | ||||
-rw-r--r-- | kernel/wait.c | 24 |
2 files changed, 38 insertions, 10 deletions
diff --git a/include/linux/wait.h b/include/linux/wait.h index 04c0260bda8f..ec099b03e11b 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
@@ -187,27 +187,30 @@ wait_queue_head_t *bit_waitqueue(void *, int); | |||
187 | __cond || !__ret; \ | 187 | __cond || !__ret; \ |
188 | }) | 188 | }) |
189 | 189 | ||
190 | #define ___wait_signal_pending(state) \ | 190 | #define ___wait_is_interruptible(state) \ |
191 | ((state == TASK_INTERRUPTIBLE && signal_pending(current)) || \ | 191 | (!__builtin_constant_p(state) || \ |
192 | (state == TASK_KILLABLE && fatal_signal_pending(current))) | 192 | state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \ |
193 | 193 | ||
194 | #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \ | 194 | #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \ |
195 | ({ \ | 195 | ({ \ |
196 | __label__ __out; \ | 196 | __label__ __out; \ |
197 | DEFINE_WAIT(__wait); \ | 197 | wait_queue_t __wait; \ |
198 | long __ret = ret; \ | 198 | long __ret = ret; \ |
199 | \ | 199 | \ |
200 | INIT_LIST_HEAD(&__wait.task_list); \ | ||
201 | if (exclusive) \ | ||
202 | __wait.flags = WQ_FLAG_EXCLUSIVE; \ | ||
203 | else \ | ||
204 | __wait.flags = 0; \ | ||
205 | \ | ||
200 | for (;;) { \ | 206 | for (;;) { \ |
201 | if (exclusive) \ | 207 | long __int = prepare_to_wait_event(&wq, &__wait, state);\ |
202 | prepare_to_wait_exclusive(&wq, &__wait, state); \ | ||
203 | else \ | ||
204 | prepare_to_wait(&wq, &__wait, state); \ | ||
205 | \ | 208 | \ |
206 | if (condition) \ | 209 | if (condition) \ |
207 | break; \ | 210 | break; \ |
208 | \ | 211 | \ |
209 | if (___wait_signal_pending(state)) { \ | 212 | if (___wait_is_interruptible(state) && __int) { \ |
210 | __ret = -ERESTARTSYS; \ | 213 | __ret = __int; \ |
211 | if (exclusive) { \ | 214 | if (exclusive) { \ |
212 | abort_exclusive_wait(&wq, &__wait, \ | 215 | abort_exclusive_wait(&wq, &__wait, \ |
213 | state, NULL); \ | 216 | state, NULL); \ |
@@ -791,6 +794,7 @@ extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long tim | |||
791 | */ | 794 | */ |
792 | void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); | 795 | void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); |
793 | void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); | 796 | void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); |
797 | long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state); | ||
794 | void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); | 798 | void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); |
795 | void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key); | 799 | void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key); |
796 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | 800 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); |
diff --git a/kernel/wait.c b/kernel/wait.c index d550920e040c..de21c6305a44 100644 --- a/kernel/wait.c +++ b/kernel/wait.c | |||
@@ -92,6 +92,30 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state) | |||
92 | } | 92 | } |
93 | EXPORT_SYMBOL(prepare_to_wait_exclusive); | 93 | EXPORT_SYMBOL(prepare_to_wait_exclusive); |
94 | 94 | ||
95 | long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state) | ||
96 | { | ||
97 | unsigned long flags; | ||
98 | |||
99 | if (signal_pending_state(state, current)) | ||
100 | return -ERESTARTSYS; | ||
101 | |||
102 | wait->private = current; | ||
103 | wait->func = autoremove_wake_function; | ||
104 | |||
105 | spin_lock_irqsave(&q->lock, flags); | ||
106 | if (list_empty(&wait->task_list)) { | ||
107 | if (wait->flags & WQ_FLAG_EXCLUSIVE) | ||
108 | __add_wait_queue_tail(q, wait); | ||
109 | else | ||
110 | __add_wait_queue(q, wait); | ||
111 | } | ||
112 | set_current_state(state); | ||
113 | spin_unlock_irqrestore(&q->lock, flags); | ||
114 | |||
115 | return 0; | ||
116 | } | ||
117 | EXPORT_SYMBOL(prepare_to_wait_event); | ||
118 | |||
95 | /** | 119 | /** |
96 | * finish_wait - clean up after waiting in a queue | 120 | * finish_wait - clean up after waiting in a queue |
97 | * @q: waitqueue waited on | 121 | * @q: waitqueue waited on |