diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2018-03-15 06:40:33 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2018-03-20 03:23:17 -0400 |
| commit | 6b2bb7265f0b62605e8caee3613449ed0db270b9 (patch) | |
| tree | 62780e9b912f05daccd56f1bab31b0c280bb4043 /include/linux/wait_bit.h | |
| parent | fc4c5a3828bdba157f8ea406e1f4ceb75c13039c (diff) | |
sched/wait: Introduce wait_var_event()
As a replacement for the wait_on_atomic_t() API provide the
wait_var_event() API.
The wait_var_event() API is based on the very same hashed-waitqueue
idea, but doesn't care about the type (atomic_t) or the specific
condition (atomic_read() == 0). IOW, it's much more widely
applicable/flexible.
It shares all the benefits/disadvantages of a hashed-waitqueue
approach with the existing wait_on_atomic_t/wait_on_bit() APIs.
The API is modeled after the existing wait_event() API, but instead of
taking a wait_queue_head, it takes an address. This address is
hashed to obtain a wait_queue_head from the bit_wait_table.
Similar to the wait_event() API, it takes a condition expression as
second argument and will wait until this expression becomes true.
The following are (mostly) identical replacements:
wait_on_atomic_t(&my_atomic, atomic_t_wait, TASK_UNINTERRUPTIBLE);
wake_up_atomic_t(&my_atomic);
wait_var_event(&my_atomic, !atomic_read(&my_atomic));
wake_up_var(&my_atomic);
The only difference is that wake_up_var() is an unconditional wakeup
and doesn't check the previously hard-coded (atomic_read() == 0)
condition here. This is of little consequence, since most callers are
already conditional on atomic_dec_and_test() and the ones that are
not, are trivial to make so.
Tested-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/wait_bit.h')
| -rw-r--r-- | include/linux/wait_bit.h | 70 |
1 files changed, 70 insertions, 0 deletions
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h index 61b39eaf7cad..3fcdb75d69cf 100644 --- a/include/linux/wait_bit.h +++ b/include/linux/wait_bit.h | |||
| @@ -262,4 +262,74 @@ int wait_on_atomic_t(atomic_t *val, wait_atomic_t_action_f action, unsigned mode | |||
| 262 | return out_of_line_wait_on_atomic_t(val, action, mode); | 262 | return out_of_line_wait_on_atomic_t(val, action, mode); |
| 263 | } | 263 | } |
| 264 | 264 | ||
/*
 * wait_var_event() support: sleep on a wait_queue_head hashed from an
 * arbitrary address until a caller-supplied condition becomes true
 * (see the wait_var_event*() macros below); wake with wake_up_var().
 */
extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags);
extern void wake_up_var(void *var);
extern wait_queue_head_t *__var_waitqueue(void *p);
| 268 | |||
/*
 * ___wait_var_event() - core worker for the wait_var_event() family.
 *
 * Obtains the wait_queue_head hashed from the address @var via
 * __var_waitqueue(), then loops: put the task in @state via
 * prepare_to_wait_event(), re-test @condition, and run @cmd (typically
 * schedule()) until @condition is true.  @ret seeds the local __ret,
 * which @cmd may update (see __wait_var_event_timeout()); the whole
 * statement expression evaluates to __ret.  If @state is interruptible
 * and prepare_to_wait_event() reports a pending signal, that error is
 * returned immediately via the __out label.
 *
 * NOTE(review): the signal path bypasses finish_wait(); this presumes
 * prepare_to_wait_event() already dequeued the entry when it returned
 * non-zero -- confirm against kernel/sched/wait.c.
 */
#define ___wait_var_event(var, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	struct wait_queue_head *__wq_head = __var_waitqueue(var);	\
	struct wait_bit_queue_entry __wbq_entry;			\
	long __ret = ret; /* explicit shadow */				\
									\
	init_wait_var_entry(&__wbq_entry, var,				\
			    exclusive ? WQ_FLAG_EXCLUSIVE : 0);		\
	for (;;) {							\
		long __int = prepare_to_wait_event(__wq_head,		\
						   &__wbq_entry.wq_entry, \
						   state);		\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(__wq_head, &__wbq_entry.wq_entry);			\
__out:	__ret;								\
})
| 295 | |||
/* Uninterruptible wait for @condition on @var's hashed waitqueue. */
#define __wait_var_event(var, condition)				\
	___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			  schedule())
| 299 | |||
| 300 | #define wait_var_event(var, condition) \ | ||
| 301 | do { \ | ||
| 302 | might_sleep(); \ | ||
| 303 | if (condition) \ | ||
| 304 | break; \ | ||
| 305 | __wait_var_event(var, condition); \ | ||
| 306 | } while (0) | ||
| 307 | |||
/* As __wait_var_event(), but the sleep is TASK_KILLABLE. */
#define __wait_var_event_killable(var, condition)			\
	___wait_var_event(var, condition, TASK_KILLABLE, 0, 0,		\
			  schedule())
| 311 | |||
/*
 * wait_var_event_killable() - like wait_var_event(), but the sleep can
 * be interrupted by a fatal signal.  Returns 0 if @condition became
 * true, otherwise the negative error propagated out of
 * ___wait_var_event() when the wait was interrupted.
 */
#define wait_var_event_killable(var, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_var_event_killable(var, condition);	\
	__ret;								\
})
| 320 | |||
/*
 * Uninterruptible wait with a timeout: __ret is seeded with @timeout by
 * ___wait_var_event() and refreshed each loop by schedule_timeout(),
 * so the remaining time is what the expression finally yields.
 */
#define __wait_var_event_timeout(var, condition, timeout)		\
	___wait_var_event(var, ___wait_cond_timeout(condition),		\
			  TASK_UNINTERRUPTIBLE, 0, timeout,		\
			  __ret = schedule_timeout(__ret))
| 325 | |||
/*
 * wait_var_event_timeout() - wait until @condition is true or @timeout
 * (jiffies) elapses.  Returns the remaining timeout when @condition
 * became true, else 0 -- exact values come from ___wait_cond_timeout(),
 * which is defined elsewhere in this header (presumably mirroring
 * wait_event_timeout(); verify there).
 */
#define wait_var_event_timeout(var, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_var_event_timeout(var, condition, timeout); \
	__ret;								\
})
| 334 | |||
| 265 | #endif /* _LINUX_WAIT_BIT_H */ | 335 | #endif /* _LINUX_WAIT_BIT_H */ |
