diff options
Diffstat (limited to 'kernel/wait.c')
| -rw-r--r-- | kernel/wait.c | 88 |
1 files changed, 88 insertions, 0 deletions
diff --git a/kernel/wait.c b/kernel/wait.c index 6698e0c04ead..d550920e040c 100644 --- a/kernel/wait.c +++ b/kernel/wait.c | |||
| @@ -287,3 +287,91 @@ wait_queue_head_t *bit_waitqueue(void *word, int bit) | |||
| 287 | return &zone->wait_table[hash_long(val, zone->wait_table_bits)]; | 287 | return &zone->wait_table[hash_long(val, zone->wait_table_bits)]; |
| 288 | } | 288 | } |
| 289 | EXPORT_SYMBOL(bit_waitqueue); | 289 | EXPORT_SYMBOL(bit_waitqueue); |
| 290 | |||
| 291 | /* | ||
| 292 | * Manipulate the atomic_t address to produce a better bit waitqueue table hash | ||
| 293 | * index (we're keying off bit -1, but that would produce a horrible hash | ||
| 294 | * value). | ||
| 295 | */ | ||
| 296 | static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p) | ||
| 297 | { | ||
| 298 | if (BITS_PER_LONG == 64) { | ||
| 299 | unsigned long q = (unsigned long)p; | ||
| 300 | return bit_waitqueue((void *)(q & ~1), q & 1); | ||
| 301 | } | ||
| 302 | return bit_waitqueue(p, 0); | ||
| 303 | } | ||
| 304 | |||
| 305 | static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync, | ||
| 306 | void *arg) | ||
| 307 | { | ||
| 308 | struct wait_bit_key *key = arg; | ||
| 309 | struct wait_bit_queue *wait_bit | ||
| 310 | = container_of(wait, struct wait_bit_queue, wait); | ||
| 311 | atomic_t *val = key->flags; | ||
| 312 | |||
| 313 | if (wait_bit->key.flags != key->flags || | ||
| 314 | wait_bit->key.bit_nr != key->bit_nr || | ||
| 315 | atomic_read(val) != 0) | ||
| 316 | return 0; | ||
| 317 | return autoremove_wake_function(wait, mode, sync, key); | ||
| 318 | } | ||
| 319 | |||
| 320 | /* | ||
| 321 | * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting, | ||
| 322 | * the actions of __wait_on_atomic_t() are permitted return codes. Nonzero | ||
| 323 | * return codes halt waiting and return. | ||
| 324 | */ | ||
| 325 | static __sched | ||
| 326 | int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q, | ||
| 327 | int (*action)(atomic_t *), unsigned mode) | ||
| 328 | { | ||
| 329 | atomic_t *val; | ||
| 330 | int ret = 0; | ||
| 331 | |||
| 332 | do { | ||
| 333 | prepare_to_wait(wq, &q->wait, mode); | ||
| 334 | val = q->key.flags; | ||
| 335 | if (atomic_read(val) == 0) | ||
| 336 | break; | ||
| 337 | ret = (*action)(val); | ||
| 338 | } while (!ret && atomic_read(val) != 0); | ||
| 339 | finish_wait(wq, &q->wait); | ||
| 340 | return ret; | ||
| 341 | } | ||
| 342 | |||
/*
 * Declare an on-stack wait_bit_queue entry named @name, keyed to atomic_t
 * @p, for the current task.  Its wake function (wake_atomic_t_function)
 * discards wake-ups until @p reaches zero.
 */
#define DEFINE_WAIT_ATOMIC_T(name, p)					\
	struct wait_bit_queue name = {					\
		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_atomic_t_function,	\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
| 353 | |||
| 354 | __sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *), | ||
| 355 | unsigned mode) | ||
| 356 | { | ||
| 357 | wait_queue_head_t *wq = atomic_t_waitqueue(p); | ||
| 358 | DEFINE_WAIT_ATOMIC_T(wait, p); | ||
| 359 | |||
| 360 | return __wait_on_atomic_t(wq, &wait, action, mode); | ||
| 361 | } | ||
| 362 | EXPORT_SYMBOL(out_of_line_wait_on_atomic_t); | ||
| 363 | |||
| 364 | /** | ||
| 365 | * wake_up_atomic_t - Wake up a waiter on a atomic_t | ||
| 366 | * @p: The atomic_t being waited on, a kernel virtual address | ||
| 367 | * | ||
| 368 | * Wake up anyone waiting for the atomic_t to go to zero. | ||
| 369 | * | ||
| 370 | * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t | ||
| 371 | * check is done by the waiter's wake function, not the by the waker itself). | ||
| 372 | */ | ||
| 373 | void wake_up_atomic_t(atomic_t *p) | ||
| 374 | { | ||
| 375 | __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR); | ||
| 376 | } | ||
| 377 | EXPORT_SYMBOL(wake_up_atomic_t); | ||
