Diffstat (limited to 'kernel/sched/wait.c')
-rw-r--r--	kernel/sched/wait.c	| 257
1 file changed, 0 insertions(+), 257 deletions(-)
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 95e6d3820cba..6bcd7c3c4501 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -390,260 +390,3 @@ int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sy
 	return default_wake_function(wq_entry, mode, sync, key);
 }
 EXPORT_SYMBOL(woken_wake_function);
-
-int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *arg)
-{
-	struct wait_bit_key *key = arg;
-	struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
-
-	if (wait_bit->key.flags != key->flags ||
-			wait_bit->key.bit_nr != key->bit_nr ||
-			test_bit(key->bit_nr, key->flags))
-		return 0;
-	else
-		return autoremove_wake_function(wq_entry, mode, sync, key);
-}
-EXPORT_SYMBOL(wake_bit_function);
-
-/*
- * To allow interruptible waiting and asynchronous (i.e. nonblocking)
- * waiting, the @action passed to __wait_on_bit() and __wait_on_bit_lock()
- * may return nonzero; a nonzero return code halts waiting and is returned.
- */
-int __sched
-__wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
-	      wait_bit_action_f *action, unsigned mode)
-{
-	int ret = 0;
-
-	do {
-		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
-		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
-			ret = (*action)(&wbq_entry->key, mode);
-	} while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
-	finish_wait(wq_head, &wbq_entry->wq_entry);
-	return ret;
-}
-EXPORT_SYMBOL(__wait_on_bit);
-
-int __sched out_of_line_wait_on_bit(void *word, int bit,
-				    wait_bit_action_f *action, unsigned mode)
-{
-	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
-	DEFINE_WAIT_BIT(wq_entry, word, bit);
-
-	return __wait_on_bit(wq_head, &wq_entry, action, mode);
-}
-EXPORT_SYMBOL(out_of_line_wait_on_bit);
-
-int __sched out_of_line_wait_on_bit_timeout(
-	void *word, int bit, wait_bit_action_f *action,
-	unsigned mode, unsigned long timeout)
-{
-	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
-	DEFINE_WAIT_BIT(wq_entry, word, bit);
-
-	wq_entry.key.timeout = jiffies + timeout;
-	return __wait_on_bit(wq_head, &wq_entry, action, mode);
-}
-EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
-
-int __sched
-__wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
-			wait_bit_action_f *action, unsigned mode)
-{
-	int ret = 0;
-
-	for (;;) {
-		prepare_to_wait_exclusive(wq_head, &wbq_entry->wq_entry, mode);
-		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
-			ret = action(&wbq_entry->key, mode);
-			/*
-			 * See the comment in prepare_to_wait_event().
-			 * finish_wait() does not necessarily take wq_head->lock,
-			 * but test_and_set_bit() implies mb() which pairs with
-			 * smp_mb__after_atomic() before wake_up_page().
-			 */
-			if (ret)
-				finish_wait(wq_head, &wbq_entry->wq_entry);
-		}
-		if (!test_and_set_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
-			if (!ret)
-				finish_wait(wq_head, &wbq_entry->wq_entry);
-			return 0;
-		} else if (ret) {
-			return ret;
-		}
-	}
-}
-EXPORT_SYMBOL(__wait_on_bit_lock);
-
-int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
-					 wait_bit_action_f *action, unsigned mode)
-{
-	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
-	DEFINE_WAIT_BIT(wq_entry, word, bit);
-
-	return __wait_on_bit_lock(wq_head, &wq_entry, action, mode);
-}
-EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
-
-void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit)
-{
-	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
-	if (waitqueue_active(wq_head))
-		__wake_up(wq_head, TASK_NORMAL, 1, &key);
-}
-EXPORT_SYMBOL(__wake_up_bit);
-
-/**
- * wake_up_bit - wake up a waiter on a bit
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- *
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hash table's accessor API that wakes up waiters
- * on a bit. For instance, if one were to have waiters on a bitflag,
- * one would call wake_up_bit() after clearing the bit.
- *
- * In order for this to function properly, as it uses waitqueue_active()
- * internally, some kind of memory barrier must be issued prior to calling
- * this. Typically, this will be smp_mb__after_atomic(), but in some
- * cases where bitflags are manipulated non-atomically under a lock, one
- * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
- * because spin_unlock() does not guarantee a memory barrier.
- */
-void wake_up_bit(void *word, int bit)
-{
-	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
-}
-EXPORT_SYMBOL(wake_up_bit);
-
-/*
- * Manipulate the atomic_t address to produce a better bit waitqueue table hash
- * index (we're keying off bit -1, but that would produce a horrible hash
- * value).
- */
-static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
-{
-	if (BITS_PER_LONG == 64) {
-		unsigned long q = (unsigned long)p;
-		return bit_waitqueue((void *)(q & ~1), q & 1);
-	}
-	return bit_waitqueue(p, 0);
-}
-
-static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync,
-				  void *arg)
-{
-	struct wait_bit_key *key = arg;
-	struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
-	atomic_t *val = key->flags;
-
-	if (wait_bit->key.flags != key->flags ||
-			wait_bit->key.bit_nr != key->bit_nr ||
-			atomic_read(val) != 0)
-		return 0;
-	return autoremove_wake_function(wq_entry, mode, sync, key);
-}
-
-/*
- * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
- * the @action passed to __wait_on_atomic_t() may return nonzero; a nonzero
- * return code halts waiting and is returned to the caller.
- */
-static __sched
-int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
-		       int (*action)(atomic_t *), unsigned mode)
-{
-	atomic_t *val;
-	int ret = 0;
-
-	do {
-		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
-		val = wbq_entry->key.flags;
-		if (atomic_read(val) == 0)
-			break;
-		ret = (*action)(val);
-	} while (!ret && atomic_read(val) != 0);
-	finish_wait(wq_head, &wbq_entry->wq_entry);
-	return ret;
-}
-
-#define DEFINE_WAIT_ATOMIC_T(name, p)					\
-	struct wait_bit_queue_entry name = {				\
-		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
-		.wq_entry = {						\
-			.private	= current,			\
-			.func		= wake_atomic_t_function,	\
-			.task_list	=				\
-				LIST_HEAD_INIT((name).wq_entry.task_list), \
-		},							\
-	}
-
-__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
-					 unsigned mode)
-{
-	struct wait_queue_head *wq_head = atomic_t_waitqueue(p);
-	DEFINE_WAIT_ATOMIC_T(wq_entry, p);
-
-	return __wait_on_atomic_t(wq_head, &wq_entry, action, mode);
-}
-EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
-
-/**
- * wake_up_atomic_t - Wake up a waiter on an atomic_t
- * @p: The atomic_t being waited on, a kernel virtual address
- *
- * Wake up anyone waiting for the atomic_t to go to zero.
- *
- * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
- * check is done by the waiter's wake function, not by the waker itself).
- */
-void wake_up_atomic_t(atomic_t *p)
-{
-	__wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
-}
-EXPORT_SYMBOL(wake_up_atomic_t);
-
-__sched int bit_wait(struct wait_bit_key *word, int mode)
-{
-	schedule();
-	if (signal_pending_state(mode, current))
-		return -EINTR;
-	return 0;
-}
-EXPORT_SYMBOL(bit_wait);
-
-__sched int bit_wait_io(struct wait_bit_key *word, int mode)
-{
-	io_schedule();
-	if (signal_pending_state(mode, current))
-		return -EINTR;
-	return 0;
-}
-EXPORT_SYMBOL(bit_wait_io);
-
-__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
-{
-	unsigned long now = READ_ONCE(jiffies);
-	if (time_after_eq(now, word->timeout))
-		return -EAGAIN;
-	schedule_timeout(word->timeout - now);
-	if (signal_pending_state(mode, current))
-		return -EINTR;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(bit_wait_timeout);
-
-__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
-{
-	unsigned long now = READ_ONCE(jiffies);
-	if (time_after_eq(now, word->timeout))
-		return -EAGAIN;
-	io_schedule_timeout(word->timeout - now);
-	if (signal_pending_state(mode, current))
-		return -EINTR;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
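
The wake_up_bit() kernel-doc above spells out the contract this API depends on: the waker must clear the bit and then issue a memory barrier before calling wake_up_bit(), because __wake_up_bit() checks waitqueue_active() locklessly. As a minimal sketch of the usual waiter/waker pairing (not part of this commit; my_dev_flags, MY_FLAG_BUSY and the function names are hypothetical, while wait_on_bit(), clear_bit(), smp_mb__after_atomic() and wake_up_bit() are the real calls, the first living in include/linux/wait.h at this point in the tree):

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitops.h>

#define MY_FLAG_BUSY	0		/* hypothetical flag bit */

static unsigned long my_dev_flags;	/* hypothetical flag word */

/* Waiter: sleep until MY_FLAG_BUSY is clear; returns -EINTR on a signal. */
static int my_dev_wait_idle(void)
{
	return wait_on_bit(&my_dev_flags, MY_FLAG_BUSY, TASK_INTERRUPTIBLE);
}

/* Waker: clear the bit, then barrier, then wake -- in that order. */
static void my_dev_done(void)
{
	clear_bit(MY_FLAG_BUSY, &my_dev_flags);
	smp_mb__after_atomic();	/* pairs as described in the kernel-doc above */
	wake_up_bit(&my_dev_flags, MY_FLAG_BUSY);
}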
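
The timed variant works the same way, except that out_of_line_wait_on_bit_timeout() stores an absolute deadline (jiffies + timeout) in the wait_bit_key, which bit_wait_timeout() rechecks on every wakeup and converts into -EAGAIN once it passes. A sketch through the wait_on_bit_timeout() wrapper, reusing the hypothetical names above:

#include <linux/jiffies.h>

/* 0 when the bit clears, -EINTR on a signal, -EAGAIN after ~100ms. */
static int my_dev_wait_idle_timeout(void)
{
	return wait_on_bit_timeout(&my_dev_flags, MY_FLAG_BUSY,
				   TASK_INTERRUPTIBLE,
				   msecs_to_jiffies(100));
}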
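
The atomic_t half of the API waits for a counter to reach zero, with the zero check done on the waiter side (in wake_atomic_t_function()), as the wake_up_atomic_t() comment notes. A sketch under the same caveats: my_obj_users, my_action() and the wrappers around them are made-up names, while wait_on_atomic_t() (from include/linux/wait.h) and wake_up_atomic_t() are the real calls:

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/wait.h>

static atomic_t my_obj_users = ATOMIC_INIT(1);	/* hypothetical refcount */

/* The action only sleeps; __wait_on_atomic_t() rechecks the counter. */
static int my_action(atomic_t *p)
{
	schedule();
	return 0;
}

/* Waiter: block until my_obj_users drops to zero. */
static int my_obj_wait_unused(void)
{
	return wait_on_atomic_t(&my_obj_users, my_action, TASK_UNINTERRUPTIBLE);
}

/* Waker: whoever drops the final reference does the wake-up. */
static void my_obj_put(void)
{
	if (atomic_dec_and_test(&my_obj_users))
		wake_up_atomic_t(&my_obj_users);
}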