Diffstat (limited to 'kernel')
-rw-r--r--   kernel/sched/Makefile   |   2 +-
-rw-r--r--   kernel/sched/wait.c     | 257 -------------------
-rw-r--r--   kernel/sched/wait_bit.c | 263 ++++++++++++++++++++
3 files changed, 264 insertions(+), 258 deletions(-)
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 89ab6758667b..16277e2ed8ee 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -17,7 +17,7 @@ endif
 
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o swait.o completion.o idle.o
+obj-y += wait.o wait_bit.o swait.o completion.o idle.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 95e6d3820cba..6bcd7c3c4501 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -390,260 +390,3 @@ int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sy
 	return default_wake_function(wq_entry, mode, sync, key);
 }
 EXPORT_SYMBOL(woken_wake_function);
-
-int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *arg)
-{
-	struct wait_bit_key *key = arg;
-	struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
-
-	if (wait_bit->key.flags != key->flags ||
-			wait_bit->key.bit_nr != key->bit_nr ||
-			test_bit(key->bit_nr, key->flags))
-		return 0;
-	else
-		return autoremove_wake_function(wq_entry, mode, sync, key);
-}
-EXPORT_SYMBOL(wake_bit_function);
-
-/*
- * To allow interruptible waiting and asynchronous (i.e. nonblocking)
- * waiting, the actions passed to __wait_on_bit() and __wait_on_bit_lock()
- * may return nonzero codes; a nonzero return halts waiting and is returned.
- */
-int __sched
-__wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
-	      wait_bit_action_f *action, unsigned mode)
-{
-	int ret = 0;
-
-	do {
-		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
-		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
-			ret = (*action)(&wbq_entry->key, mode);
-	} while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
-	finish_wait(wq_head, &wbq_entry->wq_entry);
-	return ret;
-}
-EXPORT_SYMBOL(__wait_on_bit);
-
-int __sched out_of_line_wait_on_bit(void *word, int bit,
-				    wait_bit_action_f *action, unsigned mode)
-{
-	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
-	DEFINE_WAIT_BIT(wq_entry, word, bit);
-
-	return __wait_on_bit(wq_head, &wq_entry, action, mode);
-}
-EXPORT_SYMBOL(out_of_line_wait_on_bit);
-
-int __sched out_of_line_wait_on_bit_timeout(
-	void *word, int bit, wait_bit_action_f *action,
-	unsigned mode, unsigned long timeout)
-{
-	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
-	DEFINE_WAIT_BIT(wq_entry, word, bit);
-
-	wq_entry.key.timeout = jiffies + timeout;
-	return __wait_on_bit(wq_head, &wq_entry, action, mode);
-}
-EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
-
-int __sched
-__wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
-		   wait_bit_action_f *action, unsigned mode)
-{
-	int ret = 0;
-
-	for (;;) {
-		prepare_to_wait_exclusive(wq_head, &wbq_entry->wq_entry, mode);
-		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
-			ret = action(&wbq_entry->key, mode);
-			/*
-			 * See the comment in prepare_to_wait_event().
-			 * finish_wait() does not necessarily take wq_head->lock,
-			 * but test_and_set_bit() implies mb() which pairs with
-			 * smp_mb__after_atomic() before wake_up_page().
-			 */
-			if (ret)
-				finish_wait(wq_head, &wbq_entry->wq_entry);
-		}
-		if (!test_and_set_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
-			if (!ret)
-				finish_wait(wq_head, &wbq_entry->wq_entry);
-			return 0;
-		} else if (ret) {
-			return ret;
-		}
-	}
-}
-EXPORT_SYMBOL(__wait_on_bit_lock);
-
-int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
-					 wait_bit_action_f *action, unsigned mode)
-{
-	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
-	DEFINE_WAIT_BIT(wq_entry, word, bit);
-
-	return __wait_on_bit_lock(wq_head, &wq_entry, action, mode);
-}
-EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
-
-void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit)
-{
-	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
-	if (waitqueue_active(wq_head))
-		__wake_up(wq_head, TASK_NORMAL, 1, &key);
-}
-EXPORT_SYMBOL(__wake_up_bit);
-
-/**
- * wake_up_bit - wake up a waiter on a bit
- * @word: the word being waited on, a kernel virtual address
- * @bit: the bit of the word being waited on
- *
- * There is a standard hashed waitqueue table for generic use. This
- * is the part of the hashtable's accessor API that wakes up waiters
- * on a bit. For instance, if one were to have waiters on a bitflag,
- * one would call wake_up_bit() after clearing the bit.
- *
- * In order for this to function properly, as it uses waitqueue_active()
- * internally, some kind of memory barrier must be done prior to calling
- * this. Typically, this will be smp_mb__after_atomic(), but in some
- * cases where bitflags are manipulated non-atomically under a lock, one
- * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
- * because spin_unlock() does not guarantee a memory barrier.
- */
-void wake_up_bit(void *word, int bit)
-{
-	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
-}
-EXPORT_SYMBOL(wake_up_bit);
-
-/*
- * Manipulate the atomic_t address to produce a better bit waitqueue table hash
- * index (we're keying off bit -1, but that would produce a horrible hash
- * value).
- */
-static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
-{
-	if (BITS_PER_LONG == 64) {
-		unsigned long q = (unsigned long)p;
-		return bit_waitqueue((void *)(q & ~1), q & 1);
-	}
-	return bit_waitqueue(p, 0);
-}
-
-static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync,
-				  void *arg)
-{
-	struct wait_bit_key *key = arg;
-	struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
-	atomic_t *val = key->flags;
-
-	if (wait_bit->key.flags != key->flags ||
-	    wait_bit->key.bit_nr != key->bit_nr ||
-	    atomic_read(val) != 0)
-		return 0;
-	return autoremove_wake_function(wq_entry, mode, sync, key);
-}
-
-/*
- * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
- * the action passed to __wait_on_atomic_t() may return nonzero codes; a
- * nonzero return code halts waiting and is returned to the caller.
- */
-static __sched
-int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
-		       int (*action)(atomic_t *), unsigned mode)
-{
-	atomic_t *val;
-	int ret = 0;
-
-	do {
-		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
-		val = wbq_entry->key.flags;
-		if (atomic_read(val) == 0)
-			break;
-		ret = (*action)(val);
-	} while (!ret && atomic_read(val) != 0);
-	finish_wait(wq_head, &wbq_entry->wq_entry);
-	return ret;
-}
-
-#define DEFINE_WAIT_ATOMIC_T(name, p)					\
-	struct wait_bit_queue_entry name = {				\
-		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
-		.wq_entry = {						\
-			.private	= current,			\
-			.func		= wake_atomic_t_function,	\
-			.task_list	=				\
-				LIST_HEAD_INIT((name).wq_entry.task_list), \
-		},							\
-	}
-
-__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
-					 unsigned mode)
-{
-	struct wait_queue_head *wq_head = atomic_t_waitqueue(p);
-	DEFINE_WAIT_ATOMIC_T(wq_entry, p);
-
-	return __wait_on_atomic_t(wq_head, &wq_entry, action, mode);
-}
-EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
-
-/**
- * wake_up_atomic_t - Wake up a waiter on an atomic_t
- * @p: The atomic_t being waited on, a kernel virtual address
- *
- * Wake up anyone waiting for the atomic_t to go to zero.
- *
- * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
- * check is done by the waiter's wake function, not by the waker itself).
- */
-void wake_up_atomic_t(atomic_t *p)
-{
-	__wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
-}
-EXPORT_SYMBOL(wake_up_atomic_t);
-
-__sched int bit_wait(struct wait_bit_key *word, int mode)
-{
-	schedule();
-	if (signal_pending_state(mode, current))
-		return -EINTR;
-	return 0;
-}
-EXPORT_SYMBOL(bit_wait);
-
-__sched int bit_wait_io(struct wait_bit_key *word, int mode)
-{
-	io_schedule();
-	if (signal_pending_state(mode, current))
-		return -EINTR;
-	return 0;
-}
-EXPORT_SYMBOL(bit_wait_io);
-
-__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
-{
-	unsigned long now = READ_ONCE(jiffies);
-	if (time_after_eq(now, word->timeout))
-		return -EAGAIN;
-	schedule_timeout(word->timeout - now);
-	if (signal_pending_state(mode, current))
-		return -EINTR;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(bit_wait_timeout);
-
-__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
-{
-	unsigned long now = READ_ONCE(jiffies);
-	if (time_after_eq(now, word->timeout))
-		return -EAGAIN;
-	io_schedule_timeout(word->timeout - now);
-	if (signal_pending_state(mode, current))
-		return -EINTR;
-	return 0;
-}
-EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
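
The wake_up_bit() kerneldoc removed above (and re-added in wait_bit.c below) prescribes a clear-the-bit, barrier, then wake ordering, because __wake_up_bit() checks waitqueue_active() locklessly. A minimal sketch of that caller-side pattern, assuming invented names (my_flags, MY_FLAG_BUSY, my_wait_until_idle, my_mark_idle); only wait_on_bit(), wake_up_bit(), clear_bit() and smp_mb__after_atomic() are the real APIs from this file:

/* Illustrative sketch, not part of this commit: a hypothetical "busy"
 * bit that one path waits on and another path clears and wakes. */
#include <linux/wait_bit.h>	/* wait_on_bit(), wake_up_bit() */
#include <linux/bitops.h>	/* clear_bit() */
#include <linux/sched.h>	/* TASK_UNINTERRUPTIBLE */

#define MY_FLAG_BUSY	0		/* hypothetical bit number */
static unsigned long my_flags;		/* hypothetical word being waited on */

static void my_wait_until_idle(void)
{
	/* Sleeps until MY_FLAG_BUSY is clear; the default bit_wait()
	 * action just calls schedule() and checks for signals. */
	wait_on_bit(&my_flags, MY_FLAG_BUSY, TASK_UNINTERRUPTIBLE);
}

static void my_mark_idle(void)
{
	clear_bit(MY_FLAG_BUSY, &my_flags);
	/* Order the bit clear before the lockless waitqueue_active()
	 * check inside __wake_up_bit(), as the kerneldoc requires. */
	smp_mb__after_atomic();
	wake_up_bit(&my_flags, MY_FLAG_BUSY);
}
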
diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c
new file mode 100644
index 000000000000..463bac84dfd1
--- /dev/null
+++ b/kernel/sched/wait_bit.c
@@ -0,0 +1,263 @@
+/*
+ * The implementation of the wait_bit*() and related waiting APIs:
+ */
+#include <linux/wait_bit.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/debug.h>
+
+int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *arg)
+{
+	struct wait_bit_key *key = arg;
+	struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
+
+	if (wait_bit->key.flags != key->flags ||
+			wait_bit->key.bit_nr != key->bit_nr ||
+			test_bit(key->bit_nr, key->flags))
+		return 0;
+	else
+		return autoremove_wake_function(wq_entry, mode, sync, key);
+}
+EXPORT_SYMBOL(wake_bit_function);
+
+/*
+ * To allow interruptible waiting and asynchronous (i.e. nonblocking)
+ * waiting, the actions passed to __wait_on_bit() and __wait_on_bit_lock()
+ * may return nonzero codes; a nonzero return halts waiting and is returned.
+ */
+int __sched
+__wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
+	      wait_bit_action_f *action, unsigned mode)
+{
+	int ret = 0;
+
+	do {
+		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
+		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
+			ret = (*action)(&wbq_entry->key, mode);
+	} while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
+	finish_wait(wq_head, &wbq_entry->wq_entry);
+	return ret;
+}
+EXPORT_SYMBOL(__wait_on_bit);
+
+int __sched out_of_line_wait_on_bit(void *word, int bit,
+				    wait_bit_action_f *action, unsigned mode)
+{
+	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
+	DEFINE_WAIT_BIT(wq_entry, word, bit);
+
+	return __wait_on_bit(wq_head, &wq_entry, action, mode);
+}
+EXPORT_SYMBOL(out_of_line_wait_on_bit);
+
+int __sched out_of_line_wait_on_bit_timeout(
+	void *word, int bit, wait_bit_action_f *action,
+	unsigned mode, unsigned long timeout)
+{
+	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
+	DEFINE_WAIT_BIT(wq_entry, word, bit);
+
+	wq_entry.key.timeout = jiffies + timeout;
+	return __wait_on_bit(wq_head, &wq_entry, action, mode);
+}
+EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
+
+int __sched
+__wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
+		   wait_bit_action_f *action, unsigned mode)
+{
+	int ret = 0;
+
+	for (;;) {
+		prepare_to_wait_exclusive(wq_head, &wbq_entry->wq_entry, mode);
+		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
+			ret = action(&wbq_entry->key, mode);
+			/*
+			 * See the comment in prepare_to_wait_event().
+			 * finish_wait() does not necessarily take wq_head->lock,
+			 * but test_and_set_bit() implies mb() which pairs with
+			 * smp_mb__after_atomic() before wake_up_page().
+			 */
+			if (ret)
+				finish_wait(wq_head, &wbq_entry->wq_entry);
+		}
+		if (!test_and_set_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
+			if (!ret)
+				finish_wait(wq_head, &wbq_entry->wq_entry);
+			return 0;
+		} else if (ret) {
+			return ret;
+		}
+	}
+}
+EXPORT_SYMBOL(__wait_on_bit_lock);
+
+int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
+					 wait_bit_action_f *action, unsigned mode)
+{
+	struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
+	DEFINE_WAIT_BIT(wq_entry, word, bit);
+
+	return __wait_on_bit_lock(wq_head, &wq_entry, action, mode);
+}
+EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
+
+void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit)
+{
+	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
+	if (waitqueue_active(wq_head))
+		__wake_up(wq_head, TASK_NORMAL, 1, &key);
+}
+EXPORT_SYMBOL(__wake_up_bit);
+
+/**
+ * wake_up_bit - wake up a waiter on a bit
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ *
+ * There is a standard hashed waitqueue table for generic use. This
+ * is the part of the hashtable's accessor API that wakes up waiters
+ * on a bit. For instance, if one were to have waiters on a bitflag,
+ * one would call wake_up_bit() after clearing the bit.
+ *
+ * In order for this to function properly, as it uses waitqueue_active()
+ * internally, some kind of memory barrier must be done prior to calling
+ * this. Typically, this will be smp_mb__after_atomic(), but in some
+ * cases where bitflags are manipulated non-atomically under a lock, one
+ * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
+ * because spin_unlock() does not guarantee a memory barrier.
+ */
+void wake_up_bit(void *word, int bit)
+{
+	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
+}
+EXPORT_SYMBOL(wake_up_bit);
+
+/*
+ * Manipulate the atomic_t address to produce a better bit waitqueue table hash
+ * index (we're keying off bit -1, but that would produce a horrible hash
+ * value).
+ */
+static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
+{
+	if (BITS_PER_LONG == 64) {
+		unsigned long q = (unsigned long)p;
+		return bit_waitqueue((void *)(q & ~1), q & 1);
+	}
+	return bit_waitqueue(p, 0);
+}
+
+static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync,
+				  void *arg)
+{
+	struct wait_bit_key *key = arg;
+	struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
+	atomic_t *val = key->flags;
+
+	if (wait_bit->key.flags != key->flags ||
+	    wait_bit->key.bit_nr != key->bit_nr ||
+	    atomic_read(val) != 0)
+		return 0;
+	return autoremove_wake_function(wq_entry, mode, sync, key);
+}
+
+/*
+ * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
+ * the action passed to __wait_on_atomic_t() may return nonzero codes; a
+ * nonzero return code halts waiting and is returned to the caller.
+ */
+static __sched
+int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
+		       int (*action)(atomic_t *), unsigned mode)
+{
+	atomic_t *val;
+	int ret = 0;
+
+	do {
+		prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
+		val = wbq_entry->key.flags;
+		if (atomic_read(val) == 0)
+			break;
+		ret = (*action)(val);
+	} while (!ret && atomic_read(val) != 0);
+	finish_wait(wq_head, &wbq_entry->wq_entry);
+	return ret;
+}
+
+#define DEFINE_WAIT_ATOMIC_T(name, p)					\
+	struct wait_bit_queue_entry name = {				\
+		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
+		.wq_entry = {						\
+			.private	= current,			\
+			.func		= wake_atomic_t_function,	\
+			.task_list	=				\
+				LIST_HEAD_INIT((name).wq_entry.task_list), \
+		},							\
+	}
+
+__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
+					 unsigned mode)
+{
+	struct wait_queue_head *wq_head = atomic_t_waitqueue(p);
+	DEFINE_WAIT_ATOMIC_T(wq_entry, p);
+
+	return __wait_on_atomic_t(wq_head, &wq_entry, action, mode);
+}
+EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
+
+/**
+ * wake_up_atomic_t - Wake up a waiter on an atomic_t
+ * @p: The atomic_t being waited on, a kernel virtual address
+ *
+ * Wake up anyone waiting for the atomic_t to go to zero.
+ *
+ * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
+ * check is done by the waiter's wake function, not by the waker itself).
+ */
+void wake_up_atomic_t(atomic_t *p)
+{
+	__wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
+}
+EXPORT_SYMBOL(wake_up_atomic_t);
+
+__sched int bit_wait(struct wait_bit_key *word, int mode)
+{
+	schedule();
+	if (signal_pending_state(mode, current))
+		return -EINTR;
+	return 0;
+}
+EXPORT_SYMBOL(bit_wait);
+
+__sched int bit_wait_io(struct wait_bit_key *word, int mode)
+{
+	io_schedule();
+	if (signal_pending_state(mode, current))
+		return -EINTR;
+	return 0;
+}
+EXPORT_SYMBOL(bit_wait_io);
+
+__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
+{
+	unsigned long now = READ_ONCE(jiffies);
+	if (time_after_eq(now, word->timeout))
+		return -EAGAIN;
+	schedule_timeout(word->timeout - now);
+	if (signal_pending_state(mode, current))
+		return -EINTR;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bit_wait_timeout);
+
+__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
+{
+	unsigned long now = READ_ONCE(jiffies);
+	if (time_after_eq(now, word->timeout))
+		return -EAGAIN;
+	io_schedule_timeout(word->timeout - now);
+	if (signal_pending_state(mode, current))
+		return -EINTR;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
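
The atomic_t side of the API above waits for a counter to reach zero, with the caller supplying the action that sleeps; wake_up_atomic_t() only actually wakes a waiter whose wake function reads the counter as zero. A minimal sketch under invented names (my_users, my_action, my_put, my_teardown); wait_on_atomic_t(), wake_up_atomic_t() and atomic_dec_and_test() are the real calls:

/* Illustrative sketch, not part of this commit: a hypothetical user
 * count that a teardown path waits on until it drops to zero. */
#include <linux/wait_bit.h>	/* wait_on_atomic_t(), wake_up_atomic_t() */
#include <linux/atomic.h>
#include <linux/sched.h>

static atomic_t my_users = ATOMIC_INIT(1);

/* The action runs each time the waiter finds the count still nonzero;
 * returning nonzero would halt the wait and be returned to the caller. */
static int my_action(atomic_t *p)
{
	schedule();
	return 0;
}

static void my_put(void)
{
	/* atomic_dec_and_test() implies a full barrier, ordering the
	 * decrement before the waitqueue_active() check in __wake_up_bit(). */
	if (atomic_dec_and_test(&my_users))
		wake_up_atomic_t(&my_users);
}

static void my_teardown(void)
{
	wait_on_atomic_t(&my_users, my_action, TASK_UNINTERRUPTIBLE);
}
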