aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorIngo Molnar <mingo@kernel.org>2017-06-20 06:19:09 -0400
committerIngo Molnar <mingo@kernel.org>2017-06-20 06:19:09 -0400
commit5dd43ce2f69d42a71dcacdb13d17d8c0ac1fe8f7 (patch)
tree8fd9fc956274bf3b64b4ec736cdb38d9ba9bf6c3
parent4b1c480bfa3b246e292f4d50167756252a9717ed (diff)
sched/wait: Split out the wait_bit*() APIs from <linux/wait.h> into <linux/wait_bit.h>
The wait_bit*() types and APIs are mixed into wait.h, but they are a pretty orthogonal extension of wait-queues. Furthermore, only about 50 kernel files use these APIs, while over 1000 use the regular wait-queue functionality. So clean up the main wait.h by moving the wait-bit functionality out of it, into a separate .h and .c file: include/linux/wait_bit.h for types and APIs kernel/sched/wait_bit.c for the implementation Update all header dependencies. This reduces the size of wait.h rather significantly, by about 30%. Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--fs/cachefiles/internal.h2
-rw-r--r--fs/cifs/inode.c1
-rw-r--r--fs/nfs/internal.h1
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/sunrpc/sched.h2
-rw-r--r--include/linux/wait.h250
-rw-r--r--include/linux/wait_bit.h260
-rw-r--r--kernel/sched/Makefile2
-rw-r--r--kernel/sched/wait.c257
-rw-r--r--kernel/sched/wait_bit.c263
-rw-r--r--security/keys/internal.h1
11 files changed, 530 insertions, 511 deletions
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 54a4fcd679ed..bb3a02ca9da4 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -18,7 +18,7 @@
18 18
19#include <linux/fscache-cache.h> 19#include <linux/fscache-cache.h>
20#include <linux/timer.h> 20#include <linux/timer.h>
21#include <linux/wait.h> 21#include <linux/wait_bit.h>
22#include <linux/cred.h> 22#include <linux/cred.h>
23#include <linux/workqueue.h> 23#include <linux/workqueue.h>
24#include <linux/security.h> 24#include <linux/security.h>
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 4d1fcd76d022..a8693632235f 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -24,6 +24,7 @@
24#include <linux/pagemap.h> 24#include <linux/pagemap.h>
25#include <linux/freezer.h> 25#include <linux/freezer.h>
26#include <linux/sched/signal.h> 26#include <linux/sched/signal.h>
27#include <linux/wait_bit.h>
27 28
28#include <asm/div64.h> 29#include <asm/div64.h>
29#include "cifsfs.h" 30#include "cifsfs.h"
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 3e24392f2caa..8701d7617964 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -7,6 +7,7 @@
7#include <linux/security.h> 7#include <linux/security.h>
8#include <linux/crc32.h> 8#include <linux/crc32.h>
9#include <linux/nfs_page.h> 9#include <linux/nfs_page.h>
10#include <linux/wait_bit.h>
10 11
11#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS) 12#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
12 13
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 803e5a9b2654..53f7e49d8fe5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2,7 +2,7 @@
2#define _LINUX_FS_H 2#define _LINUX_FS_H
3 3
4#include <linux/linkage.h> 4#include <linux/linkage.h>
5#include <linux/wait.h> 5#include <linux/wait_bit.h>
6#include <linux/kdev_t.h> 6#include <linux/kdev_t.h>
7#include <linux/dcache.h> 7#include <linux/dcache.h>
8#include <linux/path.h> 8#include <linux/path.h>
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 7ba040c797ec..9d7529ffc4ce 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -13,7 +13,7 @@
13#include <linux/ktime.h> 13#include <linux/ktime.h>
14#include <linux/sunrpc/types.h> 14#include <linux/sunrpc/types.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/wait.h> 16#include <linux/wait_bit.h>
17#include <linux/workqueue.h> 17#include <linux/workqueue.h>
18#include <linux/sunrpc/xdr.h> 18#include <linux/sunrpc/xdr.h>
19 19
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0805098f3589..629489746f8a 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -29,18 +29,6 @@ struct wait_queue_entry {
29 struct list_head task_list; 29 struct list_head task_list;
30}; 30};
31 31
32struct wait_bit_key {
33 void *flags;
34 int bit_nr;
35#define WAIT_ATOMIC_T_BIT_NR -1
36 unsigned long timeout;
37};
38
39struct wait_bit_queue_entry {
40 struct wait_bit_key key;
41 struct wait_queue_entry wq_entry;
42};
43
44struct wait_queue_head { 32struct wait_queue_head {
45 spinlock_t lock; 33 spinlock_t lock;
46 struct list_head task_list; 34 struct list_head task_list;
@@ -68,12 +56,6 @@ struct task_struct;
68#define DECLARE_WAIT_QUEUE_HEAD(name) \ 56#define DECLARE_WAIT_QUEUE_HEAD(name) \
69 struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name) 57 struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
70 58
71#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
72 { .flags = word, .bit_nr = bit, }
73
74#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
75 { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
76
77extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *); 59extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);
78 60
79#define init_waitqueue_head(wq_head) \ 61#define init_waitqueue_head(wq_head) \
@@ -200,22 +182,11 @@ __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
200 list_del(&wq_entry->task_list); 182 list_del(&wq_entry->task_list);
201} 183}
202 184
203typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);
204void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); 185void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
205void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); 186void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
206void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); 187void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
207void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr); 188void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
208void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr); 189void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
209void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
210int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
211int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
212void wake_up_bit(void *word, int bit);
213void wake_up_atomic_t(atomic_t *p);
214int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
215int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
216int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
217int out_of_line_wait_on_atomic_t(atomic_t *p, int (*)(atomic_t *), unsigned int mode);
218struct wait_queue_head *bit_waitqueue(void *word, int bit);
219 190
220#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL) 191#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
221#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL) 192#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -976,7 +947,6 @@ void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_en
976long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout); 947long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
977int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); 948int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
978int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); 949int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
979int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
980 950
981#define DEFINE_WAIT_FUNC(name, function) \ 951#define DEFINE_WAIT_FUNC(name, function) \
982 struct wait_queue_entry name = { \ 952 struct wait_queue_entry name = { \
@@ -987,17 +957,6 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync
987 957
988#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) 958#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
989 959
990#define DEFINE_WAIT_BIT(name, word, bit) \
991 struct wait_bit_queue_entry name = { \
992 .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
993 .wq_entry = { \
994 .private = current, \
995 .func = wake_bit_function, \
996 .task_list = \
997 LIST_HEAD_INIT((name).wq_entry.task_list), \
998 }, \
999 }
1000
1001#define init_wait(wait) \ 960#define init_wait(wait) \
1002 do { \ 961 do { \
1003 (wait)->private = current; \ 962 (wait)->private = current; \
@@ -1006,213 +965,4 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync
1006 (wait)->flags = 0; \ 965 (wait)->flags = 0; \
1007 } while (0) 966 } while (0)
1008 967
1009
1010extern int bit_wait(struct wait_bit_key *key, int bit);
1011extern int bit_wait_io(struct wait_bit_key *key, int bit);
1012extern int bit_wait_timeout(struct wait_bit_key *key, int bit);
1013extern int bit_wait_io_timeout(struct wait_bit_key *key, int bit);
1014
1015/**
1016 * wait_on_bit - wait for a bit to be cleared
1017 * @word: the word being waited on, a kernel virtual address
1018 * @bit: the bit of the word being waited on
1019 * @mode: the task state to sleep in
1020 *
1021 * There is a standard hashed waitqueue table for generic use. This
1022 * is the part of the hashtable's accessor API that waits on a bit.
1023 * For instance, if one were to have waiters on a bitflag, one would
1024 * call wait_on_bit() in threads waiting for the bit to clear.
1025 * One uses wait_on_bit() where one is waiting for the bit to clear,
1026 * but has no intention of setting it.
1027 * Returned value will be zero if the bit was cleared, or non-zero
1028 * if the process received a signal and the mode permitted wakeup
1029 * on that signal.
1030 */
1031static inline int
1032wait_on_bit(unsigned long *word, int bit, unsigned mode)
1033{
1034 might_sleep();
1035 if (!test_bit(bit, word))
1036 return 0;
1037 return out_of_line_wait_on_bit(word, bit,
1038 bit_wait,
1039 mode);
1040}
1041
1042/**
1043 * wait_on_bit_io - wait for a bit to be cleared
1044 * @word: the word being waited on, a kernel virtual address
1045 * @bit: the bit of the word being waited on
1046 * @mode: the task state to sleep in
1047 *
1048 * Use the standard hashed waitqueue table to wait for a bit
1049 * to be cleared. This is similar to wait_on_bit(), but calls
1050 * io_schedule() instead of schedule() for the actual waiting.
1051 *
1052 * Returned value will be zero if the bit was cleared, or non-zero
1053 * if the process received a signal and the mode permitted wakeup
1054 * on that signal.
1055 */
1056static inline int
1057wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
1058{
1059 might_sleep();
1060 if (!test_bit(bit, word))
1061 return 0;
1062 return out_of_line_wait_on_bit(word, bit,
1063 bit_wait_io,
1064 mode);
1065}
1066
1067/**
1068 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1069 * @word: the word being waited on, a kernel virtual address
1070 * @bit: the bit of the word being waited on
1071 * @mode: the task state to sleep in
1072 * @timeout: timeout, in jiffies
1073 *
1074 * Use the standard hashed waitqueue table to wait for a bit
1075 * to be cleared. This is similar to wait_on_bit(), except also takes a
1076 * timeout parameter.
1077 *
1078 * Returned value will be zero if the bit was cleared before the
1079 * @timeout elapsed, or non-zero if the @timeout elapsed or process
1080 * received a signal and the mode permitted wakeup on that signal.
1081 */
1082static inline int
1083wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1084 unsigned long timeout)
1085{
1086 might_sleep();
1087 if (!test_bit(bit, word))
1088 return 0;
1089 return out_of_line_wait_on_bit_timeout(word, bit,
1090 bit_wait_timeout,
1091 mode, timeout);
1092}
1093
1094/**
1095 * wait_on_bit_action - wait for a bit to be cleared
1096 * @word: the word being waited on, a kernel virtual address
1097 * @bit: the bit of the word being waited on
1098 * @action: the function used to sleep, which may take special actions
1099 * @mode: the task state to sleep in
1100 *
1101 * Use the standard hashed waitqueue table to wait for a bit
1102 * to be cleared, and allow the waiting action to be specified.
1103 * This is like wait_on_bit() but allows fine control of how the waiting
1104 * is done.
1105 *
1106 * Returned value will be zero if the bit was cleared, or non-zero
1107 * if the process received a signal and the mode permitted wakeup
1108 * on that signal.
1109 */
1110static inline int
1111wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1112 unsigned mode)
1113{
1114 might_sleep();
1115 if (!test_bit(bit, word))
1116 return 0;
1117 return out_of_line_wait_on_bit(word, bit, action, mode);
1118}
1119
1120/**
1121 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
1122 * @word: the word being waited on, a kernel virtual address
1123 * @bit: the bit of the word being waited on
1124 * @mode: the task state to sleep in
1125 *
1126 * There is a standard hashed waitqueue table for generic use. This
1127 * is the part of the hashtable's accessor API that waits on a bit
1128 * when one intends to set it, for instance, trying to lock bitflags.
1129 * For instance, if one were to have waiters trying to set bitflag
1130 * and waiting for it to clear before setting it, one would call
1131 * wait_on_bit() in threads waiting to be able to set the bit.
1132 * One uses wait_on_bit_lock() where one is waiting for the bit to
1133 * clear with the intention of setting it, and when done, clearing it.
1134 *
1135 * Returns zero if the bit was (eventually) found to be clear and was
1136 * set. Returns non-zero if a signal was delivered to the process and
1137 * the @mode allows that signal to wake the process.
1138 */
1139static inline int
1140wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
1141{
1142 might_sleep();
1143 if (!test_and_set_bit(bit, word))
1144 return 0;
1145 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1146}
1147
1148/**
1149 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
1150 * @word: the word being waited on, a kernel virtual address
1151 * @bit: the bit of the word being waited on
1152 * @mode: the task state to sleep in
1153 *
1154 * Use the standard hashed waitqueue table to wait for a bit
1155 * to be cleared and then to atomically set it. This is similar
1156 * to wait_on_bit(), but calls io_schedule() instead of schedule()
1157 * for the actual waiting.
1158 *
1159 * Returns zero if the bit was (eventually) found to be clear and was
1160 * set. Returns non-zero if a signal was delivered to the process and
1161 * the @mode allows that signal to wake the process.
1162 */
1163static inline int
1164wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
1165{
1166 might_sleep();
1167 if (!test_and_set_bit(bit, word))
1168 return 0;
1169 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1170}
1171
1172/**
1173 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
1174 * @word: the word being waited on, a kernel virtual address
1175 * @bit: the bit of the word being waited on
1176 * @action: the function used to sleep, which may take special actions
1177 * @mode: the task state to sleep in
1178 *
1179 * Use the standard hashed waitqueue table to wait for a bit
1180 * to be cleared and then to set it, and allow the waiting action
1181 * to be specified.
1182 * This is like wait_on_bit() but allows fine control of how the waiting
1183 * is done.
1184 *
1185 * Returns zero if the bit was (eventually) found to be clear and was
1186 * set. Returns non-zero if a signal was delivered to the process and
1187 * the @mode allows that signal to wake the process.
1188 */
1189static inline int
1190wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1191 unsigned mode)
1192{
1193 might_sleep();
1194 if (!test_and_set_bit(bit, word))
1195 return 0;
1196 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1197}
1198
1199/**
1200 * wait_on_atomic_t - Wait for an atomic_t to become 0
1201 * @val: The atomic value being waited on, a kernel virtual address
1202 * @action: the function used to sleep, which may take special actions
1203 * @mode: the task state to sleep in
1204 *
1205 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
1206 * the purpose of getting a waitqueue, but we set the key to a bit number
1207 * outside of the target 'word'.
1208 */
1209static inline
1210int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1211{
1212 might_sleep();
1213 if (atomic_read(val) == 0)
1214 return 0;
1215 return out_of_line_wait_on_atomic_t(val, action, mode);
1216}
1217
1218#endif /* _LINUX_WAIT_H */ 968#endif /* _LINUX_WAIT_H */
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
new file mode 100644
index 000000000000..8c85c52d94b6
--- /dev/null
+++ b/include/linux/wait_bit.h
@@ -0,0 +1,260 @@
1#ifndef _LINUX_WAIT_BIT_H
2#define _LINUX_WAIT_BIT_H
3
4/*
5 * Linux wait-bit related types and methods:
6 */
7#include <linux/wait.h>
8
9struct wait_bit_key {
10 void *flags;
11 int bit_nr;
12#define WAIT_ATOMIC_T_BIT_NR -1
13 unsigned long timeout;
14};
15
16struct wait_bit_queue_entry {
17 struct wait_bit_key key;
18 struct wait_queue_entry wq_entry;
19};
20
21#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
22 { .flags = word, .bit_nr = bit, }
23
24#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
25 { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
26
27typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);
28void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
29int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
30int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
31void wake_up_bit(void *word, int bit);
32void wake_up_atomic_t(atomic_t *p);
33int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
34int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
35int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
36int out_of_line_wait_on_atomic_t(atomic_t *p, int (*)(atomic_t *), unsigned int mode);
37struct wait_queue_head *bit_waitqueue(void *word, int bit);
38
39int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
40
41#define DEFINE_WAIT_BIT(name, word, bit) \
42 struct wait_bit_queue_entry name = { \
43 .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
44 .wq_entry = { \
45 .private = current, \
46 .func = wake_bit_function, \
47 .task_list = \
48 LIST_HEAD_INIT((name).wq_entry.task_list), \
49 }, \
50 }
51
52extern int bit_wait(struct wait_bit_key *key, int bit);
53extern int bit_wait_io(struct wait_bit_key *key, int bit);
54extern int bit_wait_timeout(struct wait_bit_key *key, int bit);
55extern int bit_wait_io_timeout(struct wait_bit_key *key, int bit);
56
57/**
58 * wait_on_bit - wait for a bit to be cleared
59 * @word: the word being waited on, a kernel virtual address
60 * @bit: the bit of the word being waited on
61 * @mode: the task state to sleep in
62 *
63 * There is a standard hashed waitqueue table for generic use. This
64 * is the part of the hashtable's accessor API that waits on a bit.
65 * For instance, if one were to have waiters on a bitflag, one would
66 * call wait_on_bit() in threads waiting for the bit to clear.
67 * One uses wait_on_bit() where one is waiting for the bit to clear,
68 * but has no intention of setting it.
69 * Returned value will be zero if the bit was cleared, or non-zero
70 * if the process received a signal and the mode permitted wakeup
71 * on that signal.
72 */
73static inline int
74wait_on_bit(unsigned long *word, int bit, unsigned mode)
75{
76 might_sleep();
77 if (!test_bit(bit, word))
78 return 0;
79 return out_of_line_wait_on_bit(word, bit,
80 bit_wait,
81 mode);
82}
83
84/**
85 * wait_on_bit_io - wait for a bit to be cleared
86 * @word: the word being waited on, a kernel virtual address
87 * @bit: the bit of the word being waited on
88 * @mode: the task state to sleep in
89 *
90 * Use the standard hashed waitqueue table to wait for a bit
91 * to be cleared. This is similar to wait_on_bit(), but calls
92 * io_schedule() instead of schedule() for the actual waiting.
93 *
94 * Returned value will be zero if the bit was cleared, or non-zero
95 * if the process received a signal and the mode permitted wakeup
96 * on that signal.
97 */
98static inline int
99wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
100{
101 might_sleep();
102 if (!test_bit(bit, word))
103 return 0;
104 return out_of_line_wait_on_bit(word, bit,
105 bit_wait_io,
106 mode);
107}
108
109/**
110 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
111 * @word: the word being waited on, a kernel virtual address
112 * @bit: the bit of the word being waited on
113 * @mode: the task state to sleep in
114 * @timeout: timeout, in jiffies
115 *
116 * Use the standard hashed waitqueue table to wait for a bit
117 * to be cleared. This is similar to wait_on_bit(), except also takes a
118 * timeout parameter.
119 *
120 * Returned value will be zero if the bit was cleared before the
121 * @timeout elapsed, or non-zero if the @timeout elapsed or process
122 * received a signal and the mode permitted wakeup on that signal.
123 */
124static inline int
125wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
126 unsigned long timeout)
127{
128 might_sleep();
129 if (!test_bit(bit, word))
130 return 0;
131 return out_of_line_wait_on_bit_timeout(word, bit,
132 bit_wait_timeout,
133 mode, timeout);
134}
135
136/**
137 * wait_on_bit_action - wait for a bit to be cleared
138 * @word: the word being waited on, a kernel virtual address
139 * @bit: the bit of the word being waited on
140 * @action: the function used to sleep, which may take special actions
141 * @mode: the task state to sleep in
142 *
143 * Use the standard hashed waitqueue table to wait for a bit
144 * to be cleared, and allow the waiting action to be specified.
145 * This is like wait_on_bit() but allows fine control of how the waiting
146 * is done.
147 *
148 * Returned value will be zero if the bit was cleared, or non-zero
149 * if the process received a signal and the mode permitted wakeup
150 * on that signal.
151 */
152static inline int
153wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
154 unsigned mode)
155{
156 might_sleep();
157 if (!test_bit(bit, word))
158 return 0;
159 return out_of_line_wait_on_bit(word, bit, action, mode);
160}
161
162/**
163 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
164 * @word: the word being waited on, a kernel virtual address
165 * @bit: the bit of the word being waited on
166 * @mode: the task state to sleep in
167 *
168 * There is a standard hashed waitqueue table for generic use. This
169 * is the part of the hashtable's accessor API that waits on a bit
170 * when one intends to set it, for instance, trying to lock bitflags.
171 * For instance, if one were to have waiters trying to set bitflag
172 * and waiting for it to clear before setting it, one would call
173 * wait_on_bit() in threads waiting to be able to set the bit.
174 * One uses wait_on_bit_lock() where one is waiting for the bit to
175 * clear with the intention of setting it, and when done, clearing it.
176 *
177 * Returns zero if the bit was (eventually) found to be clear and was
178 * set. Returns non-zero if a signal was delivered to the process and
179 * the @mode allows that signal to wake the process.
180 */
181static inline int
182wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
183{
184 might_sleep();
185 if (!test_and_set_bit(bit, word))
186 return 0;
187 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
188}
189
190/**
191 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
192 * @word: the word being waited on, a kernel virtual address
193 * @bit: the bit of the word being waited on
194 * @mode: the task state to sleep in
195 *
196 * Use the standard hashed waitqueue table to wait for a bit
197 * to be cleared and then to atomically set it. This is similar
198 * to wait_on_bit(), but calls io_schedule() instead of schedule()
199 * for the actual waiting.
200 *
201 * Returns zero if the bit was (eventually) found to be clear and was
202 * set. Returns non-zero if a signal was delivered to the process and
203 * the @mode allows that signal to wake the process.
204 */
205static inline int
206wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
207{
208 might_sleep();
209 if (!test_and_set_bit(bit, word))
210 return 0;
211 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
212}
213
214/**
215 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
216 * @word: the word being waited on, a kernel virtual address
217 * @bit: the bit of the word being waited on
218 * @action: the function used to sleep, which may take special actions
219 * @mode: the task state to sleep in
220 *
221 * Use the standard hashed waitqueue table to wait for a bit
222 * to be cleared and then to set it, and allow the waiting action
223 * to be specified.
224 * This is like wait_on_bit() but allows fine control of how the waiting
225 * is done.
226 *
227 * Returns zero if the bit was (eventually) found to be clear and was
228 * set. Returns non-zero if a signal was delivered to the process and
229 * the @mode allows that signal to wake the process.
230 */
231static inline int
232wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
233 unsigned mode)
234{
235 might_sleep();
236 if (!test_and_set_bit(bit, word))
237 return 0;
238 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
239}
240
241/**
242 * wait_on_atomic_t - Wait for an atomic_t to become 0
243 * @val: The atomic value being waited on, a kernel virtual address
244 * @action: the function used to sleep, which may take special actions
245 * @mode: the task state to sleep in
246 *
247 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
248 * the purpose of getting a waitqueue, but we set the key to a bit number
249 * outside of the target 'word'.
250 */
251static inline
252int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
253{
254 might_sleep();
255 if (atomic_read(val) == 0)
256 return 0;
257 return out_of_line_wait_on_atomic_t(val, action, mode);
258}
259
260#endif /* _LINUX_WAIT_BIT_H */
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 89ab6758667b..16277e2ed8ee 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -17,7 +17,7 @@ endif
17 17
18obj-y += core.o loadavg.o clock.o cputime.o 18obj-y += core.o loadavg.o clock.o cputime.o
19obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o 19obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
20obj-y += wait.o swait.o completion.o idle.o 20obj-y += wait.o wait_bit.o swait.o completion.o idle.o
21obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o 21obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o
22obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o 22obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
23obj-$(CONFIG_SCHEDSTATS) += stats.o 23obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 95e6d3820cba..6bcd7c3c4501 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -390,260 +390,3 @@ int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sy
390 return default_wake_function(wq_entry, mode, sync, key); 390 return default_wake_function(wq_entry, mode, sync, key);
391} 391}
392EXPORT_SYMBOL(woken_wake_function); 392EXPORT_SYMBOL(woken_wake_function);
393
394int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *arg)
395{
396 struct wait_bit_key *key = arg;
397 struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
398
399 if (wait_bit->key.flags != key->flags ||
400 wait_bit->key.bit_nr != key->bit_nr ||
401 test_bit(key->bit_nr, key->flags))
402 return 0;
403 else
404 return autoremove_wake_function(wq_entry, mode, sync, key);
405}
406EXPORT_SYMBOL(wake_bit_function);
407
408/*
409 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
410 * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
411 * permitted return codes. Nonzero return codes halt waiting and return.
412 */
413int __sched
414__wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
415 wait_bit_action_f *action, unsigned mode)
416{
417 int ret = 0;
418
419 do {
420 prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
421 if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
422 ret = (*action)(&wbq_entry->key, mode);
423 } while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
424 finish_wait(wq_head, &wbq_entry->wq_entry);
425 return ret;
426}
427EXPORT_SYMBOL(__wait_on_bit);
428
429int __sched out_of_line_wait_on_bit(void *word, int bit,
430 wait_bit_action_f *action, unsigned mode)
431{
432 struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
433 DEFINE_WAIT_BIT(wq_entry, word, bit);
434
435 return __wait_on_bit(wq_head, &wq_entry, action, mode);
436}
437EXPORT_SYMBOL(out_of_line_wait_on_bit);
438
439int __sched out_of_line_wait_on_bit_timeout(
440 void *word, int bit, wait_bit_action_f *action,
441 unsigned mode, unsigned long timeout)
442{
443 struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
444 DEFINE_WAIT_BIT(wq_entry, word, bit);
445
446 wq_entry.key.timeout = jiffies + timeout;
447 return __wait_on_bit(wq_head, &wq_entry, action, mode);
448}
449EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
450
451int __sched
452__wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
453 wait_bit_action_f *action, unsigned mode)
454{
455 int ret = 0;
456
457 for (;;) {
458 prepare_to_wait_exclusive(wq_head, &wbq_entry->wq_entry, mode);
459 if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
460 ret = action(&wbq_entry->key, mode);
461 /*
462 * See the comment in prepare_to_wait_event().
 463 * finish_wait() does not necessarily take wq_head->lock,
464 * but test_and_set_bit() implies mb() which pairs with
465 * smp_mb__after_atomic() before wake_up_page().
466 */
467 if (ret)
468 finish_wait(wq_head, &wbq_entry->wq_entry);
469 }
470 if (!test_and_set_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
471 if (!ret)
472 finish_wait(wq_head, &wbq_entry->wq_entry);
473 return 0;
474 } else if (ret) {
475 return ret;
476 }
477 }
478}
479EXPORT_SYMBOL(__wait_on_bit_lock);
480
481int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
482 wait_bit_action_f *action, unsigned mode)
483{
484 struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
485 DEFINE_WAIT_BIT(wq_entry, word, bit);
486
487 return __wait_on_bit_lock(wq_head, &wq_entry, action, mode);
488}
489EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
490
491void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit)
492{
493 struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
494 if (waitqueue_active(wq_head))
495 __wake_up(wq_head, TASK_NORMAL, 1, &key);
496}
497EXPORT_SYMBOL(__wake_up_bit);
498
499/**
500 * wake_up_bit - wake up a waiter on a bit
501 * @word: the word being waited on, a kernel virtual address
502 * @bit: the bit of the word being waited on
503 *
504 * There is a standard hashed waitqueue table for generic use. This
505 * is the part of the hashtable's accessor API that wakes up waiters
506 * on a bit. For instance, if one were to have waiters on a bitflag,
507 * one would call wake_up_bit() after clearing the bit.
508 *
509 * In order for this to function properly, as it uses waitqueue_active()
510 * internally, some kind of memory barrier must be done prior to calling
511 * this. Typically, this will be smp_mb__after_atomic(), but in some
512 * cases where bitflags are manipulated non-atomically under a lock, one
 513 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
514 * because spin_unlock() does not guarantee a memory barrier.
515 */
516void wake_up_bit(void *word, int bit)
517{
518 __wake_up_bit(bit_waitqueue(word, bit), word, bit);
519}
520EXPORT_SYMBOL(wake_up_bit);
521
522/*
523 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
524 * index (we're keying off bit -1, but that would produce a horrible hash
525 * value).
526 */
527static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
528{
529 if (BITS_PER_LONG == 64) {
530 unsigned long q = (unsigned long)p;
531 return bit_waitqueue((void *)(q & ~1), q & 1);
532 }
533 return bit_waitqueue(p, 0);
534}
535
536static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync,
537 void *arg)
538{
539 struct wait_bit_key *key = arg;
540 struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
541 atomic_t *val = key->flags;
542
543 if (wait_bit->key.flags != key->flags ||
544 wait_bit->key.bit_nr != key->bit_nr ||
545 atomic_read(val) != 0)
546 return 0;
547 return autoremove_wake_function(wq_entry, mode, sync, key);
548}
549
550/*
551 * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
552 * the actions of __wait_on_atomic_t() are permitted return codes. Nonzero
553 * return codes halt waiting and return.
554 */
555static __sched
556int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
557 int (*action)(atomic_t *), unsigned mode)
558{
559 atomic_t *val;
560 int ret = 0;
561
562 do {
563 prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
564 val = wbq_entry->key.flags;
565 if (atomic_read(val) == 0)
566 break;
567 ret = (*action)(val);
568 } while (!ret && atomic_read(val) != 0);
569 finish_wait(wq_head, &wbq_entry->wq_entry);
570 return ret;
571}
572
573#define DEFINE_WAIT_ATOMIC_T(name, p) \
574 struct wait_bit_queue_entry name = { \
575 .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p), \
576 .wq_entry = { \
577 .private = current, \
578 .func = wake_atomic_t_function, \
579 .task_list = \
580 LIST_HEAD_INIT((name).wq_entry.task_list), \
581 }, \
582 }
583
584__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
585 unsigned mode)
586{
587 struct wait_queue_head *wq_head = atomic_t_waitqueue(p);
588 DEFINE_WAIT_ATOMIC_T(wq_entry, p);
589
590 return __wait_on_atomic_t(wq_head, &wq_entry, action, mode);
591}
592EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
593
594/**
 595 * wake_up_atomic_t - Wake up a waiter on an atomic_t
596 * @p: The atomic_t being waited on, a kernel virtual address
597 *
598 * Wake up anyone waiting for the atomic_t to go to zero.
599 *
600 * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
 601 * check is done by the waiter's wake function, not by the waker itself).
602 */
603void wake_up_atomic_t(atomic_t *p)
604{
605 __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
606}
607EXPORT_SYMBOL(wake_up_atomic_t);
608
609__sched int bit_wait(struct wait_bit_key *word, int mode)
610{
611 schedule();
612 if (signal_pending_state(mode, current))
613 return -EINTR;
614 return 0;
615}
616EXPORT_SYMBOL(bit_wait);
617
618__sched int bit_wait_io(struct wait_bit_key *word, int mode)
619{
620 io_schedule();
621 if (signal_pending_state(mode, current))
622 return -EINTR;
623 return 0;
624}
625EXPORT_SYMBOL(bit_wait_io);
626
627__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
628{
629 unsigned long now = READ_ONCE(jiffies);
630 if (time_after_eq(now, word->timeout))
631 return -EAGAIN;
632 schedule_timeout(word->timeout - now);
633 if (signal_pending_state(mode, current))
634 return -EINTR;
635 return 0;
636}
637EXPORT_SYMBOL_GPL(bit_wait_timeout);
638
639__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
640{
641 unsigned long now = READ_ONCE(jiffies);
642 if (time_after_eq(now, word->timeout))
643 return -EAGAIN;
644 io_schedule_timeout(word->timeout - now);
645 if (signal_pending_state(mode, current))
646 return -EINTR;
647 return 0;
648}
649EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
diff --git a/kernel/sched/wait_bit.c b/kernel/sched/wait_bit.c
new file mode 100644
index 000000000000..463bac84dfd1
--- /dev/null
+++ b/kernel/sched/wait_bit.c
@@ -0,0 +1,263 @@
1/*
2 * The implementation of the wait_bit*() and related waiting APIs:
3 */
4#include <linux/wait_bit.h>
5#include <linux/sched/signal.h>
6#include <linux/sched/debug.h>
7
8int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *arg)
9{
10 struct wait_bit_key *key = arg;
11 struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
12
13 if (wait_bit->key.flags != key->flags ||
14 wait_bit->key.bit_nr != key->bit_nr ||
15 test_bit(key->bit_nr, key->flags))
16 return 0;
17 else
18 return autoremove_wake_function(wq_entry, mode, sync, key);
19}
20EXPORT_SYMBOL(wake_bit_function);
21
22/*
23 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
24 * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
25 * permitted return codes. Nonzero return codes halt waiting and return.
26 */
27int __sched
28__wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
29 wait_bit_action_f *action, unsigned mode)
30{
31 int ret = 0;
32
33 do {
34 prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
35 if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
36 ret = (*action)(&wbq_entry->key, mode);
37 } while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
38 finish_wait(wq_head, &wbq_entry->wq_entry);
39 return ret;
40}
41EXPORT_SYMBOL(__wait_on_bit);
42
43int __sched out_of_line_wait_on_bit(void *word, int bit,
44 wait_bit_action_f *action, unsigned mode)
45{
46 struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
47 DEFINE_WAIT_BIT(wq_entry, word, bit);
48
49 return __wait_on_bit(wq_head, &wq_entry, action, mode);
50}
51EXPORT_SYMBOL(out_of_line_wait_on_bit);
52
53int __sched out_of_line_wait_on_bit_timeout(
54 void *word, int bit, wait_bit_action_f *action,
55 unsigned mode, unsigned long timeout)
56{
57 struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
58 DEFINE_WAIT_BIT(wq_entry, word, bit);
59
60 wq_entry.key.timeout = jiffies + timeout;
61 return __wait_on_bit(wq_head, &wq_entry, action, mode);
62}
63EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
64
65int __sched
66__wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
67 wait_bit_action_f *action, unsigned mode)
68{
69 int ret = 0;
70
71 for (;;) {
72 prepare_to_wait_exclusive(wq_head, &wbq_entry->wq_entry, mode);
73 if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
74 ret = action(&wbq_entry->key, mode);
75 /*
76 * See the comment in prepare_to_wait_event().
 77 * finish_wait() does not necessarily take wq_head->lock,
78 * but test_and_set_bit() implies mb() which pairs with
79 * smp_mb__after_atomic() before wake_up_page().
80 */
81 if (ret)
82 finish_wait(wq_head, &wbq_entry->wq_entry);
83 }
84 if (!test_and_set_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags)) {
85 if (!ret)
86 finish_wait(wq_head, &wbq_entry->wq_entry);
87 return 0;
88 } else if (ret) {
89 return ret;
90 }
91 }
92}
93EXPORT_SYMBOL(__wait_on_bit_lock);
94
95int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
96 wait_bit_action_f *action, unsigned mode)
97{
98 struct wait_queue_head *wq_head = bit_waitqueue(word, bit);
99 DEFINE_WAIT_BIT(wq_entry, word, bit);
100
101 return __wait_on_bit_lock(wq_head, &wq_entry, action, mode);
102}
103EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
104
105void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit)
106{
107 struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
108 if (waitqueue_active(wq_head))
109 __wake_up(wq_head, TASK_NORMAL, 1, &key);
110}
111EXPORT_SYMBOL(__wake_up_bit);
112
113/**
114 * wake_up_bit - wake up a waiter on a bit
115 * @word: the word being waited on, a kernel virtual address
116 * @bit: the bit of the word being waited on
117 *
118 * There is a standard hashed waitqueue table for generic use. This
119 * is the part of the hashtable's accessor API that wakes up waiters
120 * on a bit. For instance, if one were to have waiters on a bitflag,
121 * one would call wake_up_bit() after clearing the bit.
122 *
123 * In order for this to function properly, as it uses waitqueue_active()
124 * internally, some kind of memory barrier must be done prior to calling
125 * this. Typically, this will be smp_mb__after_atomic(), but in some
126 * cases where bitflags are manipulated non-atomically under a lock, one
 127 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
128 * because spin_unlock() does not guarantee a memory barrier.
129 */
130void wake_up_bit(void *word, int bit)
131{
132 __wake_up_bit(bit_waitqueue(word, bit), word, bit);
133}
134EXPORT_SYMBOL(wake_up_bit);
135
136/*
137 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
138 * index (we're keying off bit -1, but that would produce a horrible hash
139 * value).
140 */
141static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
142{
143 if (BITS_PER_LONG == 64) {
144 unsigned long q = (unsigned long)p;
145 return bit_waitqueue((void *)(q & ~1), q & 1);
146 }
147 return bit_waitqueue(p, 0);
148}
149
150static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync,
151 void *arg)
152{
153 struct wait_bit_key *key = arg;
154 struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
155 atomic_t *val = key->flags;
156
157 if (wait_bit->key.flags != key->flags ||
158 wait_bit->key.bit_nr != key->bit_nr ||
159 atomic_read(val) != 0)
160 return 0;
161 return autoremove_wake_function(wq_entry, mode, sync, key);
162}
163
164/*
165 * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
166 * the actions of __wait_on_atomic_t() are permitted return codes. Nonzero
167 * return codes halt waiting and return.
168 */
169static __sched
170int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry,
171 int (*action)(atomic_t *), unsigned mode)
172{
173 atomic_t *val;
174 int ret = 0;
175
176 do {
177 prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode);
178 val = wbq_entry->key.flags;
179 if (atomic_read(val) == 0)
180 break;
181 ret = (*action)(val);
182 } while (!ret && atomic_read(val) != 0);
183 finish_wait(wq_head, &wbq_entry->wq_entry);
184 return ret;
185}
186
187#define DEFINE_WAIT_ATOMIC_T(name, p) \
188 struct wait_bit_queue_entry name = { \
189 .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p), \
190 .wq_entry = { \
191 .private = current, \
192 .func = wake_atomic_t_function, \
193 .task_list = \
194 LIST_HEAD_INIT((name).wq_entry.task_list), \
195 }, \
196 }
197
198__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
199 unsigned mode)
200{
201 struct wait_queue_head *wq_head = atomic_t_waitqueue(p);
202 DEFINE_WAIT_ATOMIC_T(wq_entry, p);
203
204 return __wait_on_atomic_t(wq_head, &wq_entry, action, mode);
205}
206EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
207
208/**
 209 * wake_up_atomic_t - Wake up a waiter on an atomic_t
210 * @p: The atomic_t being waited on, a kernel virtual address
211 *
212 * Wake up anyone waiting for the atomic_t to go to zero.
213 *
214 * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
 215 * check is done by the waiter's wake function, not by the waker itself).
216 */
217void wake_up_atomic_t(atomic_t *p)
218{
219 __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
220}
221EXPORT_SYMBOL(wake_up_atomic_t);
222
223__sched int bit_wait(struct wait_bit_key *word, int mode)
224{
225 schedule();
226 if (signal_pending_state(mode, current))
227 return -EINTR;
228 return 0;
229}
230EXPORT_SYMBOL(bit_wait);
231
232__sched int bit_wait_io(struct wait_bit_key *word, int mode)
233{
234 io_schedule();
235 if (signal_pending_state(mode, current))
236 return -EINTR;
237 return 0;
238}
239EXPORT_SYMBOL(bit_wait_io);
240
241__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
242{
243 unsigned long now = READ_ONCE(jiffies);
244 if (time_after_eq(now, word->timeout))
245 return -EAGAIN;
246 schedule_timeout(word->timeout - now);
247 if (signal_pending_state(mode, current))
248 return -EINTR;
249 return 0;
250}
251EXPORT_SYMBOL_GPL(bit_wait_timeout);
252
253__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
254{
255 unsigned long now = READ_ONCE(jiffies);
256 if (time_after_eq(now, word->timeout))
257 return -EAGAIN;
258 io_schedule_timeout(word->timeout - now);
259 if (signal_pending_state(mode, current))
260 return -EINTR;
261 return 0;
262}
263EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
diff --git a/security/keys/internal.h b/security/keys/internal.h
index c0f8682eba69..91bc6214ae57 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -13,6 +13,7 @@
13#define _INTERNAL_H 13#define _INTERNAL_H
14 14
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/wait_bit.h>
16#include <linux/cred.h> 17#include <linux/cred.h>
17#include <linux/key-type.h> 18#include <linux/key-type.h>
18#include <linux/task_work.h> 19#include <linux/task_work.h>