Diffstat (limited to 'kernel/wait.c')

 -rw-r--r--	kernel/wait.c	246
 1 file changed, 246 insertions, 0 deletions
diff --git a/kernel/wait.c b/kernel/wait.c
new file mode 100644
index 000000000000..791681cfea98
--- /dev/null
+++ b/kernel/wait.c
@@ -0,0 +1,246 @@
/*
 * Generic waiting primitives.
 *
 * (C) 2004 William Irwin, Oracle
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/wait.h>
#include <linux/hash.h>

void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue);

void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);
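
/*
 * [Editor's example, not part of the original commit] A minimal sketch
 * of the raw add/remove API from a caller's point of view. The queue
 * head my_event_wq and the flag my_condition are hypothetical; some
 * other context is assumed to set my_condition and call wake_up().
 * Setting the task state before re-testing the condition is what
 * closes the race with the waker (see the barrier note below); the
 * entry lives on the stack, so it must be removed before returning.
 */
static DECLARE_WAIT_QUEUE_HEAD(my_event_wq);	/* hypothetical queue */
static int my_condition;			/* hypothetical condition */

static void example_wait_for_event(void)
{
	DECLARE_WAITQUEUE(wait, current);	/* entry bound to this task */

	add_wait_queue(&my_event_wq, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (my_condition)
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&my_event_wq, &wait);
}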


/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void fastcall
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

void fastcall
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPUs that we haven't seen yet (and that might
	 *    still change the stack area).
	 *  and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);
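
/*
 * [Editor's example, not part of the original commit] The usual caller
 * pattern for prepare_to_wait()/finish_wait(), reusing the hypothetical
 * my_event_wq and my_condition from the sketch above. DEFINE_WAIT()
 * creates an entry whose wake function is autoremove_wake_function(),
 * so a successful wakeup also dequeues the entry; finish_wait() then
 * only has list cleanup to do if no wakeup arrived.
 */
static void example_prepare_finish_wait(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&my_event_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (my_condition)
			break;
		schedule();
	}
	finish_wait(&my_event_wq, &wait);
}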

int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wait, struct wait_bit_queue, wait);

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;
	else
		return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the action functions passed to __wait_on_bit() and
 * __wait_on_bit_lock() are permitted to return nonzero. A nonzero
 * return code halts waiting and is passed back to the caller.
 */
int __sched fastcall
__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = (*action)(q->key.flags);
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);
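
/*
 * [Editor's example, not part of the original commit] A sketch of an
 * action function and a call site, assuming a hypothetical flags word
 * with a MY_BIT_BUSY bit that some other context clears. The action
 * runs after prepare_to_wait() has already set the task state, so the
 * simplest blocking action just schedules and reports success. An
 * interruptible variant would additionally check
 * signal_pending(current) and return -EINTR to abort the wait.
 */
static int example_bit_wait(void *word)
{
	schedule();
	return 0;
}

/* block until another context clears MY_BIT_BUSY in *flags */
static int example_wait_busy(unsigned long *flags)
{
	return out_of_line_wait_on_bit(flags, MY_BIT_BUSY,
				example_bit_wait, TASK_UNINTERRUPTIBLE);
}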

int __sched fastcall
__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
			int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait_exclusive(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags)) {
			if ((ret = (*action)(q->key.flags)))
				break;
		}
	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
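
/*
 * [Editor's example, not part of the original commit] Using the
 * bit-lock variant as a tiny mutex on a hypothetical MY_BIT_LOCK bit.
 * On return the bit is owned by the caller: the loop above only exits
 * once test_and_set_bit() finds the bit clear, and waiters queue
 * exclusively so one waiter is woken per release. The fast path here
 * mirrors common kernel usage; the matching unlock side is sketched
 * after wake_up_bit() below.
 */
static void example_lock_bit(unsigned long *flags)
{
	/* fast path: try to take the bit without sleeping */
	if (test_and_set_bit(MY_BIT_LOCK, flags))
		out_of_line_wait_on_bit_lock(flags, MY_BIT_LOCK,
					example_bit_wait,
					TASK_UNINTERRUPTIBLE);
}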

void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void fastcall wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);
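
/*
 * [Editor's example, not part of the original commit] The matching
 * release/wake side for the hypothetical MY_BIT_LOCK sketch above.
 * The barrier after clearing the bit is exactly what the kerneldoc
 * above requires: it orders the clear against wake_up_bit()'s
 * unlocked waitqueue_active() check.
 */
static void example_unlock_bit(unsigned long *flags)
{
	clear_bit(MY_BIT_LOCK, flags);
	smp_mb__after_clear_bit();	/* pairs with the waiter's test_bit() */
	wake_up_bit(flags, MY_BIT_LOCK);
}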

fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
	const struct zone *zone = page_zone(virt_to_page(word));
	unsigned long val = (unsigned long)word << shift | bit;

	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);