summaryrefslogtreecommitdiffstats
path: root/kernel/wait.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/wait.c')
-rw-r--r--kernel/wait.c88
1 files changed, 88 insertions, 0 deletions
diff --git a/kernel/wait.c b/kernel/wait.c
index 6698e0c04ead..ce0daa320a26 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -287,3 +287,91 @@ wait_queue_head_t *bit_waitqueue(void *word, int bit)
287 return &zone->wait_table[hash_long(val, zone->wait_table_bits)]; 287 return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
288} 288}
289EXPORT_SYMBOL(bit_waitqueue); 289EXPORT_SYMBOL(bit_waitqueue);
290
291/*
292 * Manipulate the atomic_t address to produce a better bit waitqueue table hash
293 * index (we're keying off bit -1, but that would produce a horrible hash
294 * value).
295 */
296static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
297{
298 if (BITS_PER_LONG == 64) {
299 unsigned long q = (unsigned long)p;
300 return bit_waitqueue((void *)(q & ~1), q & 1);
301 }
302 return bit_waitqueue(p, 0);
303}
304
305static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync,
306 void *arg)
307{
308 struct wait_bit_key *key = arg;
309 struct wait_bit_queue *wait_bit
310 = container_of(wait, struct wait_bit_queue, wait);
311 atomic_t *val = key->flags;
312
313 if (wait_bit->key.flags != key->flags ||
314 wait_bit->key.bit_nr != key->bit_nr ||
315 atomic_read(val) != 0)
316 return 0;
317 return autoremove_wake_function(wait, mode, sync, key);
318}
319
320/*
321 * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting,
322 * the actions of __wait_on_atomic_t() are permitted return codes. Nonzero
323 * return codes halt waiting and return.
324 */
325static __sched
326int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
327 int (*action)(atomic_t *), unsigned mode)
328{
329 atomic_t *val;
330 int ret = 0;
331
332 do {
333 prepare_to_wait(wq, &q->wait, mode);
334 val = q->key.flags;
335 if (atomic_read(val) == 0)
336 ret = (*action)(val);
337 } while (!ret && atomic_read(val) != 0);
338 finish_wait(wq, &q->wait);
339 return ret;
340}
341
/*
 * Declare an on-stack wait queue entry keyed to atomic_t @p, owned by the
 * current task and woken through wake_atomic_t_function (which checks the
 * counter before actually waking).
 */
#define DEFINE_WAIT_ATOMIC_T(name, p)					\
	struct wait_bit_queue name = {					\
		.key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_atomic_t_function,	\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
352
353__sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *),
354 unsigned mode)
355{
356 wait_queue_head_t *wq = atomic_t_waitqueue(p);
357 DEFINE_WAIT_ATOMIC_T(wait, p);
358
359 return __wait_on_atomic_t(wq, &wait, action, mode);
360}
361EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
362
/**
 * wake_up_atomic_t - Wake up a waiter on an atomic_t
 * @p: The atomic_t being waited on, a kernel virtual address
 *
 * Wake up anyone waiting for the atomic_t to go to zero.
 *
 * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t
 * check is done by the waiter's wake function, not the by the waker itself).
 */
void wake_up_atomic_t(atomic_t *p)
{
	__wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
}
EXPORT_SYMBOL(wake_up_atomic_t);