path: root/kernel/wait.c
blob: 444ddbfaefc490839e2cf913cb688c7ebb9f0df9
/*
 * Copyright (c) 2000-2001 Christoph Hellwig.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
#ifndef _VXFS_OLT_H_
#define _VXFS_OLT_H_

/*
 * Veritas filesystem driver - Object Location Table data structures.
 *
 * This file contains definitions for the Object Location Table used
 * by the Veritas Filesystem version 2 and newer.
 */


/*
 * OLT magic number (vxfs_olt->olt_magic).
 */
#define VXFS_OLT_MAGIC		0xa504FCF5

/*
 * VxFS OLT entry types.
 */
enum {
	VXFS_OLT_FREE	= 1,
	VXFS_OLT_FSHEAD	= 2,
	VXFS_OLT_CUT	= 3,
	VXFS_OLT_ILIST	= 4,
	VXFS_OLT_DEV	= 5,
	VXFS_OLT_SB	= 6
};

/*
 * VxFS OLT header.
 *
 * The Object Location Table header is placed at the beginning of each
 * OLT extent.  It is used to find certain filesystem-wide metadata, e.g.
 * the initial inode list, the fileset header or the device configuration;
 * a sketch of walking these records follows struct vxfs_oltcommon below.
 */
struct vxfs_olt {
	u_int32_t	olt_magic;	/* magic number			*/
	u_int32_t	olt_size;	/* size of this entry		*/
	u_int32_t	olt_checksum;	/* checksum of extent		*/
	u_int32_t	__unused1;	/* ???				*/
	u_int32_t	olt_mtime;	/* time of last mod. (sec)	*/
	u_int32_t	olt_mutime;	/* time of last mod. (usec)	*/
	u_int32_t	olt_totfree;	/* free space in OLT extent	*/
	vx_daddr_t	olt_extents[2];	/* addr of this extent, replica	*/
	u_int32_t	olt_esize;	/* size of this extent		*/
	vx_daddr_t	olt_next[2];    /* addr of next extent, replica	*/
	u_int32_t	olt_nsize;	/* size of next extent		*/
	u_int32_t	__unused2;	/* align to 8 byte boundary	*/
};

/*
 * VxFS common OLT entry (on disk).
 */
struct vxfs_oltcommon {
	u_int32_t	olt_type;	/* type of this record		*/
	u_int32_t	olt_size;	/* size of this record		*/
};
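
/*
 * Illustrative sketch (not part of the on-disk format): validating an
 * OLT extent and walking its records, roughly as a read side would do
 * it.  Assumes "buf" holds one OLT extent already read from disk and
 * "esize" is its size in bytes; all names here are hypothetical.
 */
#if 0	/* example only, not compiled */
static int vxfs_olt_walk_example(char *buf, u_int32_t esize)
{
	struct vxfs_olt		*op = (struct vxfs_olt *)buf;
	char			*oaddr, *eaddr;

	if (op->olt_magic != VXFS_OLT_MAGIC)
		return -1;			/* not an OLT extent */

	oaddr = buf + op->olt_size;		/* first record follows the header */
	eaddr = buf + esize;
	while (oaddr < eaddr) {
		struct vxfs_oltcommon *ocp = (struct vxfs_oltcommon *)oaddr;

		switch (ocp->olt_type) {
		case VXFS_OLT_FSHEAD:		/* fileset header inodes */
		case VXFS_OLT_ILIST:		/* initial inode list */
			/* dispatch on the record type here */
			break;
		}
		oaddr += ocp->olt_size;		/* records are variable-sized */
	}
	return 0;
}
#endif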

/*
 * VxFS free OLT entry (on disk).
 */
struct vxfs_oltfree {
	u_int32_t	olt_type;	/* type of this record		*/
	u_int32_t	olt_fsize;	/* size of this free record	*/
};

/*
 * VxFS initial-inode list (on disk).
 */
struct vxfs_oltilist {
	u_int32_t	olt_type;	/* type of this record		*/
	u_int32_t	olt_size;	/* size of this record		*/
	vx_ino_t	olt_iext[2];	/* initial inode list, replica	*/
};

/*
 * Current Usage Table 
 */
struct vxfs_oltcut {
	u_int32_t	olt_type;	/* type of this record		*/
	u_int32_t	olt_size;	/* size of this record		*/
	vx_ino_t	olt_cutino;	/* inode of current usage table	*/
	u_int32_t	__pad;		/* unused, 8 byte align		*/
};

/*
 * Inodes containing Superblock, Intent log and OLTs 
 */
struct vxfs_oltsb {
	u_int32_t	olt_type;	/* type of this record		*/
	u_int32_t	olt_size;	/* size of this record		*/
	vx_ino_t	olt_sbino;	/* inode of superblock file	*/
	u_int32_t	__unused1;	/* ???				*/
	vx_ino_t	olt_logino[2];	/* inode of log file, replica	*/
	vx_ino_t	olt_oltino[2];	/* inode of OLT, replica	*/
};

/*
 * Inode containing device configuration and its replica
 */
struct vxfs_oltdev {
	u_int32_t	olt_type;	/* type of this record		*/
	u_int32_t	olt_size;	/* size of this record		*/
	vx_ino_t	olt_devino[2];	/* inode of device config files	*/
};

/*
 * Fileset header 
 */
struct vxfs_oltfshead {
	u_int32_t	olt_type;	/* type number			*/
	u_int32_t	olt_size;	/* size of this record		*/
	vx_ino_t	olt_fsino[2];   /* inodes of fileset header	*/
};

#endif /* _VXFS_OLT_H_ */

/* kernel/wait.c */

void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_tail(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(add_wait_queue_exclusive);

void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__remove_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(remove_wait_queue);

/*
 * Note: we use "set_current_state()" _after_ the wait-queue add,
 * because we need a memory barrier there on SMP, so that any
 * wake-function that tests for the wait-queue being active
 * will be guaranteed to see waitqueue addition _or_ subsequent
 * tests in this thread will see the wakeup having taken place.
 *
 * The spin_unlock() itself is semi-permeable and only protects
 * one way (it only protects stuff inside the critical region and
 * stops them from bleeding out - it would still allow subsequent
 * loads to move into the critical region).
 */
void fastcall prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait);

void fastcall prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	/*
	 * don't alter the task state if this is just going to
	 * queue an async wait queue callback
	 */
	if (is_sync_wait(wait))
		set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_wait_exclusive);

void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPU's that we haven't seen yet (and that might
	 *    still change the stack area).
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_wait);

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}
EXPORT_SYMBOL(autoremove_wake_function);

int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit
		= container_of(wait, struct wait_bit_queue, wait);

	if (wait_bit->key.flags != key->flags ||
			wait_bit->key.bit_nr != key->bit_nr ||
			test_bit(key->bit_nr, key->flags))
		return 0;
	else
		return autoremove_wake_function(wait, mode, sync, key);
}
EXPORT_SYMBOL(wake_bit_function);
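
/*
 * Illustrative sketch (not part of wait.c): the canonical open-coded
 * sleep loop that prepare_to_wait()/finish_wait() are built for.
 * "my_waitqueue" and "my_cond" are hypothetical names for a caller's
 * wait queue and wakeup condition.
 */
#if 0	/* example only, not compiled */
static int wait_for_my_cond(wait_queue_head_t *my_waitqueue, int *my_cond)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* queue first, then set state: see the barrier note above */
		prepare_to_wait(my_waitqueue, &wait, TASK_INTERRUPTIBLE);
		if (*my_cond)			/* re-check after queueing */
			break;
		if (signal_pending(current))
			break;
		schedule();
	}
	finish_wait(my_waitqueue, &wait);
	return signal_pending(current) ? -ERESTARTSYS : 0;
}
#endif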

/*
 * To allow interruptible waiting and asynchronous (i.e. nonblocking)
 * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
 * permitted return codes.  Nonzero return codes halt waiting and return.
 */
int __sched fastcall __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
				int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags))
			ret = (*action)(q->key.flags);
	} while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit);

int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit);

int __sched fastcall __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
					int (*action)(void *), unsigned mode)
{
	int ret = 0;

	do {
		prepare_to_wait_exclusive(wq, &q->wait, mode);
		if (test_bit(q->key.bit_nr, q->key.flags)) {
			if ((ret = (*action)(q->key.flags)))
				break;
		}
	} while (test_and_set_bit(q->key.bit_nr, q->key.flags));
	finish_wait(wq, &q->wait);
	return ret;
}
EXPORT_SYMBOL(__wait_on_bit_lock);

int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
					int (*action)(void *), unsigned mode)
{
	wait_queue_head_t *wq = bit_waitqueue(word, bit);
	DEFINE_WAIT_BIT(wait, word, bit);

	return __wait_on_bit_lock(wq, &wait, action, mode);
}
EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);

void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

	if (waitqueue_active(wq))
		__wake_up(wq, TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE, 1, &key);
}
EXPORT_SYMBOL(__wake_up_bit);

/**
 * wake_up_bit - wake up a waiter on a bit
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that wakes up waiters
 * on a bit. For instance, if one were to have waiters on a bitflag,
 * one would call wake_up_bit() after clearing the bit.
 *
 * In order for this to function properly, as it uses waitqueue_active()
 * internally, some kind of memory barrier must be done prior to calling
 * this. Typically, this will be smp_mb__after_clear_bit(), but in some
 * cases where bitflags are manipulated non-atomically under a lock, one
 * may need to use a less regular barrier, such as fs/inode.c's smp_mb(),
 * because spin_unlock() does not guarantee a memory barrier.
 */
void fastcall wake_up_bit(void *word, int bit)
{
	__wake_up_bit(bit_waitqueue(word, bit), word, bit);
}
EXPORT_SYMBOL(wake_up_bit);

fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit)
{
	const int shift = BITS_PER_LONG == 32 ? 5 : 6;
	const struct zone *zone = page_zone(virt_to_page(word));
	unsigned long val = (unsigned long)word << shift | bit;

	return &zone->wait_table[hash_long(val, zone->wait_table_bits)];
}
EXPORT_SYMBOL(bit_waitqueue);
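
/*
 * Illustrative sketch (not part of wait.c): both halves of the bit-wait
 * protocol described in the wake_up_bit() kerneldoc above.  MY_BUSY_BIT
 * and "word" are hypothetical stand-ins for a real bitflag.
 */
#if 0	/* example only, not compiled */
static int my_bit_action(void *word)
{
	schedule();			/* uninterruptible sleep; never fails */
	return 0;
}

static void my_wait_side(unsigned long *word)
{
	/* sleeps until MY_BUSY_BIT is clear */
	wait_on_bit(word, MY_BUSY_BIT, my_bit_action, TASK_UNINTERRUPTIBLE);
}

static void my_wake_side(unsigned long *word)
{
	clear_bit(MY_BUSY_BIT, word);
	smp_mb__after_clear_bit();	/* barrier required before waking */
	wake_up_bit(word, MY_BUSY_BIT);
}
#endif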