From 6aaccece1c483f189f76f1282b3984ff4c7ecb0a Mon Sep 17 00:00:00 2001 From: Michael Witten Date: Mon, 1 Nov 2010 17:50:12 +0000 Subject: Kconfig: typo: and -> an Signed-off-by: Michael Witten Signed-off-by: Jiri Kosina --- fs/notify/fanotify/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/notify/fanotify/Kconfig b/fs/notify/fanotify/Kconfig index 3ac36b7bf6b9..7dceff005a67 100644 --- a/fs/notify/fanotify/Kconfig +++ b/fs/notify/fanotify/Kconfig @@ -6,7 +6,7 @@ config FANOTIFY ---help--- Say Y here to enable fanotify suport. fanotify is a file access notification system which differs from inotify in that it sends - and open file descriptor to the userspace listener along with + an open file descriptor to the userspace listener along with the event. If unsure, say Y. -- cgit v1.2.2 From b595076a180a56d1bb170e6eceda6eb9d76f4cd3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Mon, 1 Nov 2010 15:38:34 -0400 Subject: tree-wide: fix comment/printk typos MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit "gadget", "through", "command", "maintain", "maintain", "controller", "address", "between", "initiali[zs]e", "instead", "function", "select", "already", "equal", "access", "management", "hierarchy", "registration", "interest", "relative", "memory", "offset", "already", Signed-off-by: Uwe Kleine-König Signed-off-by: Jiri Kosina --- fs/ext4/ext4.h | 2 +- fs/ext4/extents.c | 4 ++-- fs/ext4/inode.c | 4 ++-- fs/ocfs2/inode.c | 2 +- fs/ocfs2/suballoc.c | 2 +- fs/xfs/linux-2.6/xfs_super.c | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 8b5dd6369f82..47162de0b957 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -577,7 +577,7 @@ struct ext4_mount_options { #endif }; -/* Max physical block we can addres w/o extents */ +/* Max physical block we can address w/o extents */ #define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF /* diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 0554c48cb1fd..966ecb0d8f86 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -2825,14 +2825,14 @@ fix_extent_len: * to an uninitialized extent. * * Writing to an uninitized extent may result in splitting the uninitialized - * extent into multiple /intialized unintialized extents (up to three) + * extent into multiple /initialized uninitialized extents (up to three) * There are three possibilities: * a> There is no split required: Entire extent should be uninitialized * b> Splits in two extents: Write is happening at either end of the extent * c> Splits in three extents: Somone is writing in middle of the extent * * One of more index blocks maybe needed if the extent tree grow after - * the unintialized extent split. To prevent ENOSPC occur at the IO + * the uninitialized extent split. To prevent ENOSPC occur at the IO * complete, we need to split the uninitialized extent before DIO submit * the IO. The uninitialized extent called at this time will be split * into three uninitialized extent(at most). After IO complete, the part diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 191616470466..4bc84b8adb7f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3740,9 +3740,9 @@ retry: * preallocated extents, and those write extend the file, no need to * fall back to buffered IO. 
* - * For holes, we fallocate those blocks, mark them as unintialized + * For holes, we fallocate those blocks, mark them as uninitialized * If those blocks were preallocated, we mark sure they are splited, but - * still keep the range to write as unintialized. + * still keep the range to write as uninitialized. * * The unwrritten extents will be converted to written when DIO is completed. * For async direct IO, since the IO may still pending when return, we diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index f935fd6600dd..4068c6c4c6f6 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -434,7 +434,7 @@ static int ocfs2_read_locked_inode(struct inode *inode, * #1 and #2 can be simply solved by never taking the lock * here for system files (which are the only type we read * during mount). It's a heavier approach, but our main - * concern is user-accesible files anyway. + * concern is user-accessible files anyway. * * #3 works itself out because we'll eventually take the * cluster lock before trusting anything anyway. diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 5fed60de7630..71998d4d61d5 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -1916,7 +1916,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac, if (res->sr_bg_blkno) { /* Attempt to short-circuit the usual search mechanism * by jumping straight to the most recently used - * allocation group. This helps us mantain some + * allocation group. This helps us maintain some * contiguousness across allocations. */ status = ocfs2_search_one_group(ac, handle, bits_wanted, min_bits, res, &bits_left); diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 9f3a78fe6ae4..7465a7ffc4fd 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -938,7 +938,7 @@ out_reclaim: * Slab object creation initialisation for the XFS inode. * This covers only the idempotent fields in the XFS inode; * all other fields need to be initialised on allocation - * from the slab. This avoids the need to repeatedly intialise + * from the slab. This avoids the need to repeatedly initialise * fields in the xfs inode that left in the initialise state * when freeing the inode. */ -- cgit v1.2.2 From 34db1d595ef6f183fbc1e42cda45a3dfa0035258 Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Thu, 11 Nov 2010 09:58:57 +0100 Subject: block: export 'ro' sysfs attribute for partitions We already export 'ro' for the disk. This adds the same attribute for partitions. Cc: Karel Zak Signed-off-by: Kay Sievers Signed-off-by: Jens Axboe --- fs/partitions/check.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'fs') diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 79fbf3f390f0..861ae84fcee5 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -237,6 +237,13 @@ ssize_t part_size_show(struct device *dev, return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects); } +ssize_t part_ro_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hd_struct *p = dev_to_part(dev); + return sprintf(buf, "%d\n", p->policy ? 
1 : 0); +} + ssize_t part_alignment_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -312,6 +319,7 @@ ssize_t part_fail_store(struct device *dev, static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL); static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL); static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); +static DEVICE_ATTR(ro, S_IRUGO, part_ro_show, NULL); static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); static DEVICE_ATTR(discard_alignment, S_IRUGO, part_discard_alignment_show, NULL); @@ -326,6 +334,7 @@ static struct attribute *part_attrs[] = { &dev_attr_partition.attr, &dev_attr_start.attr, &dev_attr_size.attr, + &dev_attr_ro.attr, &dev_attr_alignment_offset.attr, &dev_attr_discard_alignment.attr, &dev_attr_stat.attr, -- cgit v1.2.2 From b36930dd508e00f0c5083bcd57d25de6d0375c76 Mon Sep 17 00:00:00 2001 From: David Miller Date: Wed, 10 Nov 2010 21:56:39 -0800 Subject: dlm: Handle application limited situations properly. In the normal regime where an application uses non-blocking I/O writes on a socket, they will handle -EAGAIN and use poll() to wait for send space. They don't actually sleep on the socket I/O write. But kernel level RPC layers that do socket I/O operations directly and key off of -EAGAIN on the write() to "try again later" don't use poll(), they instead have their own sleeping mechanism and rely upon ->sk_write_space() to trigger the wakeup. So they do effectively sleep on the write(), but this mechanism alone does not let the socket layers know what's going on. Therefore they must emulate what would have happened, otherwise TCP cannot possibly see that the connection is application window size limited. Handle this, therefore, like SUNRPC by setting SOCK_NOSPACE and bumping the ->sk_write_count as needed when we hit the send buffer limits. This should make TCP send buffer size auto-tuning and the ->sk_write_space() callback invocations actually happen. Signed-off-by: David S. Miller Signed-off-by: David Teigland --- fs/dlm/lowcomms.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 37a34c2c622a..77720f89c879 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -108,6 +108,7 @@ struct connection { #define CF_INIT_PENDING 4 #define CF_IS_OTHERCON 5 #define CF_CLOSE 6 +#define CF_APP_LIMITED 7 struct list_head writequeue; /* List of outgoing writequeue_entries */ spinlock_t writequeue_lock; int (*rx_action) (struct connection *); /* What to do when active */ @@ -295,7 +296,17 @@ static void lowcomms_write_space(struct sock *sk) { struct connection *con = sock2con(sk); - if (con && !test_and_set_bit(CF_WRITE_PENDING, &con->flags)) + if (!con) + return; + + clear_bit(SOCK_NOSPACE, &con->sock->flags); + + if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) { + con->sock->sk->sk_write_pending--; + clear_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags); + } + + if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) queue_work(send_workqueue, &con->swork); } @@ -1319,6 +1330,15 @@ static void send_to_sock(struct connection *con) ret = kernel_sendpage(con->sock, e->page, offset, len, msg_flags); if (ret == -EAGAIN || ret == 0) { + if (ret == -EAGAIN && + test_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags) && + !test_and_set_bit(CF_APP_LIMITED, &con->flags)) { + /* Notify TCP that we're limited by the + * application window size. 
+ */ + set_bit(SOCK_NOSPACE, &con->sock->flags); + con->sock->sk->sk_write_pending++; + } cond_resched(); goto out; } -- cgit v1.2.2 From dcce240ead802d42b1e45ad2fcb2ed4a399cb255 Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Fri, 12 Nov 2010 12:12:29 +0000 Subject: dlm: Use cmwq for send and receive workqueues So far as I can tell, there is no reason to use a single-threaded send workqueue for dlm, since it may need to send to several sockets concurrently. Both workqueues are set to WQ_MEM_RECLAIM to avoid any possible deadlocks, WQ_HIGHPRI since locking traffic is highly latency sensitive (and to avoid a priority inversion wrt GFS2's glock_workqueue) and WQ_FREEZABLE just in case someone needs to do that (even though with current cluster infrastructure, it doesn't make sense as the node will most likely land up ejected from the cluster) in the future. Signed-off-by: Steven Whitehouse Cc: Tejun Heo Signed-off-by: David Teigland --- fs/dlm/lowcomms.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 77720f89c879..1d4e644c6589 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -1451,14 +1451,16 @@ static void work_stop(void) static int work_start(void) { int error; - recv_workqueue = create_workqueue("dlm_recv"); + recv_workqueue = alloc_workqueue("dlm_recv", WQ_MEM_RECLAIM | + WQ_HIGHPRI | WQ_FREEZEABLE, 0); error = IS_ERR(recv_workqueue); if (error) { log_print("can't start dlm_recv %d", error); return error; } - send_workqueue = create_singlethread_workqueue("dlm_send"); + send_workqueue = alloc_workqueue("dlm_send", WQ_MEM_RECLAIM | + WQ_HIGHPRI | WQ_FREEZEABLE, 0); error = IS_ERR(send_workqueue); if (error) { log_print("can't start dlm_send %d", error); -- cgit v1.2.2 From cb2d45da81c86d5191b19d0f67732a854bc0253c Mon Sep 17 00:00:00 2001 From: David Teigland Date: Fri, 12 Nov 2010 11:12:55 -0600 Subject: dlm: use TCP_NODELAY Nagling doesn't help and can sometimes hurt dlm comms. Signed-off-by: David Teigland --- fs/dlm/lowcomms.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 1d4e644c6589..2bedb0ac5f92 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -926,6 +926,7 @@ static void tcp_connect_to_sock(struct connection *con) struct sockaddr_storage saddr, src_addr; int addr_len; struct socket *sock = NULL; + int one = 1; if (con->nodeid == 0) { log_print("attempt to connect sock 0 foiled"); @@ -971,6 +972,11 @@ static void tcp_connect_to_sock(struct connection *con) make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len); log_print("connecting to %d", con->nodeid); + + /* Turn off Nagle's algorithm */ + kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one, + sizeof(one)); + result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len, O_NONBLOCK); @@ -1022,6 +1028,10 @@ static struct socket *tcp_create_listen_sock(struct connection *con, goto create_out; } + /* Turn off Nagle's algorithm */ + kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one, + sizeof(one)); + result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (char *)&one, sizeof(one)); -- cgit v1.2.2 From f92c8dd7a0eb18124521e2b549f88422e17f707b Mon Sep 17 00:00:00 2001 From: Bob Peterson Date: Fri, 12 Nov 2010 11:15:20 -0600 Subject: dlm: reduce cond_resched during send Calling cond_resched() after every send can unnecessarily degrade performance. Go back to an old method of scheduling after 25 messages. 
Signed-off-by: Bob Peterson Signed-off-by: David Teigland --- fs/dlm/lowcomms.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 2bedb0ac5f92..0e75f152eac2 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -63,6 +63,9 @@ #define NEEDED_RMEM (4*1024*1024) #define CONN_HASH_SIZE 32 +/* Number of messages to send before rescheduling */ +#define MAX_SEND_MSG_COUNT 25 + struct cbuf { unsigned int base; unsigned int len; @@ -1318,6 +1321,7 @@ static void send_to_sock(struct connection *con) const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; struct writequeue_entry *e; int len, offset; + int count = 0; mutex_lock(&con->sock_mutex); if (con->sock == NULL) @@ -1355,8 +1359,12 @@ static void send_to_sock(struct connection *con) if (ret <= 0) goto send_error; } - /* Don't starve people filling buffers */ + + /* Don't starve people filling buffers */ + if (++count >= MAX_SEND_MSG_COUNT) { cond_resched(); + count = 0; + } spin_lock(&con->writequeue_lock); e->offset += ret; -- cgit v1.2.2 From 37004c42f7240035bc2726c340c4efa726b4818e Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 13 Nov 2010 11:55:17 +0100 Subject: btrfs: close_bdev_exclusive() should use the same @flags as the matching open_bdev_exclusive() In the failure path of __btrfs_open_devices(), close_bdev_exclusive() is called with @flags which doesn't match the one used during open_bdev_exclusive(). Fix it. Signed-off-by: Tejun Heo Cc: Chris Mason --- fs/btrfs/volumes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index cc04dc1445d6..d39596224d21 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -638,7 +638,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, error_brelse: brelse(bh); error_close: - close_bdev_exclusive(bdev, FMODE_READ); + close_bdev_exclusive(bdev, flags); error: continue; } -- cgit v1.2.2 From e09b457bdb7e8d23fc54dcef0930ac697d8de895 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 13 Nov 2010 11:55:17 +0100 Subject: block: simplify holder symlink handling Code to manage symlinks in /sys/block/*/{holders|slaves} are overly complex with multiple holder considerations, redundant extra references to all involved kobjects, unused generic kobject holder support and unnecessary mixup with bd_claim/release functionalities. Strip it down to what's necessary (single gendisk holder) and make it use a separate interface. This is a step for cleaning up bd_claim/release. This patch makes dm-table slightly more complex but it will be simplified again with further changes. Signed-off-by: Tejun Heo Acked-by: Neil Brown Acked-by: Mike Snitzer Cc: dm-devel@redhat.com --- fs/block_dev.c | 322 ++++++++------------------------------------------------- 1 file changed, 44 insertions(+), 278 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 06e8ff12b97c..9329068684d2 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -426,9 +426,6 @@ static void init_once(void *foo) mutex_init(&bdev->bd_mutex); INIT_LIST_HEAD(&bdev->bd_inodes); INIT_LIST_HEAD(&bdev->bd_list); -#ifdef CONFIG_SYSFS - INIT_LIST_HEAD(&bdev->bd_holder_list); -#endif inode_init_once(&ei->vfs_inode); /* Initialize mutex for freeze. 
*/ mutex_init(&bdev->bd_fsfreeze_mutex); @@ -881,314 +878,83 @@ void bd_release(struct block_device *bdev) EXPORT_SYMBOL(bd_release); #ifdef CONFIG_SYSFS -/* - * Functions for bd_claim_by_kobject / bd_release_from_kobject - * - * If a kobject is passed to bd_claim_by_kobject() - * and the kobject has a parent directory, - * following symlinks are created: - * o from the kobject to the claimed bdev - * o from "holders" directory of the bdev to the parent of the kobject - * bd_release_from_kobject() removes these symlinks. - * - * Example: - * If /dev/dm-0 maps to /dev/sda, kobject corresponding to - * /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then: - * /sys/block/dm-0/slaves/sda --> /sys/block/sda - * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0 - */ - static int add_symlink(struct kobject *from, struct kobject *to) { - if (!from || !to) - return 0; return sysfs_create_link(from, to, kobject_name(to)); } static void del_symlink(struct kobject *from, struct kobject *to) { - if (!from || !to) - return; sysfs_remove_link(from, kobject_name(to)); } -/* - * 'struct bd_holder' contains pointers to kobjects symlinked by - * bd_claim_by_kobject. - * It's connected to bd_holder_list which is protected by bdev->bd_sem. - */ -struct bd_holder { - struct list_head list; /* chain of holders of the bdev */ - int count; /* references from the holder */ - struct kobject *sdir; /* holder object, e.g. "/block/dm-0/slaves" */ - struct kobject *hdev; /* e.g. "/block/dm-0" */ - struct kobject *hdir; /* e.g. "/block/sda/holders" */ - struct kobject *sdev; /* e.g. "/block/sda" */ -}; - -/* - * Get references of related kobjects at once. - * Returns 1 on success. 0 on failure. - * - * Should call bd_holder_release_dirs() after successful use. - */ -static int bd_holder_grab_dirs(struct block_device *bdev, - struct bd_holder *bo) -{ - if (!bdev || !bo) - return 0; - - bo->sdir = kobject_get(bo->sdir); - if (!bo->sdir) - return 0; - - bo->hdev = kobject_get(bo->sdir->parent); - if (!bo->hdev) - goto fail_put_sdir; - - bo->sdev = kobject_get(&part_to_dev(bdev->bd_part)->kobj); - if (!bo->sdev) - goto fail_put_hdev; - - bo->hdir = kobject_get(bdev->bd_part->holder_dir); - if (!bo->hdir) - goto fail_put_sdev; - - return 1; - -fail_put_sdev: - kobject_put(bo->sdev); -fail_put_hdev: - kobject_put(bo->hdev); -fail_put_sdir: - kobject_put(bo->sdir); - - return 0; -} - -/* Put references of related kobjects at once. */ -static void bd_holder_release_dirs(struct bd_holder *bo) -{ - kobject_put(bo->hdir); - kobject_put(bo->sdev); - kobject_put(bo->hdev); - kobject_put(bo->sdir); -} - -static struct bd_holder *alloc_bd_holder(struct kobject *kobj) -{ - struct bd_holder *bo; - - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (!bo) - return NULL; - - bo->count = 1; - bo->sdir = kobj; - - return bo; -} - -static void free_bd_holder(struct bd_holder *bo) -{ - kfree(bo); -} - /** - * find_bd_holder - find matching struct bd_holder from the block device + * bd_link_disk_holder - create symlinks between holding disk and slave bdev + * @bdev: the claimed slave bdev + * @disk: the holding disk * - * @bdev: struct block device to be searched - * @bo: target struct bd_holder + * This functions creates the following sysfs symlinks. * - * Returns matching entry with @bo in @bdev->bd_holder_list. - * If found, increment the reference count and return the pointer. - * If not found, returns NULL. 
- */ -static struct bd_holder *find_bd_holder(struct block_device *bdev, - struct bd_holder *bo) -{ - struct bd_holder *tmp; - - list_for_each_entry(tmp, &bdev->bd_holder_list, list) - if (tmp->sdir == bo->sdir) { - tmp->count++; - return tmp; - } - - return NULL; -} - -/** - * add_bd_holder - create sysfs symlinks for bd_claim() relationship + * - from "slaves" directory of the holder @disk to the claimed @bdev + * - from "holders" directory of the @bdev to the holder @disk * - * @bdev: block device to be bd_claimed - * @bo: preallocated and initialized by alloc_bd_holder() + * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is + * passed to bd_link_disk_holder(), then: * - * Add @bo to @bdev->bd_holder_list, create symlinks. + * /sys/block/dm-0/slaves/sda --> /sys/block/sda + * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0 * - * Returns 0 if symlinks are created. - * Returns -ve if something fails. - */ -static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo) -{ - int err; - - if (!bo) - return -EINVAL; - - if (!bd_holder_grab_dirs(bdev, bo)) - return -EBUSY; - - err = add_symlink(bo->sdir, bo->sdev); - if (err) - return err; - - err = add_symlink(bo->hdir, bo->hdev); - if (err) { - del_symlink(bo->sdir, bo->sdev); - return err; - } - - list_add_tail(&bo->list, &bdev->bd_holder_list); - return 0; -} - -/** - * del_bd_holder - delete sysfs symlinks for bd_claim() relationship - * - * @bdev: block device to be bd_claimed - * @kobj: holder's kobject - * - * If there is matching entry with @kobj in @bdev->bd_holder_list - * and no other bd_claim() from the same kobject, - * remove the struct bd_holder from the list, delete symlinks for it. - * - * Returns a pointer to the struct bd_holder when it's removed from the list - * and ready to be freed. - * Returns NULL if matching claim isn't found or there is other bd_claim() - * by the same kobject. - */ -static struct bd_holder *del_bd_holder(struct block_device *bdev, - struct kobject *kobj) -{ - struct bd_holder *bo; - - list_for_each_entry(bo, &bdev->bd_holder_list, list) { - if (bo->sdir == kobj) { - bo->count--; - BUG_ON(bo->count < 0); - if (!bo->count) { - list_del(&bo->list); - del_symlink(bo->sdir, bo->sdev); - del_symlink(bo->hdir, bo->hdev); - bd_holder_release_dirs(bo); - return bo; - } - break; - } - } - - return NULL; -} - -/** - * bd_claim_by_kobject - bd_claim() with additional kobject signature - * - * @bdev: block device to be claimed - * @holder: holder's signature - * @kobj: holder's kobject + * The caller must have claimed @bdev before calling this function and + * ensure that both @bdev and @disk are valid during the creation and + * lifetime of these symlinks. * - * Do bd_claim() and if it succeeds, create sysfs symlinks between - * the bdev and the holder's kobject. - * Use bd_release_from_kobject() when relesing the claimed bdev. + * CONTEXT: + * Might sleep. * - * Returns 0 on success. (same as bd_claim()) - * Returns errno on failure. + * RETURNS: + * 0 on success, -errno on failure. 
*/ -static int bd_claim_by_kobject(struct block_device *bdev, void *holder, - struct kobject *kobj) +int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) { - int err; - struct bd_holder *bo, *found; - - if (!kobj) - return -EINVAL; - - bo = alloc_bd_holder(kobj); - if (!bo) - return -ENOMEM; + int ret = 0; mutex_lock(&bdev->bd_mutex); - err = bd_claim(bdev, holder); - if (err) - goto fail; + WARN_ON_ONCE(!bdev->bd_holder || bdev->bd_holder_disk); - found = find_bd_holder(bdev, bo); - if (found) - goto fail; + /* FIXME: remove the following once add_disk() handles errors */ + if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir)) + goto out_unlock; - err = add_bd_holder(bdev, bo); - if (err) - bd_release(bdev); - else - bo = NULL; -fail: - mutex_unlock(&bdev->bd_mutex); - free_bd_holder(bo); - return err; -} + ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); + if (ret) + goto out_unlock; -/** - * bd_release_from_kobject - bd_release() with additional kobject signature - * - * @bdev: block device to be released - * @kobj: holder's kobject - * - * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject(). - */ -static void bd_release_from_kobject(struct block_device *bdev, - struct kobject *kobj) -{ - if (!kobj) - return; + ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); + if (ret) { + del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); + goto out_unlock; + } - mutex_lock(&bdev->bd_mutex); - bd_release(bdev); - free_bd_holder(del_bd_holder(bdev, kobj)); + bdev->bd_holder_disk = disk; +out_unlock: mutex_unlock(&bdev->bd_mutex); + return ret; } +EXPORT_SYMBOL_GPL(bd_link_disk_holder); -/** - * bd_claim_by_disk - wrapper function for bd_claim_by_kobject() - * - * @bdev: block device to be claimed - * @holder: holder's signature - * @disk: holder's gendisk - * - * Call bd_claim_by_kobject() with getting @disk->slave_dir. - */ -int bd_claim_by_disk(struct block_device *bdev, void *holder, - struct gendisk *disk) +void bd_unlink_disk_holder(struct block_device *bdev) { - return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir)); -} -EXPORT_SYMBOL_GPL(bd_claim_by_disk); + struct gendisk *disk = bdev->bd_holder_disk; -/** - * bd_release_from_disk - wrapper function for bd_release_from_kobject() - * - * @bdev: block device to be claimed - * @disk: holder's gendisk - * - * Call bd_release_from_kobject() and put @disk->slave_dir. - */ -void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk) -{ - bd_release_from_kobject(bdev, disk->slave_dir); - kobject_put(disk->slave_dir); + bdev->bd_holder_disk = NULL; + if (!disk) + return; + + del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); + del_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); } -EXPORT_SYMBOL_GPL(bd_release_from_disk); +EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); #endif /* -- cgit v1.2.2 From e525fd89d380c4a94c0d63913a1dd1a593ed25e7 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 13 Nov 2010 11:55:17 +0100 Subject: block: make blkdev_get/put() handle exclusive access Over time, block layer has accumulated a set of APIs dealing with bdev open, close, claim and release. * blkdev_get/put() are the primary open and close functions. * bd_claim/release() deal with exclusive open. * open/close_bdev_exclusive() are combination of open and claim and the other way around, respectively. * bd_link/unlink_disk_holder() to create and remove holder/slave symlinks. 
* open_by_devnum() wraps bdget() + blkdev_get(). The interface is a bit confusing and the decoupling of open and claim makes it impossible to properly guarantee exclusive access as in-kernel open + claim sequence can disturb the existing exclusive open even before the block layer knows the current open if for another exclusive access. Reorganize the interface such that, * blkdev_get() is extended to include exclusive access management. @holder argument is added and, if is @FMODE_EXCL specified, it will gain exclusive access atomically w.r.t. other exclusive accesses. * blkdev_put() is similarly extended. It now takes @mode argument and if @FMODE_EXCL is set, it releases an exclusive access. Also, when the last exclusive claim is released, the holder/slave symlinks are removed automatically. * bd_claim/release() and close_bdev_exclusive() are no longer necessary and either made static or removed. * bd_link_disk_holder() remains the same but bd_unlink_disk_holder() is no longer necessary and removed. * open_bdev_exclusive() becomes a simple wrapper around lookup_bdev() and blkdev_get(). It also has an unexpected extra bdev_read_only() test which probably should be moved into blkdev_get(). * open_by_devnum() is modified to take @holder argument and pass it to blkdev_get(). Most of bdev open/close operations are unified into blkdev_get/put() and most exclusive accesses are tested atomically at the open time (as it should). This cleans up code and removes some, both valid and invalid, but unnecessary all the same, corner cases. open_bdev_exclusive() and open_by_devnum() can use further cleanup - rename to blkdev_get_by_path() and blkdev_get_by_devt() and drop special features. Well, let's leave them for another day. Most conversions are straight-forward. drbd conversion is a bit more involved as there was some reordering, but the logic should stay the same. 
Signed-off-by: Tejun Heo Acked-by: Neil Brown Acked-by: Ryusuke Konishi Acked-by: Mike Snitzer Acked-by: Philipp Reisner Cc: Peter Osterlund Cc: Martin Schwidefsky Cc: Heiko Carstens Cc: Jan Kara Cc: Andrew Morton Cc: Andreas Dilger Cc: "Theodore Ts'o" Cc: Mark Fasheh Cc: Joel Becker Cc: Alex Elder Cc: Christoph Hellwig Cc: dm-devel@redhat.com Cc: drbd-dev@lists.linbit.com Cc: Leo Chen Cc: Scott Branden Cc: Chris Mason Cc: Steven Whitehouse Cc: Dave Kleikamp Cc: Joern Engel Cc: reiserfs-devel@vger.kernel.org Cc: Alexander Viro --- fs/block_dev.c | 149 +++++++++++++++---------------------------- fs/btrfs/volumes.c | 14 ++-- fs/ext3/super.c | 12 +--- fs/ext4/super.c | 12 +--- fs/gfs2/ops_fstype.c | 4 +- fs/jfs/jfs_logmgr.c | 17 ++--- fs/logfs/dev_bdev.c | 4 +- fs/nilfs2/super.c | 4 +- fs/ocfs2/cluster/heartbeat.c | 2 +- fs/partitions/check.c | 2 +- fs/reiserfs/journal.c | 17 ++--- fs/super.c | 14 ++-- fs/xfs/linux-2.6/xfs_super.c | 2 +- 13 files changed, 87 insertions(+), 166 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 9329068684d2..fc48912354d1 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -660,7 +660,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, else if (bdev->bd_contains == bdev) return true; /* is a whole device which isn't held */ - else if (whole->bd_holder == bd_claim) + else if (whole->bd_holder == bd_may_claim) return true; /* is a partition of a device that is being partitioned */ else if (whole->bd_holder != NULL) return false; /* is a partition of a held device */ @@ -807,10 +807,10 @@ static void __bd_claim(struct block_device *bdev, struct block_device *whole, { /* note that for a whole device bd_holders * will be incremented twice, and bd_holder will - * be set to bd_claim before being set to holder + * be set to bd_may_claim before being set to holder */ whole->bd_holders++; - whole->bd_holder = bd_claim; + whole->bd_holder = bd_may_claim; bdev->bd_holders++; bdev->bd_holder = holder; } @@ -835,37 +835,7 @@ static void bd_finish_claiming(struct block_device *bdev, __bd_abort_claiming(whole, holder); /* not actually an abort */ } -/** - * bd_claim - claim a block device - * @bdev: block device to claim - * @holder: holder trying to claim @bdev - * - * Try to claim @bdev which must have been opened successfully. - * - * CONTEXT: - * Might sleep. - * - * RETURNS: - * 0 if successful, -EBUSY if @bdev is already claimed. 
- */ -int bd_claim(struct block_device *bdev, void *holder) -{ - struct block_device *whole = bdev->bd_contains; - int res; - - might_sleep(); - - spin_lock(&bdev_lock); - res = bd_prepare_to_claim(bdev, whole, holder); - if (res == 0) - __bd_claim(bdev, whole, holder); - spin_unlock(&bdev_lock); - - return res; -} -EXPORT_SYMBOL(bd_claim); - -void bd_release(struct block_device *bdev) +static void bd_release(struct block_device *bdev) { spin_lock(&bdev_lock); if (!--bdev->bd_contains->bd_holders) @@ -875,8 +845,6 @@ void bd_release(struct block_device *bdev) spin_unlock(&bdev_lock); } -EXPORT_SYMBOL(bd_release); - #ifdef CONFIG_SYSFS static int add_symlink(struct kobject *from, struct kobject *to) { @@ -943,7 +911,7 @@ out_unlock: } EXPORT_SYMBOL_GPL(bd_link_disk_holder); -void bd_unlink_disk_holder(struct block_device *bdev) +static void bd_unlink_disk_holder(struct block_device *bdev) { struct gendisk *disk = bdev->bd_holder_disk; @@ -954,7 +922,9 @@ void bd_unlink_disk_holder(struct block_device *bdev) del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); del_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); } -EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); +#else +static inline void bd_unlink_disk_holder(struct block_device *bdev) +{ } #endif /* @@ -964,12 +934,12 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); * to be used for internal purposes. If you ever need it - reconsider * your API. */ -struct block_device *open_by_devnum(dev_t dev, fmode_t mode) +struct block_device *open_by_devnum(dev_t dev, fmode_t mode, void *holder) { struct block_device *bdev = bdget(dev); int err = -ENOMEM; if (bdev) - err = blkdev_get(bdev, mode); + err = blkdev_get(bdev, mode, holder); return err ? ERR_PTR(err) : bdev; } @@ -1235,17 +1205,37 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) return ret; } -int blkdev_get(struct block_device *bdev, fmode_t mode) +int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) { - return __blkdev_get(bdev, mode, 0); + struct block_device *whole = NULL; + int res; + + WARN_ON_ONCE((mode & FMODE_EXCL) && !holder); + + if ((mode & FMODE_EXCL) && holder) { + whole = bd_start_claiming(bdev, holder); + if (IS_ERR(whole)) { + bdput(bdev); + return PTR_ERR(whole); + } + } + + res = __blkdev_get(bdev, mode, 0); + + if (whole) { + if (res == 0) + bd_finish_claiming(bdev, whole, holder); + else + bd_abort_claiming(whole, holder); + } + + return res; } EXPORT_SYMBOL(blkdev_get); static int blkdev_open(struct inode * inode, struct file * filp) { - struct block_device *whole = NULL; struct block_device *bdev; - int res; /* * Preserve backwards compatibility and allow large file access @@ -1266,26 +1256,9 @@ static int blkdev_open(struct inode * inode, struct file * filp) if (bdev == NULL) return -ENOMEM; - if (filp->f_mode & FMODE_EXCL) { - whole = bd_start_claiming(bdev, filp); - if (IS_ERR(whole)) { - bdput(bdev); - return PTR_ERR(whole); - } - } - filp->f_mapping = bdev->bd_inode->i_mapping; - res = blkdev_get(bdev, filp->f_mode); - - if (whole) { - if (res == 0) - bd_finish_claiming(bdev, whole, filp); - else - bd_abort_claiming(whole, filp); - } - - return res; + return blkdev_get(bdev, filp->f_mode, filp); } static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) @@ -1329,6 +1302,13 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) int blkdev_put(struct block_device *bdev, fmode_t mode) { + if (mode & FMODE_EXCL) { + mutex_lock(&bdev->bd_mutex); + 
bd_release(bdev); + if (!bdev->bd_holders) + bd_unlink_disk_holder(bdev); + mutex_unlock(&bdev->bd_mutex); + } return __blkdev_put(bdev, mode, 0); } EXPORT_SYMBOL(blkdev_put); @@ -1336,8 +1316,7 @@ EXPORT_SYMBOL(blkdev_put); static int blkdev_close(struct inode * inode, struct file * filp) { struct block_device *bdev = I_BDEV(filp->f_mapping->host); - if (bdev->bd_holder == filp) - bd_release(bdev); + return blkdev_put(bdev, filp->f_mode); } @@ -1494,55 +1473,27 @@ EXPORT_SYMBOL(lookup_bdev); */ struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder) { - struct block_device *bdev, *whole; + struct block_device *bdev; int error; bdev = lookup_bdev(path); if (IS_ERR(bdev)) return bdev; - whole = bd_start_claiming(bdev, holder); - if (IS_ERR(whole)) { - bdput(bdev); - return whole; - } - - error = blkdev_get(bdev, mode); + error = blkdev_get(bdev, mode | FMODE_EXCL, holder); if (error) - goto out_abort_claiming; + return ERR_PTR(error); - error = -EACCES; - if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) - goto out_blkdev_put; + if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) { + blkdev_put(bdev, mode); + return ERR_PTR(-EACCES); + } - bd_finish_claiming(bdev, whole, holder); return bdev; - -out_blkdev_put: - blkdev_put(bdev, mode); -out_abort_claiming: - bd_abort_claiming(whole, holder); - return ERR_PTR(error); } EXPORT_SYMBOL(open_bdev_exclusive); -/** - * close_bdev_exclusive - close a blockdevice opened by open_bdev_exclusive() - * - * @bdev: blockdevice to close - * @mode: mode, must match that used to open. - * - * This is the counterpart to open_bdev_exclusive(). - */ -void close_bdev_exclusive(struct block_device *bdev, fmode_t mode) -{ - bd_release(bdev); - blkdev_put(bdev, mode); -} - -EXPORT_SYMBOL(close_bdev_exclusive); - int __invalidate_device(struct block_device *bdev) { struct super_block *sb = get_super(bdev); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d39596224d21..f1b729d3b883 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -489,7 +489,7 @@ again: continue; if (device->bdev) { - close_bdev_exclusive(device->bdev, device->mode); + blkdev_put(device->bdev, device->mode | FMODE_EXCL); device->bdev = NULL; fs_devices->open_devices--; } @@ -523,7 +523,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) list_for_each_entry(device, &fs_devices->devices, dev_list) { if (device->bdev) { - close_bdev_exclusive(device->bdev, device->mode); + blkdev_put(device->bdev, device->mode | FMODE_EXCL); fs_devices->open_devices--; } if (device->writeable) { @@ -638,7 +638,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, error_brelse: brelse(bh); error_close: - close_bdev_exclusive(bdev, flags); + blkdev_put(bdev, flags | FMODE_EXCL); error: continue; } @@ -716,7 +716,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, brelse(bh); error_close: - close_bdev_exclusive(bdev, flags); + blkdev_put(bdev, flags | FMODE_EXCL); error: mutex_unlock(&uuid_mutex); return ret; @@ -1244,7 +1244,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) root->fs_info->fs_devices->latest_bdev = next_device->bdev; if (device->bdev) { - close_bdev_exclusive(device->bdev, device->mode); + blkdev_put(device->bdev, device->mode | FMODE_EXCL); device->bdev = NULL; device->fs_devices->open_devices--; } @@ -1287,7 +1287,7 @@ error_brelse: brelse(bh); error_close: if (bdev) - close_bdev_exclusive(bdev, FMODE_READ); + blkdev_put(bdev, FMODE_READ | FMODE_EXCL); out: 
mutex_unlock(&root->fs_info->volume_mutex); mutex_unlock(&uuid_mutex); @@ -1565,7 +1565,7 @@ out: mutex_unlock(&root->fs_info->volume_mutex); return ret; error: - close_bdev_exclusive(bdev, 0); + blkdev_put(bdev, FMODE_EXCL); if (seeding_dev) { mutex_unlock(&uuid_mutex); up_write(&sb->s_umount); diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 2fedaf8b5012..23e7513dba9c 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -347,7 +347,7 @@ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb) struct block_device *bdev; char b[BDEVNAME_SIZE]; - bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); + bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; @@ -364,8 +364,7 @@ fail: */ static int ext3_blkdev_put(struct block_device *bdev) { - bd_release(bdev); - return blkdev_put(bdev, FMODE_READ|FMODE_WRITE); + return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static int ext3_blkdev_remove(struct ext3_sb_info *sbi) @@ -2136,13 +2135,6 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb, if (bdev == NULL) return NULL; - if (bd_claim(bdev, sb)) { - ext3_msg(sb, KERN_ERR, - "error: failed to claim external journal device"); - blkdev_put(bdev, FMODE_READ|FMODE_WRITE); - return NULL; - } - blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 61182fe6254e..5dd0b3e76fa8 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -647,7 +647,7 @@ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb) struct block_device *bdev; char b[BDEVNAME_SIZE]; - bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); + bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; @@ -663,8 +663,7 @@ fail: */ static int ext4_blkdev_put(struct block_device *bdev) { - bd_release(bdev); - return blkdev_put(bdev, FMODE_READ|FMODE_WRITE); + return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static int ext4_blkdev_remove(struct ext4_sb_info *sbi) @@ -3758,13 +3757,6 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, if (bdev == NULL) return NULL; - if (bd_claim(bdev, sb)) { - ext4_msg(sb, KERN_ERR, - "failed to claim external journal device"); - blkdev_put(bdev, FMODE_READ|FMODE_WRITE); - return NULL; - } - blocksize = sb->s_blocksize; hblock = bdev_logical_block_size(bdev); if (blocksize < hblock) { diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 3eb1393f7b81..c1f0763a022b 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -1298,7 +1298,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, goto error_bdev; if (s->s_root) - close_bdev_exclusive(bdev, mode); + blkdev_put(bdev, mode | FMODE_EXCL); memset(&args, 0, sizeof(args)); args.ar_quota = GFS2_QUOTA_DEFAULT; @@ -1342,7 +1342,7 @@ error_super: deactivate_locked_super(s); return ERR_PTR(error); error_bdev: - close_bdev_exclusive(bdev, mode); + blkdev_put(bdev, mode | FMODE_EXCL); return ERR_PTR(error); } diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index e1b8493b9aaa..5a290f22dcc3 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1120,16 +1120,13 @@ int lmLogOpen(struct super_block *sb) * file systems to log may have n-to-1 relationship; */ - bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE); + bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, + log); if (IS_ERR(bdev)) { rc = 
-PTR_ERR(bdev); goto free; } - if ((rc = bd_claim(bdev, log))) { - goto close; - } - log->bdev = bdev; memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid)); @@ -1137,7 +1134,7 @@ int lmLogOpen(struct super_block *sb) * initialize log: */ if ((rc = lmLogInit(log))) - goto unclaim; + goto close; list_add(&log->journal_list, &jfs_external_logs); @@ -1163,11 +1160,8 @@ journal_found: list_del(&log->journal_list); lbmLogShutdown(log); - unclaim: - bd_release(bdev); - close: /* close external log device */ - blkdev_put(bdev, FMODE_READ|FMODE_WRITE); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); free: /* free log descriptor */ mutex_unlock(&jfs_log_mutex); @@ -1512,8 +1506,7 @@ int lmLogClose(struct super_block *sb) bdev = log->bdev; rc = lmLogShutdown(log); - bd_release(bdev); - blkdev_put(bdev, FMODE_READ|FMODE_WRITE); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); kfree(log); diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index 92ca6fbe09bd..734b9025858e 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c @@ -300,7 +300,7 @@ static int bdev_write_sb(struct super_block *sb, struct page *page) static void bdev_put_device(struct logfs_super *s) { - close_bdev_exclusive(s->s_bdev, FMODE_READ|FMODE_WRITE); + blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } static int bdev_can_write_buf(struct super_block *sb, u64 ofs) @@ -331,7 +331,7 @@ int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type, if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) { int mtdnr = MINOR(bdev->bd_dev); - close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); return logfs_get_sb_mtd(p, mtdnr); } diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index f804d41ec9d3..756a6798d7c8 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1233,7 +1233,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, } if (!s_new) - close_bdev_exclusive(sd.bdev, mode); + blkdev_put(sd.bdev, mode | FMODE_EXCL); return root_dentry; @@ -1242,7 +1242,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, failed: if (!s_new) - close_bdev_exclusive(sd.bdev, mode); + blkdev_put(sd.bdev, mode | FMODE_EXCL); return ERR_PTR(err); } diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 52c7557f3e25..d0a2721eaceb 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1674,7 +1674,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, goto out; reg->hr_bdev = I_BDEV(filp->f_mapping->host); - ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ); + ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ, NULL); if (ret) { reg->hr_bdev = NULL; goto out; diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 0a8b0ad0c7e2..2e6501d034ab 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -549,7 +549,7 @@ void register_disk(struct gendisk *disk) goto exit; bdev->bd_invalidated = 1; - err = blkdev_get(bdev, FMODE_READ); + err = blkdev_get(bdev, FMODE_READ, NULL); if (err < 0) goto exit; blkdev_put(bdev, FMODE_READ); diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 076c8b194682..b488136f5ace 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -2552,8 +2552,6 @@ static int release_journal_dev(struct super_block *super, result = 0; if (journal->j_dev_bd != NULL) { - if (journal->j_dev_bd->bd_dev != super->s_dev) - bd_release(journal->j_dev_bd); result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode); journal->j_dev_bd = 
NULL; } @@ -2571,7 +2569,7 @@ static int journal_init_dev(struct super_block *super, { int result; dev_t jdev; - fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE; + fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; char b[BDEVNAME_SIZE]; result = 0; @@ -2585,7 +2583,9 @@ static int journal_init_dev(struct super_block *super, /* there is no "jdev" option and journal is on separate device */ if ((!jdev_name || !jdev_name[0])) { - journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode); + if (jdev == super->s_dev) + blkdev_mode &= ~FMODE_EXCL; + journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode, journal); journal->j_dev_mode = blkdev_mode; if (IS_ERR(journal->j_dev_bd)) { result = PTR_ERR(journal->j_dev_bd); @@ -2594,15 +2594,8 @@ static int journal_init_dev(struct super_block *super, "cannot init journal device '%s': %i", __bdevname(jdev, b), result); return result; - } else if (jdev != super->s_dev) { - result = bd_claim(journal->j_dev_bd, journal); - if (result) { - blkdev_put(journal->j_dev_bd, blkdev_mode); - return result; - } - + } else if (jdev != super->s_dev) set_blocksize(journal->j_dev_bd, super->s_blocksize); - } return 0; } diff --git a/fs/super.c b/fs/super.c index ca696155cd9a..22374bf0ba87 100644 --- a/fs/super.c +++ b/fs/super.c @@ -801,13 +801,13 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, /* * s_umount nests inside bd_mutex during - * __invalidate_device(). close_bdev_exclusive() - * acquires bd_mutex and can't be called under - * s_umount. Drop s_umount temporarily. This is safe - * as we're holding an active reference. + * __invalidate_device(). blkdev_put() acquires + * bd_mutex and can't be called under s_umount. Drop + * s_umount temporarily. This is safe as we're + * holding an active reference. */ up_write(&s->s_umount); - close_bdev_exclusive(bdev, mode); + blkdev_put(bdev, mode | FMODE_EXCL); down_write(&s->s_umount); } else { char b[BDEVNAME_SIZE]; @@ -831,7 +831,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, error_s: error = PTR_ERR(s); error_bdev: - close_bdev_exclusive(bdev, mode); + blkdev_put(bdev, mode | FMODE_EXCL); error: return ERR_PTR(error); } @@ -862,7 +862,7 @@ void kill_block_super(struct super_block *sb) bdev->bd_super = NULL; generic_shutdown_super(sb); sync_blockdev(bdev); - close_bdev_exclusive(bdev, mode); + blkdev_put(bdev, mode | FMODE_EXCL); } EXPORT_SYMBOL(kill_block_super); diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 9f3a78fe6ae4..a1a6e5ceea67 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -623,7 +623,7 @@ xfs_blkdev_put( struct block_device *bdev) { if (bdev) - close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); } /* -- cgit v1.2.2 From 6a027eff62f6ae32d49f2ae5dadd6f4eee1ddae2 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 13 Nov 2010 11:55:17 +0100 Subject: block: reorganize claim/release implementation With claim/release rolled into blkdev_get/put(), there's no reason to keep bd_abort/finish_claim(), __bd_claim() and bd_release() as separate functions. It only makes the code difficult to follow. Collapse them into blkdev_get/put(). This will ease future changes around claim/release. 
Signed-off-by: Tejun Heo --- fs/block_dev.c | 127 ++++++++++++++++++++++----------------------------------- 1 file changed, 48 insertions(+), 79 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index fc48912354d1..269bfbbd10fc 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -772,79 +772,6 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, } } -/* releases bdev_lock */ -static void __bd_abort_claiming(struct block_device *whole, void *holder) -{ - BUG_ON(whole->bd_claiming != holder); - whole->bd_claiming = NULL; - wake_up_bit(&whole->bd_claiming, 0); - - spin_unlock(&bdev_lock); - bdput(whole); -} - -/** - * bd_abort_claiming - abort claiming a block device - * @whole: whole block device returned by bd_start_claiming() - * @holder: holder trying to claim @bdev - * - * Abort a claiming block started by bd_start_claiming(). Note that - * @whole is not the block device to be claimed but the whole device - * returned by bd_start_claiming(). - * - * CONTEXT: - * Grabs and releases bdev_lock. - */ -static void bd_abort_claiming(struct block_device *whole, void *holder) -{ - spin_lock(&bdev_lock); - __bd_abort_claiming(whole, holder); /* releases bdev_lock */ -} - -/* increment holders when we have a legitimate claim. requires bdev_lock */ -static void __bd_claim(struct block_device *bdev, struct block_device *whole, - void *holder) -{ - /* note that for a whole device bd_holders - * will be incremented twice, and bd_holder will - * be set to bd_may_claim before being set to holder - */ - whole->bd_holders++; - whole->bd_holder = bd_may_claim; - bdev->bd_holders++; - bdev->bd_holder = holder; -} - -/** - * bd_finish_claiming - finish claiming a block device - * @bdev: block device of interest (passed to bd_start_claiming()) - * @whole: whole block device returned by bd_start_claiming() - * @holder: holder trying to claim @bdev - * - * Finish a claiming block started by bd_start_claiming(). - * - * CONTEXT: - * Grabs and releases bdev_lock. 
- */ -static void bd_finish_claiming(struct block_device *bdev, - struct block_device *whole, void *holder) -{ - spin_lock(&bdev_lock); - BUG_ON(!bd_may_claim(bdev, whole, holder)); - __bd_claim(bdev, whole, holder); - __bd_abort_claiming(whole, holder); /* not actually an abort */ -} - -static void bd_release(struct block_device *bdev) -{ - spin_lock(&bdev_lock); - if (!--bdev->bd_contains->bd_holders) - bdev->bd_contains->bd_holder = NULL; - if (!--bdev->bd_holders) - bdev->bd_holder = NULL; - spin_unlock(&bdev_lock); -} - #ifdef CONFIG_SYSFS static int add_symlink(struct kobject *from, struct kobject *to) { @@ -1223,10 +1150,30 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) res = __blkdev_get(bdev, mode, 0); if (whole) { - if (res == 0) - bd_finish_claiming(bdev, whole, holder); - else - bd_abort_claiming(whole, holder); + /* finish claiming */ + spin_lock(&bdev_lock); + + if (res == 0) { + BUG_ON(!bd_may_claim(bdev, whole, holder)); + /* + * Note that for a whole device bd_holders + * will be incremented twice, and bd_holder + * will be set to bd_may_claim before being + * set to holder + */ + whole->bd_holders++; + whole->bd_holder = bd_may_claim; + bdev->bd_holders++; + bdev->bd_holder = holder; + } + + /* tell others that we're done */ + BUG_ON(whole->bd_claiming != holder); + whole->bd_claiming = NULL; + wake_up_bit(&whole->bd_claiming, 0); + + spin_unlock(&bdev_lock); + bdput(whole); } return res; @@ -1272,6 +1219,7 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) bdev->bd_part_count--; if (!--bdev->bd_openers) { + WARN_ON_ONCE(bdev->bd_holders); sync_blockdev(bdev); kill_bdev(bdev); } @@ -1303,10 +1251,31 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) int blkdev_put(struct block_device *bdev, fmode_t mode) { if (mode & FMODE_EXCL) { + bool bdev_free; + + /* + * Release a claim on the device. The holder fields + * are protected with bdev_lock. bd_mutex is to + * synchronize disk_holder unlinking. + */ mutex_lock(&bdev->bd_mutex); - bd_release(bdev); - if (!bdev->bd_holders) + spin_lock(&bdev_lock); + + WARN_ON_ONCE(--bdev->bd_holders < 0); + WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0); + + /* bd_contains might point to self, check in a separate step */ + if ((bdev_free = !bdev->bd_holders)) + bdev->bd_holder = NULL; + if (!bdev->bd_contains->bd_holders) + bdev->bd_contains->bd_holder = NULL; + + spin_unlock(&bdev_lock); + + /* if this was the last claim, holder link should go too */ + if (bdev_free) bd_unlink_disk_holder(bdev); + mutex_unlock(&bdev->bd_mutex); } return __blkdev_put(bdev, mode, 0); -- cgit v1.2.2 From 75f1dc0d076d1c1168f2115f1941ea627d38bd5a Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 13 Nov 2010 11:55:17 +0100 Subject: block: check bdev_read_only() from blkdev_get() bdev read-only status can be queried using bdev_read_only() and may change while the device is being opened. Enforce it by checking it from blkdev_get() after open succeeds. This makes bdev_read_only() check in open_bdev_exclusive() and fsg_lun_open() unnecessary. Drop them. 
Signed-off-by: Tejun Heo Cc: David Brownell Cc: linux-usb@vger.kernel.org --- fs/block_dev.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 269bfbbd10fc..606a5259f87f 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1149,6 +1149,12 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) res = __blkdev_get(bdev, mode, 0); + /* __blkdev_get() may alter read only status, check it afterwards */ + if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { + __blkdev_put(bdev, mode, 0); + res = -EACCES; + } + if (whole) { /* finish claiming */ spin_lock(&bdev_lock); @@ -1453,11 +1459,6 @@ struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *h if (error) return ERR_PTR(error); - if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) { - blkdev_put(bdev, mode); - return ERR_PTR(-EACCES); - } - return bdev; } -- cgit v1.2.2 From d4d77629953eabd3c14f6fa5746f6b28babfc55f Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Sat, 13 Nov 2010 11:55:18 +0100 Subject: block: clean up blkdev_get() wrappers and their users After recent blkdev_get() modifications, open_by_devnum() and open_bdev_exclusive() are simple wrappers around blkdev_get(). Replace them with blkdev_get_by_dev() and blkdev_get_by_path(). blkdev_get_by_dev() is identical to open_by_devnum(). blkdev_get_by_path() is slightly different in that it doesn't automatically add %FMODE_EXCL to @mode. All users are converted. Most conversions are mechanical and don't introduce any behavior difference. There are several exceptions. * btrfs now sets FMODE_EXCL in btrfs_device->mode, so there's no reason to OR it explicitly on blkdev_put(). * gfs2, nilfs2 and the generic mount_bdev() now set FMODE_EXCL in sb->s_mode. * With the above changes, sb->s_mode now always should contain FMODE_EXCL. WARN_ON_ONCE() added to kill_block_super() to detect errors. The new blkdev_get_*() functions are with proper docbook comments. While at it, add function description to blkdev_get() too. Signed-off-by: Tejun Heo Cc: Philipp Reisner Cc: Neil Brown Cc: Mike Snitzer Cc: Joern Engel Cc: Chris Mason Cc: Jan Kara Cc: "Theodore Ts'o" Cc: KONISHI Ryusuke Cc: reiserfs-devel@vger.kernel.org Cc: xfs-masters@oss.sgi.com Cc: Alexander Viro --- fs/block_dev.c | 139 +++++++++++++++++++++++++++++-------------- fs/btrfs/volumes.c | 24 ++++---- fs/btrfs/volumes.h | 2 +- fs/ext3/super.c | 2 +- fs/ext4/super.c | 2 +- fs/gfs2/ops_fstype.c | 8 +-- fs/jfs/jfs_logmgr.c | 4 +- fs/logfs/dev_bdev.c | 3 +- fs/nilfs2/super.c | 8 +-- fs/reiserfs/journal.c | 6 +- fs/super.c | 9 +-- fs/xfs/linux-2.6/xfs_super.c | 3 +- 12 files changed, 132 insertions(+), 78 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 606a5259f87f..c1c1b8c3fb99 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -854,24 +854,6 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev) { } #endif -/* - * Tries to open block device by device number. Use it ONLY if you - * really do not have anything better - i.e. when you are behind a - * truly sucky interface and all you are given is a device number. _Never_ - * to be used for internal purposes. If you ever need it - reconsider - * your API. - */ -struct block_device *open_by_devnum(dev_t dev, fmode_t mode, void *holder) -{ - struct block_device *bdev = bdget(dev); - int err = -ENOMEM; - if (bdev) - err = blkdev_get(bdev, mode, holder); - return err ? 
ERR_PTR(err) : bdev; -} - -EXPORT_SYMBOL(open_by_devnum); - /** * flush_disk - invalidates all buffer-cache entries on a disk * @@ -1132,6 +1114,25 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) return ret; } +/** + * blkdev_get - open a block device + * @bdev: block_device to open + * @mode: FMODE_* mask + * @holder: exclusive holder identifier + * + * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is + * open with exclusive access. Specifying %FMODE_EXCL with %NULL + * @holder is invalid. Exclusive opens may nest for the same @holder. + * + * On success, the reference count of @bdev is unchanged. On failure, + * @bdev is put. + * + * CONTEXT: + * Might sleep. + * + * RETURNS: + * 0 on success, -errno on failure. + */ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) { struct block_device *whole = NULL; @@ -1186,6 +1187,80 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) } EXPORT_SYMBOL(blkdev_get); +/** + * blkdev_get_by_path - open a block device by name + * @path: path to the block device to open + * @mode: FMODE_* mask + * @holder: exclusive holder identifier + * + * Open the blockdevice described by the device file at @path. @mode + * and @holder are identical to blkdev_get(). + * + * On success, the returned block_device has reference count of one. + * + * CONTEXT: + * Might sleep. + * + * RETURNS: + * Pointer to block_device on success, ERR_PTR(-errno) on failure. + */ +struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, + void *holder) +{ + struct block_device *bdev; + int err; + + bdev = lookup_bdev(path); + if (IS_ERR(bdev)) + return bdev; + + err = blkdev_get(bdev, mode, holder); + if (err) + return ERR_PTR(err); + + return bdev; +} +EXPORT_SYMBOL(blkdev_get_by_path); + +/** + * blkdev_get_by_dev - open a block device by device number + * @dev: device number of block device to open + * @mode: FMODE_* mask + * @holder: exclusive holder identifier + * + * Open the blockdevice described by device number @dev. @mode and + * @holder are identical to blkdev_get(). + * + * Use it ONLY if you really do not have anything better - i.e. when + * you are behind a truly sucky interface and all you are given is a + * device number. _Never_ to be used for internal purposes. If you + * ever need it - reconsider your API. + * + * On success, the returned block_device has reference count of one. + * + * CONTEXT: + * Might sleep. + * + * RETURNS: + * Pointer to block_device on success, ERR_PTR(-errno) on failure. + */ +struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) +{ + struct block_device *bdev; + int err; + + bdev = bdget(dev); + if (!bdev) + return ERR_PTR(-ENOMEM); + + err = blkdev_get(bdev, mode, holder); + if (err) + return ERR_PTR(err); + + return bdev; +} +EXPORT_SYMBOL(blkdev_get_by_dev); + static int blkdev_open(struct inode * inode, struct file * filp) { struct block_device *bdev; @@ -1436,34 +1511,6 @@ fail: } EXPORT_SYMBOL(lookup_bdev); -/** - * open_bdev_exclusive - open a block device by name and set it up for use - * - * @path: special file representing the block device - * @mode: FMODE_... combination to pass be used - * @holder: owner for exclusion - * - * Open the blockdevice described by the special file at @path, claim it - * for the @holder. 
- */ -struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder) -{ - struct block_device *bdev; - int error; - - bdev = lookup_bdev(path); - if (IS_ERR(bdev)) - return bdev; - - error = blkdev_get(bdev, mode | FMODE_EXCL, holder); - if (error) - return ERR_PTR(error); - - return bdev; -} - -EXPORT_SYMBOL(open_bdev_exclusive); - int __invalidate_device(struct block_device *bdev) { struct super_block *sb = get_super(bdev); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index f1b729d3b883..95324e9f9280 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -489,7 +489,7 @@ again: continue; if (device->bdev) { - blkdev_put(device->bdev, device->mode | FMODE_EXCL); + blkdev_put(device->bdev, device->mode); device->bdev = NULL; fs_devices->open_devices--; } @@ -523,7 +523,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) list_for_each_entry(device, &fs_devices->devices, dev_list) { if (device->bdev) { - blkdev_put(device->bdev, device->mode | FMODE_EXCL); + blkdev_put(device->bdev, device->mode); fs_devices->open_devices--; } if (device->writeable) { @@ -580,13 +580,15 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, int seeding = 1; int ret = 0; + flags |= FMODE_EXCL; + list_for_each_entry(device, head, dev_list) { if (device->bdev) continue; if (!device->name) continue; - bdev = open_bdev_exclusive(device->name, flags, holder); + bdev = blkdev_get_by_path(device->name, flags, holder); if (IS_ERR(bdev)) { printk(KERN_INFO "open %s failed\n", device->name); goto error; @@ -638,7 +640,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, error_brelse: brelse(bh); error_close: - blkdev_put(bdev, flags | FMODE_EXCL); + blkdev_put(bdev, flags); error: continue; } @@ -684,7 +686,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, mutex_lock(&uuid_mutex); - bdev = open_bdev_exclusive(path, flags, holder); + flags |= FMODE_EXCL; + bdev = blkdev_get_by_path(path, flags, holder); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); @@ -716,7 +719,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, brelse(bh); error_close: - blkdev_put(bdev, flags | FMODE_EXCL); + blkdev_put(bdev, flags); error: mutex_unlock(&uuid_mutex); return ret; @@ -1179,8 +1182,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) goto out; } } else { - bdev = open_bdev_exclusive(device_path, FMODE_READ, - root->fs_info->bdev_holder); + bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL, + root->fs_info->bdev_holder); if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); goto out; @@ -1244,7 +1247,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) root->fs_info->fs_devices->latest_bdev = next_device->bdev; if (device->bdev) { - blkdev_put(device->bdev, device->mode | FMODE_EXCL); + blkdev_put(device->bdev, device->mode); device->bdev = NULL; device->fs_devices->open_devices--; } @@ -1439,7 +1442,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) return -EINVAL; - bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder); + bdev = blkdev_get_by_path(device_path, FMODE_EXCL, + root->fs_info->bdev_holder); if (IS_ERR(bdev)) return PTR_ERR(bdev); diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 2b638b6e4eea..856e75770304 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -49,7 +49,7 @@ struct btrfs_device { struct block_device *bdev; - 
/* the mode sent to open_bdev_exclusive */ + /* the mode sent to blkdev_get */ fmode_t mode; char *name; diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 23e7513dba9c..123720ba786d 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -347,7 +347,7 @@ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb) struct block_device *bdev; char b[BDEVNAME_SIZE]; - bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); + bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 5dd0b3e76fa8..bd63e6927219 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -647,7 +647,7 @@ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb) struct block_device *bdev; char b[BDEVNAME_SIZE]; - bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); + bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); if (IS_ERR(bdev)) goto fail; return bdev; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index c1f0763a022b..bc56ccf98ffd 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -1268,7 +1268,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, { struct block_device *bdev; struct super_block *s; - fmode_t mode = FMODE_READ; + fmode_t mode = FMODE_READ | FMODE_EXCL; int error; struct gfs2_args args; struct gfs2_sbd *sdp; @@ -1276,7 +1276,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, if (!(flags & MS_RDONLY)) mode |= FMODE_WRITE; - bdev = open_bdev_exclusive(dev_name, mode, fs_type); + bdev = blkdev_get_by_path(dev_name, mode, fs_type); if (IS_ERR(bdev)) return ERR_CAST(bdev); @@ -1298,7 +1298,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, goto error_bdev; if (s->s_root) - blkdev_put(bdev, mode | FMODE_EXCL); + blkdev_put(bdev, mode); memset(&args, 0, sizeof(args)); args.ar_quota = GFS2_QUOTA_DEFAULT; @@ -1342,7 +1342,7 @@ error_super: deactivate_locked_super(s); return ERR_PTR(error); error_bdev: - blkdev_put(bdev, mode | FMODE_EXCL); + blkdev_put(bdev, mode); return ERR_PTR(error); } diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index 5a290f22dcc3..278e3fb40b71 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1120,8 +1120,8 @@ int lmLogOpen(struct super_block *sb) * file systems to log may have n-to-1 relationship; */ - bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, - log); + bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, + log); if (IS_ERR(bdev)) { rc = -PTR_ERR(bdev); goto free; diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index 734b9025858e..723bc5bca09a 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c @@ -325,7 +325,8 @@ int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type, { struct block_device *bdev; - bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, type); + bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL, + type); if (IS_ERR(bdev)) return PTR_ERR(bdev); diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 756a6798d7c8..0030640e2d72 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1147,14 +1147,14 @@ nilfs_mount(struct file_system_type *fs_type, int flags, { struct nilfs_super_data sd; struct super_block *s; - fmode_t mode = FMODE_READ; + fmode_t mode = FMODE_READ | FMODE_EXCL; struct dentry *root_dentry; int err, s_new = false; if 
(!(flags & MS_RDONLY)) mode |= FMODE_WRITE; - sd.bdev = open_bdev_exclusive(dev_name, mode, fs_type); + sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type); if (IS_ERR(sd.bdev)) return ERR_CAST(sd.bdev); @@ -1233,7 +1233,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, } if (!s_new) - blkdev_put(sd.bdev, mode | FMODE_EXCL); + blkdev_put(sd.bdev, mode); return root_dentry; @@ -1242,7 +1242,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, failed: if (!s_new) - blkdev_put(sd.bdev, mode | FMODE_EXCL); + blkdev_put(sd.bdev, mode); return ERR_PTR(err); } diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index b488136f5ace..e2fce519c0f2 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -2585,7 +2585,8 @@ static int journal_init_dev(struct super_block *super, if ((!jdev_name || !jdev_name[0])) { if (jdev == super->s_dev) blkdev_mode &= ~FMODE_EXCL; - journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode, journal); + journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode, + journal); journal->j_dev_mode = blkdev_mode; if (IS_ERR(journal->j_dev_bd)) { result = PTR_ERR(journal->j_dev_bd); @@ -2601,8 +2602,7 @@ static int journal_init_dev(struct super_block *super, } journal->j_dev_mode = blkdev_mode; - journal->j_dev_bd = open_bdev_exclusive(jdev_name, - blkdev_mode, journal); + journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal); if (IS_ERR(journal->j_dev_bd)) { result = PTR_ERR(journal->j_dev_bd); journal->j_dev_bd = NULL; diff --git a/fs/super.c b/fs/super.c index 22374bf0ba87..5d9a4497849a 100644 --- a/fs/super.c +++ b/fs/super.c @@ -766,13 +766,13 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, { struct block_device *bdev; struct super_block *s; - fmode_t mode = FMODE_READ; + fmode_t mode = FMODE_READ | FMODE_EXCL; int error = 0; if (!(flags & MS_RDONLY)) mode |= FMODE_WRITE; - bdev = open_bdev_exclusive(dev_name, mode, fs_type); + bdev = blkdev_get_by_path(dev_name, mode, fs_type); if (IS_ERR(bdev)) return ERR_CAST(bdev); @@ -807,7 +807,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, * holding an active reference. 
*/ up_write(&s->s_umount); - blkdev_put(bdev, mode | FMODE_EXCL); + blkdev_put(bdev, mode); down_write(&s->s_umount); } else { char b[BDEVNAME_SIZE]; @@ -831,7 +831,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, error_s: error = PTR_ERR(s); error_bdev: - blkdev_put(bdev, mode | FMODE_EXCL); + blkdev_put(bdev, mode); error: return ERR_PTR(error); } @@ -862,6 +862,7 @@ void kill_block_super(struct super_block *sb) bdev->bd_super = NULL; generic_shutdown_super(sb); sync_blockdev(bdev); + WARN_ON_ONCE(!(mode & FMODE_EXCL)); blkdev_put(bdev, mode | FMODE_EXCL); } diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index a1a6e5ceea67..9209cd199c47 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -609,7 +609,8 @@ xfs_blkdev_get( { int error = 0; - *bdevp = open_bdev_exclusive(name, FMODE_READ|FMODE_WRITE, mp); + *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, + mp); if (IS_ERR(*bdevp)) { error = PTR_ERR(*bdevp); printk("XFS: Invalid device [%s], error=%d\n", name, error); -- cgit v1.2.2 From 380cf090f4f531545b558b04a3dd90d09df52ee9 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Thu, 11 Nov 2010 19:23:29 +0800 Subject: ext4: fix redirty_page_for_writepage() typo in comment Signed-off-by: Wu Fengguang Signed-off-by: Jiri Kosina --- fs/ext4/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 4bc84b8adb7f..b6a4b41d7e14 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3357,7 +3357,7 @@ int ext4_alloc_da_blocks(struct inode *inode) * doing I/O at all. * * We could call write_cache_pages(), and then redirty all of - * the pages by calling redirty_page_for_writeback() but that + * the pages by calling redirty_page_for_writepage() but that * would be ugly in the extreme. So instead we would need to * replicate parts of the code in the above functions, * simplifying them becuase we wouldn't actually intend to -- cgit v1.2.2 From 43b0178eda1e7e5d1e205bbfd076ab5d6ecacc02 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 27 Oct 2010 23:19:04 +0200 Subject: nfsd: fix NULL dereference in setattr() The original code would oops if this were called from nfsd4_setattr() because "filpp" is NULL. (Note this case is currently impossible, as long as we only give out read delegations.) Signed-off-by: Dan Carpenter Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4state.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index ad2bfa68d534..2d191293e6aa 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3081,9 +3081,10 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, if (status) goto out; renew_client(dp->dl_client); - if (filpp) + if (filpp) { *filpp = find_readable_file(dp->dl_file); - BUG_ON(!*filpp); + BUG_ON(!*filpp); + } } else { /* open or lock stateid */ stp = find_stateid(stateid, flags); if (!stp) -- cgit v1.2.2 From 5afa040b307952bb804eba34b21646da2842e14d Mon Sep 17 00:00:00 2001 From: Mi Jinlong Date: Tue, 9 Nov 2010 09:39:23 +0800 Subject: NFSv4.1: Make sure nfsd can decode SP4_SSV correctly at exchange_id According to the RFC, the ssv_sp_parms4 argument is defined as: struct ssv_sp_parms4 { state_protect_ops4 ssp_ops; sec_oid4 ssp_hash_algs<>; sec_oid4 ssp_encr_algs<>; uint32_t ssp_window; uint32_t ssp_num_gss_handles; }; If a client sends an exchange_id with SP4_SSV, the server can't decode the SP4_SSV ssp_hash_algs and ssp_encr_algs arguments correctly, because the kernel treats each of the two arguments as a single sec_oid4 struct when each should be a set of sec_oid4 structs. Signed-off-by: Mi Jinlong Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4xdr.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index f35a94a04026..71d7d339e44a 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -1005,7 +1005,7 @@ static __be32 nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp, struct nfsd4_exchange_id *exid) { - int dummy; + int dummy, tmp; DECODE_HEAD; READ_BUF(NFS4_VERIFIER_SIZE); @@ -1053,15 +1053,23 @@ nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp, /* ssp_hash_algs<> */ READ_BUF(4); - READ32(dummy); - READ_BUF(dummy); - p += XDR_QUADLEN(dummy); + READ32(tmp); + while (tmp--) { + READ_BUF(4); + READ32(dummy); + READ_BUF(dummy); + p += XDR_QUADLEN(dummy); + } /* ssp_encr_algs<> */ READ_BUF(4); - READ32(dummy); - READ_BUF(dummy); - p += XDR_QUADLEN(dummy); + READ32(tmp); + while (tmp--) { + READ_BUF(4); + READ32(dummy); + READ_BUF(dummy); + p += XDR_QUADLEN(dummy); + } /* ssp_window and ssp_num_gss_handles */ READ_BUF(8); -- cgit v1.2.2 From 044bc1d4324bfb34761cb361e404cb8d39c68777 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Fri, 12 Nov 2010 14:36:06 -0500 Subject: nfsd4: return serverfault on request for ssv We're refusing to support a mandatory feature of 4.1, so serverfault seems the better error; see e.g.: http://www.ietf.org/mail-archive/web/nfsv4/current/msg07638.html Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 2d191293e6aa..9e7f8af12f8f 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1344,7 +1344,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, case SP4_NONE: break; case SP4_SSV: - return nfserr_encr_alg_unsupp; + return nfserr_serverfault; default: BUG(); /* checked by xdr code */ case SP4_MACH_CRED: -- cgit v1.2.2 From ced6dfe9fc7128995c6d60627938944b430d82c8 Mon Sep 17 00:00:00 2001 From: Mi Jinlong Date: Thu, 11 Nov 2010 18:03:50 +0800 Subject: NFS4.1: server gets drc mem fail should reply error at create_session When the server fails to get DRC memory, it should reply with an error to the client. Signed-off-by: Mi Jinlong Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4state.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 9e7f8af12f8f..5d0ee0f0cb0e 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -749,6 +749,8 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n */ slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached); numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs); + if (numslots < 1) + return NULL; new = alloc_session(slotsize, numslots); if (!new) { -- cgit v1.2.2 From 1205065764f2eda3216ebe213143f69891ee3460 Mon Sep 17 00:00:00 2001 From: Mi Jinlong Date: Thu, 11 Nov 2010 18:03:40 +0800 Subject: NFS4.1: Fix bug server don't reply the right fore_channel to client at create_session In the latest kernel (2.6.37-rc1), the server only initializes the forechannel in init_forechannel_attrs, but does not reflect it in the reply. After the session is initialized successfully, we should copy the forechannel info into the nfsd4_create_session struct. Reviewed-by: Benny Halevy Signed-off-by: Mi Jinlong Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 5d0ee0f0cb0e..afa7525a86be 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1562,6 +1562,8 @@ nfsd4_create_session(struct svc_rqst *rqstp, status = nfs_ok; memcpy(cr_ses->sessionid.data, new->se_sessionid.data, NFS4_MAX_SESSIONID_LEN); + memcpy(&cr_ses->fore_channel, &new->se_fchannel, + sizeof(struct nfsd4_channel_attrs)); cs_slot->sl_seqid++; cr_ses->seqid = cs_slot->sl_seqid; -- cgit v1.2.2 From e030d58e8860f1c87b17631dbdd70747cbe1fb5b Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Mon, 15 Nov 2010 15:46:07 -0500 Subject: sysfs: remove useless test from sysfs_merge_group Dan Carpenter pointed out that the new sysfs_merge_group() and sysfs_unmerge_group() routines require their grp argument to be non-NULL, because they dereference grp to obtain the list of attributes. Hence it's pointless for the routines to include a test and special-case handling for when grp is NULL. This patch (as1433) removes the unneeded tests. 
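A minimal usage sketch (hypothetical caller and attribute names, not part of this patch): callers are now expected to always pass a real attribute_group whose ->name refers to an existing group directory and whose ->attrs is the NULL-terminated list being merged:

	static struct attribute *foo_attrs[] = {
		&dev_attr_bar.attr,		/* hypothetical attribute */
		NULL,
	};
	static const struct attribute_group foo_group = {
		.name	= "foo",		/* existing group directory to merge into */
		.attrs	= foo_attrs,
	};

	error = sysfs_merge_group(kobj, &foo_group);	/* grp must not be NULL */

	/* and on teardown: */
	sysfs_unmerge_group(kobj, &foo_group);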
Signed-off-by: Alan Stern CC: Dan Carpenter Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/group.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c index 442f34ff1af8..c8769dc222d8 100644 --- a/fs/sysfs/group.c +++ b/fs/sysfs/group.c @@ -165,10 +165,7 @@ int sysfs_merge_group(struct kobject *kobj, struct attribute *const *attr; int i; - if (grp) - dir_sd = sysfs_get_dirent(kobj->sd, NULL, grp->name); - else - dir_sd = sysfs_get(kobj->sd); + dir_sd = sysfs_get_dirent(kobj->sd, NULL, grp->name); if (!dir_sd) return -ENOENT; @@ -195,10 +192,7 @@ void sysfs_unmerge_group(struct kobject *kobj, struct sysfs_dirent *dir_sd; struct attribute *const *attr; - if (grp) - dir_sd = sysfs_get_dirent(kobj->sd, NULL, grp->name); - else - dir_sd = sysfs_get(kobj->sd); + dir_sd = sysfs_get_dirent(kobj->sd, NULL, grp->name); if (dir_sd) { for (attr = grp->attrs; *attr; ++attr) sysfs_hash_and_remove(dir_sd, NULL, (*attr)->name); -- cgit v1.2.2 From 576ecb8e2b725726471cc62b12c01e28d33127ba Mon Sep 17 00:00:00 2001 From: Samuel Kvasnica Date: Fri, 19 Nov 2010 13:38:49 +0000 Subject: xfs: fix exporting with left over 64-bit inodes We now support mounting and using filesystems with 64-bit inodes even when not mounted with the inode64 option (which now only controls if we allocate new inodes in that space or not). Make sure we always use large NFS file handles when exporting a filesystem that may contain 64-bit inodes. Note that this only affects newly generated file handles, any outstanding 32-bit file handle is still accepted. [hch: the comment and commit log are mine, the rest is from a patch snipplet from Samuel] Signed-off-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_export.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c index 3764d74790ec..fc0114da7fdd 100644 --- a/fs/xfs/linux-2.6/xfs_export.c +++ b/fs/xfs/linux-2.6/xfs_export.c @@ -70,8 +70,16 @@ xfs_fs_encode_fh( else fileid_type = FILEID_INO32_GEN_PARENT; - /* filesystem may contain 64bit inode numbers */ - if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS)) + /* + * If the the filesystem may contain 64bit inode numbers, we need + * to use larger file handles that can represent them. + * + * While we only allocate inodes that do not fit into 32 bits any + * large enough filesystem may contain them, thus the slightly + * confusing looking conditional below. + */ + if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS) || + (XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_32BITINODES)) fileid_type |= XFS_FILEID_TYPE_64FLAG; /* -- cgit v1.2.2 From e2714bf8d5c8e131a6df6b0ea2269433e9a03a9b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Dec 2010 22:06:21 +0000 Subject: xfs: remove leftovers of old buffer log items in recovery code XFS used to support different types of buffer log items long time ago. Remove the switch statements checking the log item type in various buffer recovery helpers that were left over from those days and the rather useless xlog_recover_do_buffer_pass2 wrapper. 
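For illustration (simplified sketch drawn from the hunks below, not a complete function): the removed pattern indirected through a switch in which XFS_LI_BUF was the only case that could match, and it now collapses to direct field access:

	/* before: switch with a single live case */
	switch (buf_f->blf_type) {
	case XFS_LI_BUF:
		blkno = buf_f->blf_blkno;
		len   = buf_f->blf_len;
		flags = buf_f->blf_flags;
		break;
	}

	/* after: read the buf log format fields directly */
	blkno = buf_f->blf_blkno;
	len   = buf_f->blf_len;
	flags = buf_f->blf_flags;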
Signed-off-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/xfs_log_recover.c | 155 ++++++++++++----------------------------------- 1 file changed, 40 insertions(+), 115 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 966d3f97458c..e51d93db1b0b 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -1614,22 +1614,13 @@ xlog_recover_do_buffer_pass1( xfs_buf_cancel_t *nextp; xfs_buf_cancel_t *prevp; xfs_buf_cancel_t **bucket; - xfs_daddr_t blkno = 0; - uint len = 0; - ushort flags = 0; - - switch (buf_f->blf_type) { - case XFS_LI_BUF: - blkno = buf_f->blf_blkno; - len = buf_f->blf_len; - flags = buf_f->blf_flags; - break; - } + xfs_daddr_t blkno = buf_f->blf_blkno; + uint len = buf_f->blf_len; /* * If this isn't a cancel buffer item, then just return. */ - if (!(flags & XFS_BLF_CANCEL)) { + if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) { trace_xfs_log_recover_buf_not_cancel(log, buf_f); return; } @@ -1767,77 +1758,38 @@ xlog_check_buffer_cancelled( return 0; } -STATIC int -xlog_recover_do_buffer_pass2( - xlog_t *log, - xfs_buf_log_format_t *buf_f) -{ - xfs_daddr_t blkno = 0; - ushort flags = 0; - uint len = 0; - - switch (buf_f->blf_type) { - case XFS_LI_BUF: - blkno = buf_f->blf_blkno; - flags = buf_f->blf_flags; - len = buf_f->blf_len; - break; - } - - return xlog_check_buffer_cancelled(log, blkno, len, flags); -} - /* - * Perform recovery for a buffer full of inodes. In these buffers, - * the only data which should be recovered is that which corresponds - * to the di_next_unlinked pointers in the on disk inode structures. - * The rest of the data for the inodes is always logged through the - * inodes themselves rather than the inode buffer and is recovered - * in xlog_recover_do_inode_trans(). + * Perform recovery for a buffer full of inodes. In these buffers, the only + * data which should be recovered is that which corresponds to the + * di_next_unlinked pointers in the on disk inode structures. The rest of the + * data for the inodes is always logged through the inodes themselves rather + * than the inode buffer and is recovered in xlog_recover_inode_pass2(). * - * The only time when buffers full of inodes are fully recovered is - * when the buffer is full of newly allocated inodes. In this case - * the buffer will not be marked as an inode buffer and so will be - * sent to xlog_recover_do_reg_buffer() below during recovery. + * The only time when buffers full of inodes are fully recovered is when the + * buffer is full of newly allocated inodes. In this case the buffer will + * not be marked as an inode buffer and so will be sent to + * xlog_recover_do_reg_buffer() below during recovery. 
*/ STATIC int xlog_recover_do_inode_buffer( - xfs_mount_t *mp, + struct xfs_mount *mp, xlog_recover_item_t *item, - xfs_buf_t *bp, + struct xfs_buf *bp, xfs_buf_log_format_t *buf_f) { int i; - int item_index; - int bit; - int nbits; - int reg_buf_offset; - int reg_buf_bytes; + int item_index = 0; + int bit = 0; + int nbits = 0; + int reg_buf_offset = 0; + int reg_buf_bytes = 0; int next_unlinked_offset; int inodes_per_buf; xfs_agino_t *logged_nextp; xfs_agino_t *buffer_nextp; - unsigned int *data_map = NULL; - unsigned int map_size = 0; trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f); - switch (buf_f->blf_type) { - case XFS_LI_BUF: - data_map = buf_f->blf_data_map; - map_size = buf_f->blf_map_size; - break; - } - /* - * Set the variables corresponding to the current region to - * 0 so that we'll initialize them on the first pass through - * the loop. - */ - reg_buf_offset = 0; - reg_buf_bytes = 0; - bit = 0; - nbits = 0; - item_index = 0; inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog; for (i = 0; i < inodes_per_buf; i++) { next_unlinked_offset = (i * mp->m_sb.sb_inodesize) + @@ -1852,18 +1804,18 @@ xlog_recover_do_inode_buffer( * the current di_next_unlinked field. */ bit += nbits; - bit = xfs_next_bit(data_map, map_size, bit); + bit = xfs_next_bit(buf_f->blf_data_map, + buf_f->blf_map_size, bit); /* * If there are no more logged regions in the * buffer, then we're done. */ - if (bit == -1) { + if (bit == -1) return 0; - } - nbits = xfs_contig_bits(data_map, map_size, - bit); + nbits = xfs_contig_bits(buf_f->blf_data_map, + buf_f->blf_map_size, bit); ASSERT(nbits > 0); reg_buf_offset = bit << XFS_BLF_SHIFT; reg_buf_bytes = nbits << XFS_BLF_SHIFT; @@ -1875,9 +1827,8 @@ xlog_recover_do_inode_buffer( * di_next_unlinked field, then move on to the next * di_next_unlinked field. */ - if (next_unlinked_offset < reg_buf_offset) { + if (next_unlinked_offset < reg_buf_offset) continue; - } ASSERT(item->ri_buf[item_index].i_addr != NULL); ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0); @@ -1913,36 +1864,29 @@ xlog_recover_do_inode_buffer( * given buffer. The bitmap in the buf log format structure indicates * where to place the logged data. */ -/*ARGSUSED*/ STATIC void xlog_recover_do_reg_buffer( struct xfs_mount *mp, xlog_recover_item_t *item, - xfs_buf_t *bp, + struct xfs_buf *bp, xfs_buf_log_format_t *buf_f) { int i; int bit; int nbits; - unsigned int *data_map = NULL; - unsigned int map_size = 0; int error; trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f); - switch (buf_f->blf_type) { - case XFS_LI_BUF: - data_map = buf_f->blf_data_map; - map_size = buf_f->blf_map_size; - break; - } bit = 0; i = 1; /* 0 is the buf format structure */ while (1) { - bit = xfs_next_bit(data_map, map_size, bit); + bit = xfs_next_bit(buf_f->blf_data_map, + buf_f->blf_map_size, bit); if (bit == -1) break; - nbits = xfs_contig_bits(data_map, map_size, bit); + nbits = xfs_contig_bits(buf_f->blf_data_map, + buf_f->blf_map_size, bit); ASSERT(nbits > 0); ASSERT(item->ri_buf[i].i_addr != NULL); ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0); @@ -2182,13 +2126,9 @@ xlog_recover_do_buffer_trans( int pass) { xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; - xfs_mount_t *mp; + xfs_mount_t *mp = log->l_mp; xfs_buf_t *bp; int error; - int cancel; - xfs_daddr_t blkno; - int len; - ushort flags; uint buf_flags; if (pass == XLOG_RECOVER_PASS1) { @@ -2206,47 +2146,32 @@ xlog_recover_do_buffer_trans( * we call here will tell us whether or not to * continue with the replay of this buffer. 
*/ - cancel = xlog_recover_do_buffer_pass2(log, buf_f); - if (cancel) { + if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno, + buf_f->blf_len, buf_f->blf_flags)) { trace_xfs_log_recover_buf_cancel(log, buf_f); return 0; } } trace_xfs_log_recover_buf_recover(log, buf_f); - switch (buf_f->blf_type) { - case XFS_LI_BUF: - blkno = buf_f->blf_blkno; - len = buf_f->blf_len; - flags = buf_f->blf_flags; - break; - default: - xfs_fs_cmn_err(CE_ALERT, log->l_mp, - "xfs_log_recover: unknown buffer type 0x%x, logdev %s", - buf_f->blf_type, log->l_mp->m_logname ? - log->l_mp->m_logname : "internal"); - XFS_ERROR_REPORT("xlog_recover_do_buffer_trans", - XFS_ERRLEVEL_LOW, log->l_mp); - return XFS_ERROR(EFSCORRUPTED); - } - mp = log->l_mp; buf_flags = XBF_LOCK; - if (!(flags & XFS_BLF_INODE_BUF)) + if (!(buf_f->blf_flags & XFS_BLF_INODE_BUF)) buf_flags |= XBF_MAPPED; - bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags); + bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, + buf_flags); if (XFS_BUF_ISERROR(bp)) { - xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp, - bp, blkno); + xfs_ioerror_alert("xlog_recover_do..(read#1)", mp, + bp, buf_f->blf_blkno); error = XFS_BUF_GETERROR(bp); xfs_buf_relse(bp); return error; } error = 0; - if (flags & XFS_BLF_INODE_BUF) { + if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); - } else if (flags & + } else if (buf_f->blf_flags & (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); } else { -- cgit v1.2.2 From d5689eaa0ac5588cf459ee32f86d5700dd7d6403 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Dec 2010 22:06:22 +0000 Subject: xfs: use struct list_head for the buf cancel table Signed-off-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/xfs_buf_item.h | 11 ---- fs/xfs/xfs_log_priv.h | 6 +- fs/xfs/xfs_log_recover.c | 159 ++++++++++++++++++----------------------------- 3 files changed, 65 insertions(+), 111 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h index 0e2ed43f16c7..b6ecd2061e7c 100644 --- a/fs/xfs/xfs_buf_item.h +++ b/fs/xfs/xfs_buf_item.h @@ -105,17 +105,6 @@ typedef struct xfs_buf_log_item { xfs_buf_log_format_t bli_format; /* in-log header */ } xfs_buf_log_item_t; -/* - * This structure is used during recovery to record the buf log - * items which have been canceled and should not be replayed. 
- */ -typedef struct xfs_buf_cancel { - xfs_daddr_t bc_blkno; - uint bc_len; - int bc_refcount; - struct xfs_buf_cancel *bc_next; -} xfs_buf_cancel_t; - void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *); void xfs_buf_item_relse(struct xfs_buf *); void xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint); diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index edcdfe01617f..c1ce505313e9 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -21,7 +21,6 @@ struct xfs_buf; struct log; struct xlog_ticket; -struct xfs_buf_cancel; struct xfs_mount; /* @@ -491,7 +490,7 @@ typedef struct log { struct xfs_buftarg *l_targ; /* buftarg of log */ uint l_flags; uint l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */ - struct xfs_buf_cancel **l_buf_cancel_table; + struct list_head *l_buf_cancel_table; int l_iclog_hsize; /* size of iclog header */ int l_iclog_heads; /* # of iclog header sectors */ uint l_sectBBsize; /* sector size in BBs (2^n) */ @@ -534,6 +533,9 @@ typedef struct log { } xlog_t; +#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \ + ((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE)) + #define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR) /* common routines */ diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index e51d93db1b0b..960afd41315e 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -52,6 +52,17 @@ STATIC void xlog_recover_check_summary(xlog_t *); #define xlog_recover_check_summary(log) #endif +/* + * This structure is used during recovery to record the buf log items which + * have been canceled and should not be replayed. + */ +struct xfs_buf_cancel { + xfs_daddr_t bc_blkno; + uint bc_len; + int bc_refcount; + struct list_head bc_list; +}; + /* * Sector aligned buffer routines for buffer create/read/write/access */ @@ -1607,15 +1618,11 @@ xlog_recover_reorder_trans( */ STATIC void xlog_recover_do_buffer_pass1( - xlog_t *log, + struct log *log, xfs_buf_log_format_t *buf_f) { - xfs_buf_cancel_t *bcp; - xfs_buf_cancel_t *nextp; - xfs_buf_cancel_t *prevp; - xfs_buf_cancel_t **bucket; - xfs_daddr_t blkno = buf_f->blf_blkno; - uint len = buf_f->blf_len; + struct list_head *bucket; + struct xfs_buf_cancel *bcp; /* * If this isn't a cancel buffer item, then just return. @@ -1626,51 +1633,25 @@ xlog_recover_do_buffer_pass1( } /* - * Insert an xfs_buf_cancel record into the hash table of - * them. If there is already an identical record, bump - * its reference count. - */ - bucket = &log->l_buf_cancel_table[(__uint64_t)blkno % - XLOG_BC_TABLE_SIZE]; - /* - * If the hash bucket is empty then just insert a new record into - * the bucket. - */ - if (*bucket == NULL) { - bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t), - KM_SLEEP); - bcp->bc_blkno = blkno; - bcp->bc_len = len; - bcp->bc_refcount = 1; - bcp->bc_next = NULL; - *bucket = bcp; - return; - } - - /* - * The hash bucket is not empty, so search for duplicates of our - * record. If we find one them just bump its refcount. If not - * then add us at the end of the list. + * Insert an xfs_buf_cancel record into the hash table of them. + * If there is already an identical record, bump its reference count. 
*/ - prevp = NULL; - nextp = *bucket; - while (nextp != NULL) { - if (nextp->bc_blkno == blkno && nextp->bc_len == len) { - nextp->bc_refcount++; + bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno); + list_for_each_entry(bcp, bucket, bc_list) { + if (bcp->bc_blkno == buf_f->blf_blkno && + bcp->bc_len == buf_f->blf_len) { + bcp->bc_refcount++; trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f); return; } - prevp = nextp; - nextp = nextp->bc_next; - } - ASSERT(prevp != NULL); - bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t), - KM_SLEEP); - bcp->bc_blkno = blkno; - bcp->bc_len = len; + } + + bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP); + bcp->bc_blkno = buf_f->blf_blkno; + bcp->bc_len = buf_f->blf_len; bcp->bc_refcount = 1; - bcp->bc_next = NULL; - prevp->bc_next = bcp; + list_add_tail(&bcp->bc_list, bucket); + trace_xfs_log_recover_buf_cancel_add(log, buf_f); } @@ -1689,14 +1670,13 @@ xlog_recover_do_buffer_pass1( */ STATIC int xlog_check_buffer_cancelled( - xlog_t *log, + struct log *log, xfs_daddr_t blkno, uint len, ushort flags) { - xfs_buf_cancel_t *bcp; - xfs_buf_cancel_t *prevp; - xfs_buf_cancel_t **bucket; + struct list_head *bucket; + struct xfs_buf_cancel *bcp; if (log->l_buf_cancel_table == NULL) { /* @@ -1707,55 +1687,36 @@ xlog_check_buffer_cancelled( return 0; } - bucket = &log->l_buf_cancel_table[(__uint64_t)blkno % - XLOG_BC_TABLE_SIZE]; - bcp = *bucket; - if (bcp == NULL) { - /* - * There is no corresponding entry in the table built - * in pass one, so this buffer has not been cancelled. - */ - ASSERT(!(flags & XFS_BLF_CANCEL)); - return 0; - } - /* - * Search for an entry in the buffer cancel table that - * matches our buffer. + * Search for an entry in the cancel table that matches our buffer. */ - prevp = NULL; - while (bcp != NULL) { - if (bcp->bc_blkno == blkno && bcp->bc_len == len) { - /* - * We've go a match, so return 1 so that the - * recovery of this buffer is cancelled. - * If this buffer is actually a buffer cancel - * log item, then decrement the refcount on the - * one in the table and remove it if this is the - * last reference. - */ - if (flags & XFS_BLF_CANCEL) { - bcp->bc_refcount--; - if (bcp->bc_refcount == 0) { - if (prevp == NULL) { - *bucket = bcp->bc_next; - } else { - prevp->bc_next = bcp->bc_next; - } - kmem_free(bcp); - } - } - return 1; - } - prevp = bcp; - bcp = bcp->bc_next; + bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno); + list_for_each_entry(bcp, bucket, bc_list) { + if (bcp->bc_blkno == blkno && bcp->bc_len == len) + goto found; } + /* - * We didn't find a corresponding entry in the table, so - * return 0 so that the buffer is NOT cancelled. + * We didn't find a corresponding entry in the table, so return 0 so + * that the buffer is NOT cancelled. */ ASSERT(!(flags & XFS_BLF_CANCEL)); return 0; + +found: + /* + * We've go a match, so return 1 so that the recovery of this buffer + * is cancelled. If this buffer is actually a buffer cancel log + * item, then decrement the refcount on the one in the table and + * remove it if this is the last reference. + */ + if (flags & XFS_BLF_CANCEL) { + if (--bcp->bc_refcount == 0) { + list_del(&bcp->bc_list); + kmem_free(bcp); + } + } + return 1; } /* @@ -3649,7 +3610,7 @@ xlog_do_log_recovery( xfs_daddr_t head_blk, xfs_daddr_t tail_blk) { - int error; + int error, i; ASSERT(head_blk != tail_blk); @@ -3657,10 +3618,12 @@ xlog_do_log_recovery( * First do a pass to find all of the cancelled buf log items. * Store them in the buf_cancel_table for use in the second pass. 
*/ - log->l_buf_cancel_table = - (xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE * - sizeof(xfs_buf_cancel_t*), + log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE * + sizeof(struct list_head), KM_SLEEP); + for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) + INIT_LIST_HEAD(&log->l_buf_cancel_table[i]); + error = xlog_do_recovery_pass(log, head_blk, tail_blk, XLOG_RECOVER_PASS1); if (error != 0) { @@ -3679,7 +3642,7 @@ xlog_do_log_recovery( int i; for (i = 0; i < XLOG_BC_TABLE_SIZE; i++) - ASSERT(log->l_buf_cancel_table[i] == NULL); + ASSERT(list_empty(&log->l_buf_cancel_table[i])); } #endif /* DEBUG */ -- cgit v1.2.2 From d0450948641b2090b5d467ba638bbebd40b20b21 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Dec 2010 22:06:23 +0000 Subject: xfs: refactor xlog_recover_commit_trans Merge the call to xlog_recover_reorder_trans and the loop over the recovery items from xlog_recover_do_trans into xlog_recover_commit_trans, and keep the switch statement over the log item types as a separate helper. Signed-off-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/xfs_log_recover.c | 117 +++++++++++++++++++++-------------------------- 1 file changed, 53 insertions(+), 64 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 960afd41315e..26e18052a648 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2673,64 +2673,6 @@ xlog_recover_do_efd_trans( spin_unlock(&ailp->xa_lock); } -/* - * Perform the transaction - * - * If the transaction modifies a buffer or inode, do it now. Otherwise, - * EFIs and EFDs get queued up by adding entries into the AIL for them. - */ -STATIC int -xlog_recover_do_trans( - xlog_t *log, - xlog_recover_t *trans, - int pass) -{ - int error = 0; - xlog_recover_item_t *item; - - error = xlog_recover_reorder_trans(log, trans, pass); - if (error) - return error; - - list_for_each_entry(item, &trans->r_itemq, ri_list) { - trace_xfs_log_recover_item_recover(log, trans, item, pass); - switch (ITEM_TYPE(item)) { - case XFS_LI_BUF: - error = xlog_recover_do_buffer_trans(log, item, pass); - break; - case XFS_LI_INODE: - error = xlog_recover_do_inode_trans(log, item, pass); - break; - case XFS_LI_EFI: - error = xlog_recover_do_efi_trans(log, item, - trans->r_lsn, pass); - break; - case XFS_LI_EFD: - xlog_recover_do_efd_trans(log, item, pass); - error = 0; - break; - case XFS_LI_DQUOT: - error = xlog_recover_do_dquot_trans(log, item, pass); - break; - case XFS_LI_QUOTAOFF: - error = xlog_recover_do_quotaoff_trans(log, item, - pass); - break; - default: - xlog_warn( - "XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item)); - ASSERT(0); - error = XFS_ERROR(EIO); - break; - } - - if (error) - return error; - } - - return 0; -} - /* * Free up any resources allocated by the transaction * @@ -2738,7 +2680,7 @@ xlog_recover_do_trans( */ STATIC void xlog_recover_free_trans( - xlog_recover_t *trans) + struct xlog_recover *trans) { xlog_recover_item_t *item, *n; int i; @@ -2756,18 +2698,65 @@ xlog_recover_free_trans( kmem_free(trans); } +STATIC int +xlog_recover_commit_item( + struct log *log, + struct xlog_recover *trans, + xlog_recover_item_t *item, + int pass) +{ + trace_xfs_log_recover_item_recover(log, trans, item, pass); + + switch (ITEM_TYPE(item)) { + case XFS_LI_BUF: + return xlog_recover_do_buffer_trans(log, item, pass); + case XFS_LI_INODE: + return xlog_recover_do_inode_trans(log, item, pass); + case XFS_LI_EFI: + return xlog_recover_do_efi_trans(log, item, trans->r_lsn, pass); + 
case XFS_LI_EFD: + xlog_recover_do_efd_trans(log, item, pass); + return 0; + case XFS_LI_DQUOT: + return xlog_recover_do_dquot_trans(log, item, pass); + case XFS_LI_QUOTAOFF: + return xlog_recover_do_quotaoff_trans(log, item, pass); + default: + xlog_warn( + "XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item)); + ASSERT(0); + return XFS_ERROR(EIO); + } +} + +/* + * Perform the transaction. + * + * If the transaction modifies a buffer or inode, do it now. Otherwise, + * EFIs and EFDs get queued up by adding entries into the AIL for them. + */ STATIC int xlog_recover_commit_trans( - xlog_t *log, - xlog_recover_t *trans, + struct log *log, + struct xlog_recover *trans, int pass) { - int error; + int error = 0; + xlog_recover_item_t *item; hlist_del(&trans->r_list); - if ((error = xlog_recover_do_trans(log, trans, pass))) + + error = xlog_recover_reorder_trans(log, trans, pass); + if (error) return error; - xlog_recover_free_trans(trans); /* no error */ + + list_for_each_entry(item, &trans->r_itemq, ri_list) { + error = xlog_recover_commit_item(log, trans, item, pass); + if (error) + return error; + } + + xlog_recover_free_trans(trans); return 0; } -- cgit v1.2.2 From c9f71f5fc4390ea3a8087c00d53a799e7e0f0f8e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 1 Dec 2010 22:06:24 +0000 Subject: xfs: untangle phase1 vs phase2 recovery helpers Dispatch to a different helper for phase1 vs phase2 in xlog_recover_commit_trans instead of doing it in all the low-level functions. Signed-off-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/xfs_log_recover.c | 194 +++++++++++++++++++++++------------------------ 1 file changed, 93 insertions(+), 101 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 26e18052a648..4ab4f6ff48aa 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -1616,11 +1616,12 @@ xlog_recover_reorder_trans( * record in the table to tell us how many times we expect to see this * record during the second pass. */ -STATIC void -xlog_recover_do_buffer_pass1( +STATIC int +xlog_recover_buffer_pass1( struct log *log, - xfs_buf_log_format_t *buf_f) + xlog_recover_item_t *item) { + xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; struct list_head *bucket; struct xfs_buf_cancel *bcp; @@ -1629,7 +1630,7 @@ xlog_recover_do_buffer_pass1( */ if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) { trace_xfs_log_recover_buf_not_cancel(log, buf_f); - return; + return 0; } /* @@ -1642,7 +1643,7 @@ xlog_recover_do_buffer_pass1( bcp->bc_len == buf_f->blf_len) { bcp->bc_refcount++; trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f); - return; + return 0; } } @@ -1653,6 +1654,7 @@ xlog_recover_do_buffer_pass1( list_add_tail(&bcp->bc_list, bucket); trace_xfs_log_recover_buf_cancel_add(log, buf_f); + return 0; } /* @@ -2081,10 +2083,9 @@ xlog_recover_do_dquot_buffer( * for more details on the implementation of the table of cancel records. */ STATIC int -xlog_recover_do_buffer_trans( +xlog_recover_buffer_pass2( xlog_t *log, - xlog_recover_item_t *item, - int pass) + xlog_recover_item_t *item) { xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; xfs_mount_t *mp = log->l_mp; @@ -2092,27 +2093,16 @@ xlog_recover_do_buffer_trans( int error; uint buf_flags; - if (pass == XLOG_RECOVER_PASS1) { - /* - * In this pass we're only looking for buf items - * with the XFS_BLF_CANCEL bit set. 
- */ - xlog_recover_do_buffer_pass1(log, buf_f); + /* + * In this pass we only want to recover all the buffers which have + * not been cancelled and are not cancellation buffers themselves. + */ + if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno, + buf_f->blf_len, buf_f->blf_flags)) { + trace_xfs_log_recover_buf_cancel(log, buf_f); return 0; - } else { - /* - * In this pass we want to recover all the buffers - * which have not been cancelled and are not - * cancellation buffers themselves. The routine - * we call here will tell us whether or not to - * continue with the replay of this buffer. - */ - if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno, - buf_f->blf_len, buf_f->blf_flags)) { - trace_xfs_log_recover_buf_cancel(log, buf_f); - return 0; - } } + trace_xfs_log_recover_buf_recover(log, buf_f); buf_flags = XBF_LOCK; @@ -2172,16 +2162,14 @@ xlog_recover_do_buffer_trans( } STATIC int -xlog_recover_do_inode_trans( +xlog_recover_inode_pass2( xlog_t *log, - xlog_recover_item_t *item, - int pass) + xlog_recover_item_t *item) { xfs_inode_log_format_t *in_f; - xfs_mount_t *mp; + xfs_mount_t *mp = log->l_mp; xfs_buf_t *bp; xfs_dinode_t *dip; - xfs_ino_t ino; int len; xfs_caddr_t src; xfs_caddr_t dest; @@ -2191,10 +2179,6 @@ xlog_recover_do_inode_trans( xfs_icdinode_t *dicp; int need_free = 0; - if (pass == XLOG_RECOVER_PASS1) { - return 0; - } - if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) { in_f = item->ri_buf[0].i_addr; } else { @@ -2204,8 +2188,6 @@ xlog_recover_do_inode_trans( if (error) goto error; } - ino = in_f->ilf_ino; - mp = log->l_mp; /* * Inode buffers can be freed, look out for it, @@ -2240,8 +2222,8 @@ xlog_recover_do_inode_trans( xfs_buf_relse(bp); xfs_fs_cmn_err(CE_ALERT, mp, "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld", - dip, bp, ino); - XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)", + dip, bp, in_f->ilf_ino); + XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)", XFS_ERRLEVEL_LOW, mp); error = EFSCORRUPTED; goto error; @@ -2251,8 +2233,8 @@ xlog_recover_do_inode_trans( xfs_buf_relse(bp); xfs_fs_cmn_err(CE_ALERT, mp, "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld", - item, ino); - XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)", + item, in_f->ilf_ino); + XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)", XFS_ERRLEVEL_LOW, mp); error = EFSCORRUPTED; goto error; @@ -2280,12 +2262,12 @@ xlog_recover_do_inode_trans( if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) { if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && (dicp->di_format != XFS_DINODE_FMT_BTREE)) { - XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)", + XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)", XFS_ERRLEVEL_LOW, mp, dicp); xfs_buf_relse(bp); xfs_fs_cmn_err(CE_ALERT, mp, "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", - item, dip, bp, ino); + item, dip, bp, in_f->ilf_ino); error = EFSCORRUPTED; goto error; } @@ -2293,40 +2275,40 @@ xlog_recover_do_inode_trans( if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) && (dicp->di_format != XFS_DINODE_FMT_BTREE) && (dicp->di_format != XFS_DINODE_FMT_LOCAL)) { - XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)", + XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)", XFS_ERRLEVEL_LOW, mp, dicp); xfs_buf_relse(bp); xfs_fs_cmn_err(CE_ALERT, mp, "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", - item, dip, bp, ino); + item, dip, bp, in_f->ilf_ino); error = EFSCORRUPTED; goto 
error; } } if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){ - XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)", + XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)", XFS_ERRLEVEL_LOW, mp, dicp); xfs_buf_relse(bp); xfs_fs_cmn_err(CE_ALERT, mp, "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", - item, dip, bp, ino, + item, dip, bp, in_f->ilf_ino, dicp->di_nextents + dicp->di_anextents, dicp->di_nblocks); error = EFSCORRUPTED; goto error; } if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) { - XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)", + XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)", XFS_ERRLEVEL_LOW, mp, dicp); xfs_buf_relse(bp); xfs_fs_cmn_err(CE_ALERT, mp, "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x", - item, dip, bp, ino, dicp->di_forkoff); + item, dip, bp, in_f->ilf_ino, dicp->di_forkoff); error = EFSCORRUPTED; goto error; } if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) { - XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)", + XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)", XFS_ERRLEVEL_LOW, mp, dicp); xfs_buf_relse(bp); xfs_fs_cmn_err(CE_ALERT, mp, @@ -2418,7 +2400,7 @@ xlog_recover_do_inode_trans( break; default: - xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag"); + xlog_warn("XFS: xlog_recover_inode_pass2: Invalid flag"); ASSERT(0); xfs_buf_relse(bp); error = EIO; @@ -2442,18 +2424,11 @@ error: * of that type. */ STATIC int -xlog_recover_do_quotaoff_trans( +xlog_recover_quotaoff_pass1( xlog_t *log, - xlog_recover_item_t *item, - int pass) + xlog_recover_item_t *item) { - xfs_qoff_logformat_t *qoff_f; - - if (pass == XLOG_RECOVER_PASS2) { - return (0); - } - - qoff_f = item->ri_buf[0].i_addr; + xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr; ASSERT(qoff_f); /* @@ -2474,22 +2449,17 @@ xlog_recover_do_quotaoff_trans( * Recover a dquot record */ STATIC int -xlog_recover_do_dquot_trans( +xlog_recover_dquot_pass2( xlog_t *log, - xlog_recover_item_t *item, - int pass) + xlog_recover_item_t *item) { - xfs_mount_t *mp; + xfs_mount_t *mp = log->l_mp; xfs_buf_t *bp; struct xfs_disk_dquot *ddq, *recddq; int error; xfs_dq_logformat_t *dq_f; uint type; - if (pass == XLOG_RECOVER_PASS1) { - return 0; - } - mp = log->l_mp; /* * Filesystems are required to send in quota flags at mount time. @@ -2533,7 +2503,7 @@ xlog_recover_do_dquot_trans( if ((error = xfs_qm_dqcheck(recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, - "xlog_recover_do_dquot_trans (log copy)"))) { + "xlog_recover_dquot_pass2 (log copy)"))) { return XFS_ERROR(EIO); } ASSERT(dq_f->qlf_len == 1); @@ -2556,7 +2526,7 @@ xlog_recover_do_dquot_trans( * minimal initialization then. */ if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, - "xlog_recover_do_dquot_trans")) { + "xlog_recover_dquot_pass2")) { xfs_buf_relse(bp); return XFS_ERROR(EIO); } @@ -2579,24 +2549,18 @@ xlog_recover_do_dquot_trans( * LSN. 
*/ STATIC int -xlog_recover_do_efi_trans( +xlog_recover_efi_pass2( xlog_t *log, xlog_recover_item_t *item, - xfs_lsn_t lsn, - int pass) + xfs_lsn_t lsn) { int error; - xfs_mount_t *mp; + xfs_mount_t *mp = log->l_mp; xfs_efi_log_item_t *efip; xfs_efi_log_format_t *efi_formatp; - if (pass == XLOG_RECOVER_PASS1) { - return 0; - } - efi_formatp = item->ri_buf[0].i_addr; - mp = log->l_mp; efip = xfs_efi_init(mp, efi_formatp->efi_nextents); if ((error = xfs_efi_copy_format(&(item->ri_buf[0]), &(efip->efi_format)))) { @@ -2623,11 +2587,10 @@ xlog_recover_do_efi_trans( * efd format structure. If we find it, we remove the efi from the * AIL and free it. */ -STATIC void -xlog_recover_do_efd_trans( +STATIC int +xlog_recover_efd_pass2( xlog_t *log, - xlog_recover_item_t *item, - int pass) + xlog_recover_item_t *item) { xfs_efd_log_format_t *efd_formatp; xfs_efi_log_item_t *efip = NULL; @@ -2636,10 +2599,6 @@ xlog_recover_do_efd_trans( struct xfs_ail_cursor cur; struct xfs_ail *ailp = log->l_ailp; - if (pass == XLOG_RECOVER_PASS1) { - return; - } - efd_formatp = item->ri_buf[0].i_addr; ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) + ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) || @@ -2671,6 +2630,8 @@ xlog_recover_do_efd_trans( } xfs_trans_ail_cursor_done(ailp, &cur); spin_unlock(&ailp->xa_lock); + + return 0; } /* @@ -2699,31 +2660,59 @@ xlog_recover_free_trans( } STATIC int -xlog_recover_commit_item( +xlog_recover_commit_pass1( struct log *log, struct xlog_recover *trans, - xlog_recover_item_t *item, - int pass) + xlog_recover_item_t *item) { - trace_xfs_log_recover_item_recover(log, trans, item, pass); + trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1); switch (ITEM_TYPE(item)) { case XFS_LI_BUF: - return xlog_recover_do_buffer_trans(log, item, pass); + return xlog_recover_buffer_pass1(log, item); + case XFS_LI_QUOTAOFF: + return xlog_recover_quotaoff_pass1(log, item); case XFS_LI_INODE: - return xlog_recover_do_inode_trans(log, item, pass); case XFS_LI_EFI: - return xlog_recover_do_efi_trans(log, item, trans->r_lsn, pass); case XFS_LI_EFD: - xlog_recover_do_efd_trans(log, item, pass); + case XFS_LI_DQUOT: + /* nothing to do in pass 1 */ return 0; + default: + xlog_warn( + "XFS: invalid item type (%d) xlog_recover_commit_pass1", + ITEM_TYPE(item)); + ASSERT(0); + return XFS_ERROR(EIO); + } +} + +STATIC int +xlog_recover_commit_pass2( + struct log *log, + struct xlog_recover *trans, + xlog_recover_item_t *item) +{ + trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2); + + switch (ITEM_TYPE(item)) { + case XFS_LI_BUF: + return xlog_recover_buffer_pass2(log, item); + case XFS_LI_INODE: + return xlog_recover_inode_pass2(log, item); + case XFS_LI_EFI: + return xlog_recover_efi_pass2(log, item, trans->r_lsn); + case XFS_LI_EFD: + return xlog_recover_efd_pass2(log, item); case XFS_LI_DQUOT: - return xlog_recover_do_dquot_trans(log, item, pass); + return xlog_recover_dquot_pass2(log, item); case XFS_LI_QUOTAOFF: - return xlog_recover_do_quotaoff_trans(log, item, pass); + /* nothing to do in pass2 */ + return 0; default: xlog_warn( - "XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item)); + "XFS: invalid item type (%d) xlog_recover_commit_pass2", + ITEM_TYPE(item)); ASSERT(0); return XFS_ERROR(EIO); } @@ -2751,7 +2740,10 @@ xlog_recover_commit_trans( return error; list_for_each_entry(item, &trans->r_itemq, ri_list) { - error = xlog_recover_commit_item(log, trans, item, pass); + if (pass == XLOG_RECOVER_PASS1) + 
error = xlog_recover_commit_pass1(log, trans, item); + else + error = xlog_recover_commit_pass2(log, trans, item); if (error) return error; } -- cgit v1.2.2 From 85da94c6b4666582c38579ccdcd90a5d9b5697ef Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Dec 2010 08:42:16 +0000 Subject: xfs: improve mapping type check in xfs_vm_writepage Currently we only refuse a "read-only" mapping for writing out unwritten and delayed buffers, and refuse any other for overwrites. Improve the checks to require delalloc mappings for delayed buffers, and unwritten extent mappings for unwritten extents. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_aops.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 691f61223ed6..23a7668e07da 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -1082,17 +1082,17 @@ xfs_vm_writepage( if (buffer_unwritten(bh) || buffer_delay(bh)) { int new_ioend = 0; - /* - * Make sure we don't use a read-only iomap - */ - if (flags == BMAPI_READ) - imap_valid = 0; - if (buffer_unwritten(bh)) { - type = IO_UNWRITTEN; + if (type != IO_UNWRITTEN) { + type = IO_UNWRITTEN; + imap_valid = 0; + } flags = BMAPI_WRITE | BMAPI_IGNSTATE; } else if (buffer_delay(bh)) { - type = IO_DELAY; + if (type != IO_DELAY) { + type = IO_DELAY; + imap_valid = 0; + } flags = BMAPI_ALLOCATE; if (wbc->sync_mode == WB_SYNC_NONE) @@ -1128,8 +1128,11 @@ xfs_vm_writepage( * That means it must already have extents allocated * underneath it. Map the extent by reading it. */ - if (!imap_valid || flags != BMAPI_READ) { + if (flags != BMAPI_READ) { flags = BMAPI_READ; + imap_valid = 0; + } + if (!imap_valid) { size = xfs_probe_cluster(inode, page, bh, head); err = xfs_map_blocks(inode, offset, size, &imap, flags); -- cgit v1.2.2 From 221cb2517e8fc9a1d67c7a8a9c19fc5a916b583f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Dec 2010 08:42:17 +0000 Subject: xfs: remove some dead bio handling code We'll never have BIO_EOPNOTSUPP set after calling submit_bio as this can only happen for discards, and used to happen for barriers, none of which is every submitted by xfs_submit_ioend_bio. Also remove the loop around bio_alloc as it will never fail due to it's mempool backing. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_aops.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 23a7668e07da..ca67ae92c238 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -380,26 +380,18 @@ xfs_submit_ioend_bio( submit_bio(wbc->sync_mode == WB_SYNC_ALL ? 
WRITE_SYNC_PLUG : WRITE, bio); - ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP)); - bio_put(bio); } STATIC struct bio * xfs_alloc_ioend_bio( struct buffer_head *bh) { - struct bio *bio; int nvecs = bio_get_nr_vecs(bh->b_bdev); - - do { - bio = bio_alloc(GFP_NOIO, nvecs); - nvecs >>= 1; - } while (!bio); + struct bio *bio = bio_alloc(GFP_NOIO, nvecs); ASSERT(bio->bi_private == NULL); bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_bdev = bh->b_bdev; - bio_get(bio); return bio; } @@ -470,9 +462,8 @@ xfs_submit_ioend( /* Pass 1 - start writeback */ do { next = ioend->io_list; - for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) { + for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) xfs_start_buffer_writeback(bh); - } } while ((ioend = next) != NULL); /* Pass 2 - submit I/O */ -- cgit v1.2.2 From 6ac7248ec5f20cb44a063d7c7191b8e0068b5a28 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Dec 2010 08:42:18 +0000 Subject: xfs: a few small tweaks for overwrites in xfs_vm_writepage Don't trylock the buffer. We are the only one ever locking it for a regular file address space, and trylock was only copied from the generic code which did it due to the old buffer based writeout in jbd. Also make sure to only write out the buffer if the iomap actually is valid, because we wouldn't have a proper mapping otherwise. In practice we will never get an invalid mapping here as the page lock guarantees truncate doesn't race with us, but better be safe than sorry. Also make sure we allocate a new ioend when crossing boundaries between mappings, just like we do for delalloc and unwritten extents. Again this currently doesn't matter as the I/O end handler only cares for the boundaries for unwritten extents, but this makes the code fully correct and the same as for delalloc/unwritten extents. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_aops.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index ca67ae92c238..1ace78bfbea7 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -1051,6 +1051,8 @@ xfs_vm_writepage( type = IO_NEW; do { + int new_ioend = 0; + if (offset >= end_offset) break; if (!buffer_uptodate(bh)) @@ -1071,8 +1073,6 @@ xfs_vm_writepage( imap_valid = xfs_imap_valid(inode, &imap, offset); if (buffer_unwritten(bh) || buffer_delay(bh)) { - int new_ioend = 0; - if (buffer_unwritten(bh)) { if (type != IO_UNWRITTEN) { type = IO_UNWRITTEN; @@ -1124,6 +1124,7 @@ xfs_vm_writepage( imap_valid = 0; } if (!imap_valid) { + new_ioend = 1; size = xfs_probe_cluster(inode, page, bh, head); err = xfs_map_blocks(inode, offset, size, &imap, flags); @@ -1142,14 +1143,12 @@ xfs_vm_writepage( * that we are writing into for the first time. 
*/ type = IO_NEW; - if (trylock_buffer(bh)) { - if (imap_valid) - all_bh = 1; + if (imap_valid) { + all_bh = 1; + lock_buffer(bh); xfs_add_to_ioend(inode, bh, offset, type, - &ioend, !imap_valid); + &ioend, new_ioend); count++; - } else { - imap_valid = 0; } } else if (PageUptodate(page)) { ASSERT(buffer_mapped(bh)); -- cgit v1.2.2 From 405f80429436d38ab4e6b4c0d99861a1f00648fd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Dec 2010 08:42:19 +0000 Subject: xfs: cleanup the xfs_iomap_write_* helpers Remove passing the BMAPI_* flags to these helpers, in xfs_iomap_write_direct the check BMAPI_DIRECT was always true, and in the xfs_iomap_write_delay path is was never checked at all. Remove the nmap return value as we never make use of it. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/xfs_iomap.c | 45 +++++++++++++++------------------------------ 1 file changed, 15 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 20576146369f..991291068378 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -51,11 +51,11 @@ #define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP STATIC int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t, - int, struct xfs_bmbt_irec *, int *); -STATIC int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, int, - struct xfs_bmbt_irec *, int *); + struct xfs_bmbt_irec *, int); +STATIC int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, + struct xfs_bmbt_irec *); STATIC int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t, - struct xfs_bmbt_irec *, int *); + struct xfs_bmbt_irec *); int xfs_iomap( @@ -134,12 +134,12 @@ xfs_iomap( } if (flags & BMAPI_DIRECT) { - error = xfs_iomap_write_direct(ip, offset, count, flags, - imap, nimaps); + error = xfs_iomap_write_direct(ip, offset, count, imap, + *nimaps); } else { - error = xfs_iomap_write_delay(ip, offset, count, flags, - imap, nimaps); + error = xfs_iomap_write_delay(ip, offset, count, imap); } + if (!error) { trace_xfs_iomap_alloc(ip, offset, count, flags, imap); } @@ -155,13 +155,10 @@ xfs_iomap( break; } - error = xfs_iomap_write_allocate(ip, offset, count, - imap, nimaps); + error = xfs_iomap_write_allocate(ip, offset, count, imap); break; } - ASSERT(*nimaps <= 1); - out: if (lockmode) xfs_iunlock(ip, lockmode); @@ -241,9 +238,8 @@ xfs_iomap_write_direct( xfs_inode_t *ip, xfs_off_t offset, size_t count, - int flags, xfs_bmbt_irec_t *imap, - int *nmaps) + int nmaps) { xfs_mount_t *mp = ip->i_mount; xfs_fileoff_t offset_fsb; @@ -279,7 +275,7 @@ xfs_iomap_write_direct( if (error) goto error_out; } else { - if (*nmaps && (imap->br_startblock == HOLESTARTBLOCK)) + if (nmaps && (imap->br_startblock == HOLESTARTBLOCK)) last_fsb = MIN(last_fsb, (xfs_fileoff_t) imap->br_blockcount + imap->br_startoff); @@ -331,7 +327,7 @@ xfs_iomap_write_direct( xfs_trans_ijoin(tp, ip); bmapi_flag = XFS_BMAPI_WRITE; - if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz)) + if (offset < ip->i_size || extsz) bmapi_flag |= XFS_BMAPI_PREALLOC; /* @@ -370,7 +366,6 @@ xfs_iomap_write_direct( goto error_out; } - *nmaps = 1; return 0; error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ @@ -379,7 +374,6 @@ error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */ error1: /* Just cancel transaction */ xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); - *nmaps = 0; /* nothing set-up here */ error_out: return XFS_ERROR(error); @@ 
-396,7 +390,6 @@ xfs_iomap_eof_want_preallocate( xfs_inode_t *ip, xfs_off_t offset, size_t count, - int ioflag, xfs_bmbt_irec_t *imap, int nimaps, int *prealloc) @@ -440,9 +433,7 @@ xfs_iomap_write_delay( xfs_inode_t *ip, xfs_off_t offset, size_t count, - int ioflag, - xfs_bmbt_irec_t *ret_imap, - int *nmaps) + xfs_bmbt_irec_t *ret_imap) { xfs_mount_t *mp = ip->i_mount; xfs_fileoff_t offset_fsb; @@ -470,7 +461,7 @@ xfs_iomap_write_delay( offset_fsb = XFS_B_TO_FSBT(mp, offset); error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count, - ioflag, imap, XFS_WRITE_IMAPS, &prealloc); + imap, XFS_WRITE_IMAPS, &prealloc); if (error) return error; @@ -523,8 +514,6 @@ retry: return xfs_cmn_err_fsblock_zero(ip, &imap[0]); *ret_imap = imap[0]; - *nmaps = 1; - return 0; } @@ -543,8 +532,7 @@ xfs_iomap_write_allocate( xfs_inode_t *ip, xfs_off_t offset, size_t count, - xfs_bmbt_irec_t *imap, - int *retmap) + xfs_bmbt_irec_t *imap) { xfs_mount_t *mp = ip->i_mount; xfs_fileoff_t offset_fsb, last_block; @@ -557,8 +545,6 @@ xfs_iomap_write_allocate( int error = 0; int nres; - *retmap = 0; - /* * Make sure that the dquots are there. */ @@ -680,7 +666,6 @@ xfs_iomap_write_allocate( if ((offset_fsb >= imap->br_startoff) && (offset_fsb < (imap->br_startoff + imap->br_blockcount))) { - *retmap = 1; XFS_STATS_INC(xs_xstrat_quick); return 0; } -- cgit v1.2.2 From a206c817c864583c44e2f418db8e6c7a000fbc38 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Dec 2010 08:42:20 +0000 Subject: xfs: kill xfs_iomap Opencode the xfs_iomap code in it's two callers. The overlap of passed flags already was minimal and will be further reduced in the next patch. As a side effect the BMAPI_* flags for xfs_bmapi and the IO_* flags for I/O end processing are merged into a single set of flags, which should be a bit more descriptive of the operation we perform. Also improve the tracing by giving each caller it's own type set of tracepoints. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_aops.c | 211 +++++++++++++++++++++++++++++++------------ fs/xfs/linux-2.6/xfs_aops.h | 16 ++++ fs/xfs/linux-2.6/xfs_trace.h | 28 +++--- fs/xfs/xfs_iomap.c | 122 +------------------------ fs/xfs/xfs_iomap.h | 27 ++---- 5 files changed, 191 insertions(+), 213 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 1ace78bfbea7..365040f61d89 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -38,15 +38,6 @@ #include #include -/* - * Types of I/O for bmap clustering and I/O completion tracking. - */ -enum { - IO_READ, /* mapping for a read */ - IO_DELAY, /* mapping covers delalloc region */ - IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */ - IO_NEW /* just allocated */ -}; /* * Prime number of hash buckets since address is used as the key. @@ -182,9 +173,6 @@ xfs_setfilesize( xfs_inode_t *ip = XFS_I(ioend->io_inode); xfs_fsize_t isize; - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG); - ASSERT(ioend->io_type != IO_READ); - if (unlikely(ioend->io_error)) return 0; @@ -244,10 +232,8 @@ xfs_end_io( * We might have to update the on-disk file size after extending * writes. 
*/ - if (ioend->io_type != IO_READ) { - error = xfs_setfilesize(ioend); - ASSERT(!error || error == EAGAIN); - } + error = xfs_setfilesize(ioend); + ASSERT(!error || error == EAGAIN); /* * If we didn't complete processing of the ioend, requeue it to the @@ -320,12 +306,88 @@ xfs_map_blocks( loff_t offset, ssize_t count, struct xfs_bmbt_irec *imap, - int flags) + int type, + int nonblocking) { - int nmaps = 1; - int new = 0; + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + xfs_fileoff_t offset_fsb, end_fsb; + int error = 0; + int lockmode = 0; + int bmapi_flags = XFS_BMAPI_ENTIRE; + int nimaps = 1; + + if (XFS_FORCED_SHUTDOWN(mp)) + return -XFS_ERROR(EIO); + + switch (type) { + case IO_OVERWRITE: + lockmode = xfs_ilock_map_shared(ip); + break; + case IO_UNWRITTEN: + lockmode = XFS_ILOCK_EXCL; + bmapi_flags |= XFS_BMAPI_IGSTATE; + xfs_ilock(ip, lockmode); + break; + case IO_DELALLOC: + lockmode = XFS_ILOCK_SHARED; + + if (!xfs_ilock_nowait(ip, lockmode)) { + if (nonblocking) + return -XFS_ERROR(EAGAIN); + xfs_ilock(ip, lockmode); + } + break; + } + + ASSERT(offset <= mp->m_maxioffset); + if (offset + count > mp->m_maxioffset) + count = mp->m_maxioffset - offset; + end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); + offset_fsb = XFS_B_TO_FSBT(mp, offset); + + error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb, + bmapi_flags, NULL, 0, imap, &nimaps, NULL); + if (error) + goto out; + + switch (type) { + case IO_UNWRITTEN: + /* If we found an extent, return it */ + if (nimaps && + (imap->br_startblock != HOLESTARTBLOCK) && + (imap->br_startblock != DELAYSTARTBLOCK)) { + trace_xfs_map_blocks_found(ip, offset, count, type, imap); + break; + } + + error = xfs_iomap_write_delay(ip, offset, count, imap); + if (!error) + trace_xfs_map_blocks_alloc(ip, offset, count, type, imap); + break; + case IO_DELALLOC: + /* If we found an extent, return it */ + xfs_iunlock(ip, lockmode); + lockmode = 0; + + if (nimaps && !isnullstartblock(imap->br_startblock)) { + trace_xfs_map_blocks_found(ip, offset, count, type, imap); + break; + } + + error = xfs_iomap_write_allocate(ip, offset, count, imap); + if (!error) + trace_xfs_map_blocks_alloc(ip, offset, count, type, imap); + break; + default: + if (nimaps) + trace_xfs_map_blocks_found(ip, offset, count, type, imap); + } - return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new); +out: + if (lockmode) + xfs_iunlock(ip, lockmode); + return -XFS_ERROR(error); } STATIC int @@ -722,9 +784,9 @@ xfs_is_delayed_page( if (buffer_unwritten(bh)) acceptable = (type == IO_UNWRITTEN); else if (buffer_delay(bh)) - acceptable = (type == IO_DELAY); + acceptable = (type == IO_DELALLOC); else if (buffer_dirty(bh) && buffer_mapped(bh)) - acceptable = (type == IO_NEW); + acceptable = (type == IO_OVERWRITE); else break; } while ((bh = bh->b_this_page) != head); @@ -809,7 +871,7 @@ xfs_convert_page( if (buffer_unwritten(bh)) type = IO_UNWRITTEN; else - type = IO_DELAY; + type = IO_DELALLOC; if (!xfs_imap_valid(inode, imap, offset)) { done = 1; @@ -826,7 +888,7 @@ xfs_convert_page( page_dirty--; count++; } else { - type = IO_NEW; + type = IO_OVERWRITE; if (buffer_mapped(bh) && all_bh) { lock_buffer(bh); xfs_add_to_ioend(inode, bh, offset, @@ -926,7 +988,7 @@ xfs_aops_discard_page( struct buffer_head *bh, *head; loff_t offset = page_offset(page); - if (!xfs_is_delayed_page(page, IO_DELAY)) + if (!xfs_is_delayed_page(page, IO_DELALLOC)) goto out_invalidate; if (XFS_FORCED_SHUTDOWN(ip->i_mount)) @@ -994,9 +1056,10 @@ 
xfs_vm_writepage( __uint64_t end_offset; pgoff_t end_index, last_index; ssize_t size, len; - int flags, err, imap_valid = 0, uptodate = 1; + int err, imap_valid = 0, uptodate = 1; int count = 0; int all_bh = 0; + int nonblocking = 0; trace_xfs_writepage(inode, page, 0); @@ -1047,8 +1110,10 @@ xfs_vm_writepage( bh = head = page_buffers(page); offset = page_offset(page); - flags = BMAPI_READ; - type = IO_NEW; + type = IO_OVERWRITE; + + if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking) + nonblocking = 1; do { int new_ioend = 0; @@ -1078,16 +1143,11 @@ xfs_vm_writepage( type = IO_UNWRITTEN; imap_valid = 0; } - flags = BMAPI_WRITE | BMAPI_IGNSTATE; } else if (buffer_delay(bh)) { - if (type != IO_DELAY) { - type = IO_DELAY; + if (type != IO_DELALLOC) { + type = IO_DELALLOC; imap_valid = 0; } - flags = BMAPI_ALLOCATE; - - if (wbc->sync_mode == WB_SYNC_NONE) - flags |= BMAPI_TRYLOCK; } if (!imap_valid) { @@ -1100,8 +1160,8 @@ xfs_vm_writepage( * for unwritten extent conversion. */ new_ioend = 1; - err = xfs_map_blocks(inode, offset, len, - &imap, flags); + err = xfs_map_blocks(inode, offset, len, &imap, + type, nonblocking); if (err) goto error; imap_valid = xfs_imap_valid(inode, &imap, @@ -1119,30 +1179,21 @@ xfs_vm_writepage( * That means it must already have extents allocated * underneath it. Map the extent by reading it. */ - if (flags != BMAPI_READ) { - flags = BMAPI_READ; + if (type != IO_OVERWRITE) { + type = IO_OVERWRITE; imap_valid = 0; } if (!imap_valid) { new_ioend = 1; size = xfs_probe_cluster(inode, page, bh, head); err = xfs_map_blocks(inode, offset, size, - &imap, flags); + &imap, type, nonblocking); if (err) goto error; imap_valid = xfs_imap_valid(inode, &imap, offset); } - /* - * We set the type to IO_NEW in case we are doing a - * small write at EOF that is extending the file but - * without needing an allocation. We need to update the - * file size on I/O completion in this case so it is - * the same case as having just allocated a new extent - * that we are writing into for the first time. - */ - type = IO_NEW; if (imap_valid) { all_bh = 1; lock_buffer(bh); @@ -1250,13 +1301,19 @@ __xfs_get_blocks( int create, int direct) { - int flags = create ? 
BMAPI_WRITE : BMAPI_READ; + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + xfs_fileoff_t offset_fsb, end_fsb; + int error = 0; + int lockmode = 0; struct xfs_bmbt_irec imap; + int nimaps = 1; xfs_off_t offset; ssize_t size; - int nimap = 1; int new = 0; - int error; + + if (XFS_FORCED_SHUTDOWN(mp)) + return -XFS_ERROR(EIO); offset = (xfs_off_t)iblock << inode->i_blkbits; ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); @@ -1265,15 +1322,45 @@ __xfs_get_blocks( if (!create && direct && offset >= i_size_read(inode)) return 0; - if (direct && create) - flags |= BMAPI_DIRECT; + if (create) { + lockmode = XFS_ILOCK_EXCL; + xfs_ilock(ip, lockmode); + } else { + lockmode = xfs_ilock_map_shared(ip); + } + + ASSERT(offset <= mp->m_maxioffset); + if (offset + size > mp->m_maxioffset) + size = mp->m_maxioffset - offset; + end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size); + offset_fsb = XFS_B_TO_FSBT(mp, offset); - error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap, - &new); + error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb, + XFS_BMAPI_ENTIRE, NULL, 0, &imap, &nimaps, NULL); if (error) - return -error; - if (nimap == 0) - return 0; + goto out_unlock; + + if (create && + (!nimaps || + (imap.br_startblock == HOLESTARTBLOCK || + imap.br_startblock == DELAYSTARTBLOCK))) { + if (direct) { + error = xfs_iomap_write_direct(ip, offset, size, + &imap, nimaps); + } else { + error = xfs_iomap_write_delay(ip, offset, size, &imap); + } + if (error) + goto out_unlock; + + trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap); + } else if (nimaps) { + trace_xfs_get_blocks_found(ip, offset, size, 0, &imap); + } else { + trace_xfs_get_blocks_notfound(ip, offset, size); + goto out_unlock; + } + xfs_iunlock(ip, lockmode); if (imap.br_startblock != HOLESTARTBLOCK && imap.br_startblock != DELAYSTARTBLOCK) { @@ -1340,6 +1427,10 @@ __xfs_get_blocks( } return 0; + +out_unlock: + xfs_iunlock(ip, lockmode); + return -error; } int @@ -1427,7 +1518,7 @@ xfs_vm_direct_IO( ssize_t ret; if (rw & WRITE) { - iocb->private = xfs_alloc_ioend(inode, IO_NEW); + iocb->private = xfs_alloc_ioend(inode, IO_DIRECT); ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, nr_segs, diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h index c5057fb6237a..71f721e1a71f 100644 --- a/fs/xfs/linux-2.6/xfs_aops.h +++ b/fs/xfs/linux-2.6/xfs_aops.h @@ -22,6 +22,22 @@ extern struct workqueue_struct *xfsdatad_workqueue; extern struct workqueue_struct *xfsconvertd_workqueue; extern mempool_t *xfs_ioend_pool; +/* + * Types of I/O for bmap clustering and I/O completion tracking. + */ +enum { + IO_DIRECT = 0, /* special case for direct I/O ioends */ + IO_DELALLOC, /* mapping covers delalloc region */ + IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */ + IO_OVERWRITE, /* mapping covers already allocated extent */ +}; + +#define XFS_IO_TYPES \ + { 0, "" }, \ + { IO_DELALLOC, "delalloc" }, \ + { IO_UNWRITTEN, "unwritten" }, \ + { IO_OVERWRITE, "overwrite" } + /* * xfs_ioend struct manages large extent writes for XFS. * It can manage several multi-page bio's at once. 
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index acef2e98c594..f56431c916a0 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -935,10 +935,10 @@ DEFINE_PAGE_EVENT(xfs_writepage); DEFINE_PAGE_EVENT(xfs_releasepage); DEFINE_PAGE_EVENT(xfs_invalidatepage); -DECLARE_EVENT_CLASS(xfs_iomap_class, +DECLARE_EVENT_CLASS(xfs_imap_class, TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, - int flags, struct xfs_bmbt_irec *irec), - TP_ARGS(ip, offset, count, flags, irec), + int type, struct xfs_bmbt_irec *irec), + TP_ARGS(ip, offset, count, type, irec), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) @@ -946,7 +946,7 @@ DECLARE_EVENT_CLASS(xfs_iomap_class, __field(loff_t, new_size) __field(loff_t, offset) __field(size_t, count) - __field(int, flags) + __field(int, type) __field(xfs_fileoff_t, startoff) __field(xfs_fsblock_t, startblock) __field(xfs_filblks_t, blockcount) @@ -958,13 +958,13 @@ DECLARE_EVENT_CLASS(xfs_iomap_class, __entry->new_size = ip->i_new_size; __entry->offset = offset; __entry->count = count; - __entry->flags = flags; + __entry->type = type; __entry->startoff = irec ? irec->br_startoff : 0; __entry->startblock = irec ? irec->br_startblock : 0; __entry->blockcount = irec ? irec->br_blockcount : 0; ), TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " - "offset 0x%llx count %zd flags %s " + "offset 0x%llx count %zd type %s " "startoff 0x%llx startblock %lld blockcount 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, @@ -972,20 +972,21 @@ DECLARE_EVENT_CLASS(xfs_iomap_class, __entry->new_size, __entry->offset, __entry->count, - __print_flags(__entry->flags, "|", BMAPI_FLAGS), + __print_symbolic(__entry->type, XFS_IO_TYPES), __entry->startoff, (__int64_t)__entry->startblock, __entry->blockcount) ) #define DEFINE_IOMAP_EVENT(name) \ -DEFINE_EVENT(xfs_iomap_class, name, \ +DEFINE_EVENT(xfs_imap_class, name, \ TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \ - int flags, struct xfs_bmbt_irec *irec), \ - TP_ARGS(ip, offset, count, flags, irec)) -DEFINE_IOMAP_EVENT(xfs_iomap_enter); -DEFINE_IOMAP_EVENT(xfs_iomap_found); -DEFINE_IOMAP_EVENT(xfs_iomap_alloc); + int type, struct xfs_bmbt_irec *irec), \ + TP_ARGS(ip, offset, count, type, irec)) +DEFINE_IOMAP_EVENT(xfs_map_blocks_found); +DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc); +DEFINE_IOMAP_EVENT(xfs_get_blocks_found); +DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc); DECLARE_EVENT_CLASS(xfs_simple_io_class, TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), @@ -1022,6 +1023,7 @@ DEFINE_EVENT(xfs_simple_io_class, name, \ TP_ARGS(ip, offset, count)) DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc); DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert); +DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound); TRACE_EVENT(xfs_itruncate_start, diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 991291068378..22b62a179e89 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -47,124 +47,8 @@ #define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \ << mp->m_writeio_log) -#define XFS_STRAT_WRITE_IMAPS 2 #define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP -STATIC int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t, - struct xfs_bmbt_irec *, int); -STATIC int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, - struct xfs_bmbt_irec *); -STATIC int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t, - struct xfs_bmbt_irec *); - -int -xfs_iomap( - struct xfs_inode *ip, - xfs_off_t offset, - 
ssize_t count, - int flags, - struct xfs_bmbt_irec *imap, - int *nimaps, - int *new) -{ - struct xfs_mount *mp = ip->i_mount; - xfs_fileoff_t offset_fsb, end_fsb; - int error = 0; - int lockmode = 0; - int bmapi_flags = 0; - - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG); - - *new = 0; - - if (XFS_FORCED_SHUTDOWN(mp)) - return XFS_ERROR(EIO); - - trace_xfs_iomap_enter(ip, offset, count, flags, NULL); - - switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) { - case BMAPI_READ: - lockmode = xfs_ilock_map_shared(ip); - bmapi_flags = XFS_BMAPI_ENTIRE; - break; - case BMAPI_WRITE: - lockmode = XFS_ILOCK_EXCL; - if (flags & BMAPI_IGNSTATE) - bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE; - xfs_ilock(ip, lockmode); - break; - case BMAPI_ALLOCATE: - lockmode = XFS_ILOCK_SHARED; - bmapi_flags = XFS_BMAPI_ENTIRE; - - /* Attempt non-blocking lock */ - if (flags & BMAPI_TRYLOCK) { - if (!xfs_ilock_nowait(ip, lockmode)) - return XFS_ERROR(EAGAIN); - } else { - xfs_ilock(ip, lockmode); - } - break; - default: - BUG(); - } - - ASSERT(offset <= mp->m_maxioffset); - if ((xfs_fsize_t)offset + count > mp->m_maxioffset) - count = mp->m_maxioffset - offset; - end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); - offset_fsb = XFS_B_TO_FSBT(mp, offset); - - error = xfs_bmapi(NULL, ip, offset_fsb, - (xfs_filblks_t)(end_fsb - offset_fsb), - bmapi_flags, NULL, 0, imap, - nimaps, NULL); - - if (error) - goto out; - - switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) { - case BMAPI_WRITE: - /* If we found an extent, return it */ - if (*nimaps && - (imap->br_startblock != HOLESTARTBLOCK) && - (imap->br_startblock != DELAYSTARTBLOCK)) { - trace_xfs_iomap_found(ip, offset, count, flags, imap); - break; - } - - if (flags & BMAPI_DIRECT) { - error = xfs_iomap_write_direct(ip, offset, count, imap, - *nimaps); - } else { - error = xfs_iomap_write_delay(ip, offset, count, imap); - } - - if (!error) { - trace_xfs_iomap_alloc(ip, offset, count, flags, imap); - } - *new = 1; - break; - case BMAPI_ALLOCATE: - /* If we found an extent, return it */ - xfs_iunlock(ip, lockmode); - lockmode = 0; - - if (*nimaps && !isnullstartblock(imap->br_startblock)) { - trace_xfs_iomap_found(ip, offset, count, flags, imap); - break; - } - - error = xfs_iomap_write_allocate(ip, offset, count, imap); - break; - } - -out: - if (lockmode) - xfs_iunlock(ip, lockmode); - return XFS_ERROR(error); -} - STATIC int xfs_iomap_eof_align_last_fsb( xfs_mount_t *mp, @@ -233,7 +117,7 @@ xfs_cmn_err_fsblock_zero( return EFSCORRUPTED; } -STATIC int +int xfs_iomap_write_direct( xfs_inode_t *ip, xfs_off_t offset, @@ -428,7 +312,7 @@ xfs_iomap_eof_want_preallocate( return 0; } -STATIC int +int xfs_iomap_write_delay( xfs_inode_t *ip, xfs_off_t offset, @@ -527,7 +411,7 @@ retry: * We no longer bother to look at the incoming map - all we have to * guarantee is that whatever we allocate fills the required range. 
*/ -STATIC int +int xfs_iomap_write_allocate( xfs_inode_t *ip, xfs_off_t offset, diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h index 7748a430f50d..80615760959a 100644 --- a/fs/xfs/xfs_iomap.h +++ b/fs/xfs/xfs_iomap.h @@ -18,30 +18,15 @@ #ifndef __XFS_IOMAP_H__ #define __XFS_IOMAP_H__ -/* base extent manipulation calls */ -#define BMAPI_READ (1 << 0) /* read extents */ -#define BMAPI_WRITE (1 << 1) /* create extents */ -#define BMAPI_ALLOCATE (1 << 2) /* delayed allocate to real extents */ - -/* modifiers */ -#define BMAPI_IGNSTATE (1 << 4) /* ignore unwritten state on read */ -#define BMAPI_DIRECT (1 << 5) /* direct instead of buffered write */ -#define BMAPI_MMA (1 << 6) /* allocate for mmap write */ -#define BMAPI_TRYLOCK (1 << 7) /* non-blocking request */ - -#define BMAPI_FLAGS \ - { BMAPI_READ, "READ" }, \ - { BMAPI_WRITE, "WRITE" }, \ - { BMAPI_ALLOCATE, "ALLOCATE" }, \ - { BMAPI_IGNSTATE, "IGNSTATE" }, \ - { BMAPI_DIRECT, "DIRECT" }, \ - { BMAPI_TRYLOCK, "TRYLOCK" } - struct xfs_inode; struct xfs_bmbt_irec; -extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int, - struct xfs_bmbt_irec *, int *, int *); +extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t, + struct xfs_bmbt_irec *, int); +extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, + struct xfs_bmbt_irec *); +extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t, + struct xfs_bmbt_irec *); extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t); #endif /* __XFS_IOMAP_H__*/ -- cgit v1.2.2 From 8ff2957d581582890693affc09920108a67cb05d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Dec 2010 08:42:21 +0000 Subject: xfs: simplify xfs_map_blocks No need to lock the extent map exclusive when performing an overwrite, we know the extent map must already have been loaded by get_blocks. Apply the non-blocking inode semantics to all mapping types instead of just delayed allocations. Remove the handling of not yet allocated blocks for the IO_UNWRITTEN case - if an extent is marked as unwritten allocated in the buffer it must already have an extent on disk. Add asserts to verify all the assumptions above in debug builds. 
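In outline, the locking collapses to a single shared-lock attempt for every mapping type; a trimmed sketch of the resulting xfs_map_blocks() flow (size clamping, asserts and the delalloc allocation path omitted -- the full hunk follows below):

	if (type == IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
		if (nonblocking)
			return -XFS_ERROR(EAGAIN);
		xfs_ilock(ip, XFS_ILOCK_SHARED);
	}

	/* map the extent with xfs_bmapi() under the shared lock */

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
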
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_aops.c | 77 +++++++++++++++------------------------------ 1 file changed, 25 insertions(+), 52 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 365040f61d89..1252a8443429 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -313,81 +313,54 @@ xfs_map_blocks( struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t offset_fsb, end_fsb; int error = 0; - int lockmode = 0; int bmapi_flags = XFS_BMAPI_ENTIRE; int nimaps = 1; if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); - switch (type) { - case IO_OVERWRITE: - lockmode = xfs_ilock_map_shared(ip); - break; - case IO_UNWRITTEN: - lockmode = XFS_ILOCK_EXCL; + if (type == IO_UNWRITTEN) bmapi_flags |= XFS_BMAPI_IGSTATE; - xfs_ilock(ip, lockmode); - break; - case IO_DELALLOC: - lockmode = XFS_ILOCK_SHARED; - - if (!xfs_ilock_nowait(ip, lockmode)) { - if (nonblocking) - return -XFS_ERROR(EAGAIN); - xfs_ilock(ip, lockmode); - } - break; + + if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { + if (nonblocking) + return -XFS_ERROR(EAGAIN); + xfs_ilock(ip, XFS_ILOCK_SHARED); } + ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || + (ip->i_df.if_flags & XFS_IFEXTENTS)); ASSERT(offset <= mp->m_maxioffset); + if (offset + count > mp->m_maxioffset) count = mp->m_maxioffset - offset; end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); offset_fsb = XFS_B_TO_FSBT(mp, offset); - error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb, bmapi_flags, NULL, 0, imap, &nimaps, NULL); - if (error) - goto out; - - switch (type) { - case IO_UNWRITTEN: - /* If we found an extent, return it */ - if (nimaps && - (imap->br_startblock != HOLESTARTBLOCK) && - (imap->br_startblock != DELAYSTARTBLOCK)) { - trace_xfs_map_blocks_found(ip, offset, count, type, imap); - break; - } + xfs_iunlock(ip, XFS_ILOCK_SHARED); - error = xfs_iomap_write_delay(ip, offset, count, imap); - if (!error) - trace_xfs_map_blocks_alloc(ip, offset, count, type, imap); - break; - case IO_DELALLOC: - /* If we found an extent, return it */ - xfs_iunlock(ip, lockmode); - lockmode = 0; - - if (nimaps && !isnullstartblock(imap->br_startblock)) { - trace_xfs_map_blocks_found(ip, offset, count, type, imap); - break; - } + if (error) + return -XFS_ERROR(error); + if (type == IO_DELALLOC && + (!nimaps || isnullstartblock(imap->br_startblock))) { error = xfs_iomap_write_allocate(ip, offset, count, imap); if (!error) trace_xfs_map_blocks_alloc(ip, offset, count, type, imap); - break; - default: - if (nimaps) - trace_xfs_map_blocks_found(ip, offset, count, type, imap); + return -XFS_ERROR(error); } -out: - if (lockmode) - xfs_iunlock(ip, lockmode); - return -XFS_ERROR(error); +#ifdef DEBUG + if (type == IO_UNWRITTEN) { + ASSERT(nimaps); + ASSERT(imap->br_startblock != HOLESTARTBLOCK); + ASSERT(imap->br_startblock != DELAYSTARTBLOCK); + } +#endif + if (nimaps) + trace_xfs_map_blocks_found(ip, offset, count, type, imap); + return 0; } STATIC int -- cgit v1.2.2 From ed1e7b7e484dfb64168755613d499f32a97409bd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Dec 2010 08:42:22 +0000 Subject: xfs: remove xfs_probe_cluster xfs_map_blocks always calls xfs_bmapi with the XFS_BMAPI_ENTIRE entire flag, which tells it to not cap the extent at the passed in size, but just treat the size as an minimum to map. This means xfs_probe_cluster is entirely useless as we'll always get the whole extent back anyway. 
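Concretely, the writeback path now only asks for a single block and lets XFS_BMAPI_ENTIRE stretch the mapping to the whole extent; a trimmed sketch of the call (see the hunk below for the full version):

	ssize_t	count = 1 << inode->i_blkbits;	/* one block is all we need to ask for */
	int	bmapi_flags = XFS_BMAPI_ENTIRE;	/* do not cap the mapping at count */

	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
			  bmapi_flags, NULL, 0, imap, &nimaps, NULL);
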
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_aops.c | 111 ++------------------------------------------ 1 file changed, 4 insertions(+), 107 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 1252a8443429..c3bc7690f043 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -304,13 +304,13 @@ STATIC int xfs_map_blocks( struct inode *inode, loff_t offset, - ssize_t count, struct xfs_bmbt_irec *imap, int type, int nonblocking) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; + ssize_t count = 1 << inode->i_blkbits; xfs_fileoff_t offset_fsb, end_fsb; int error = 0; int bmapi_flags = XFS_BMAPI_ENTIRE; @@ -634,108 +634,6 @@ xfs_map_at_offset( clear_buffer_unwritten(bh); } -/* - * Look for a page at index that is suitable for clustering. - */ -STATIC unsigned int -xfs_probe_page( - struct page *page, - unsigned int pg_offset) -{ - struct buffer_head *bh, *head; - int ret = 0; - - if (PageWriteback(page)) - return 0; - if (!PageDirty(page)) - return 0; - if (!page->mapping) - return 0; - if (!page_has_buffers(page)) - return 0; - - bh = head = page_buffers(page); - do { - if (!buffer_uptodate(bh)) - break; - if (!buffer_mapped(bh)) - break; - ret += bh->b_size; - if (ret >= pg_offset) - break; - } while ((bh = bh->b_this_page) != head); - - return ret; -} - -STATIC size_t -xfs_probe_cluster( - struct inode *inode, - struct page *startpage, - struct buffer_head *bh, - struct buffer_head *head) -{ - struct pagevec pvec; - pgoff_t tindex, tlast, tloff; - size_t total = 0; - int done = 0, i; - - /* First sum forwards in this page */ - do { - if (!buffer_uptodate(bh) || !buffer_mapped(bh)) - return total; - total += bh->b_size; - } while ((bh = bh->b_this_page) != head); - - /* if we reached the end of the page, sum forwards in following pages */ - tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT; - tindex = startpage->index + 1; - - /* Prune this back to avoid pathological behavior */ - tloff = min(tlast, startpage->index + 64); - - pagevec_init(&pvec, 0); - while (!done && tindex <= tloff) { - unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1); - - if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len)) - break; - - for (i = 0; i < pagevec_count(&pvec); i++) { - struct page *page = pvec.pages[i]; - size_t pg_offset, pg_len = 0; - - if (tindex == tlast) { - pg_offset = - i_size_read(inode) & (PAGE_CACHE_SIZE - 1); - if (!pg_offset) { - done = 1; - break; - } - } else - pg_offset = PAGE_CACHE_SIZE; - - if (page->index == tindex && trylock_page(page)) { - pg_len = xfs_probe_page(page, pg_offset); - unlock_page(page); - } - - if (!pg_len) { - done = 1; - break; - } - - total += pg_len; - tindex++; - } - - pagevec_release(&pvec); - cond_resched(); - } - - return total; -} - /* * Test if a given page is suitable for writing as part of an unwritten * or delayed allocate extent. @@ -1028,7 +926,7 @@ xfs_vm_writepage( unsigned int type; __uint64_t end_offset; pgoff_t end_index, last_index; - ssize_t size, len; + ssize_t len; int err, imap_valid = 0, uptodate = 1; int count = 0; int all_bh = 0; @@ -1133,7 +1031,7 @@ xfs_vm_writepage( * for unwritten extent conversion. 
*/ new_ioend = 1; - err = xfs_map_blocks(inode, offset, len, &imap, + err = xfs_map_blocks(inode, offset, &imap, type, nonblocking); if (err) goto error; @@ -1158,8 +1056,7 @@ xfs_vm_writepage( } if (!imap_valid) { new_ioend = 1; - size = xfs_probe_cluster(inode, page, bh, head); - err = xfs_map_blocks(inode, offset, size, + err = xfs_map_blocks(inode, offset, &imap, type, nonblocking); if (err) goto error; -- cgit v1.2.2 From 2fa24f92530edaf86c3b5f662464e0d2e3b3e517 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Dec 2010 08:42:23 +0000 Subject: xfs: remove the all_bh flag from xfs_convert_page The all_bh flag is always set when entering the page clustering machinery with a regular written extent, which means the check for it is superflous. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_aops.c | 33 +++++++++++++-------------------- 1 file changed, 13 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index c3bc7690f043..86f57f61939b 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -682,8 +682,7 @@ xfs_convert_page( loff_t tindex, struct xfs_bmbt_irec *imap, xfs_ioend_t **ioendp, - struct writeback_control *wbc, - int all_bh) + struct writeback_control *wbc) { struct buffer_head *bh, *head; xfs_off_t end_offset; @@ -738,11 +737,14 @@ xfs_convert_page( continue; } - if (buffer_unwritten(bh) || buffer_delay(bh)) { + if (buffer_unwritten(bh) || buffer_delay(bh) || + buffer_mapped(bh)) { if (buffer_unwritten(bh)) type = IO_UNWRITTEN; - else + else if (buffer_delay(bh)) type = IO_DELALLOC; + else + type = IO_OVERWRITE; if (!xfs_imap_valid(inode, imap, offset)) { done = 1; @@ -752,23 +754,17 @@ xfs_convert_page( ASSERT(imap->br_startblock != HOLESTARTBLOCK); ASSERT(imap->br_startblock != DELAYSTARTBLOCK); - xfs_map_at_offset(inode, bh, imap, offset); + if (type == IO_OVERWRITE) + lock_buffer(bh); + else + xfs_map_at_offset(inode, bh, imap, offset); xfs_add_to_ioend(inode, bh, offset, type, ioendp, done); page_dirty--; count++; } else { - type = IO_OVERWRITE; - if (buffer_mapped(bh) && all_bh) { - lock_buffer(bh); - xfs_add_to_ioend(inode, bh, offset, - type, ioendp, done); - count++; - page_dirty--; - } else { - done = 1; - } + done = 1; } } while (offset += len, (bh = bh->b_this_page) != head); @@ -800,7 +796,6 @@ xfs_cluster_write( struct xfs_bmbt_irec *imap, xfs_ioend_t **ioendp, struct writeback_control *wbc, - int all_bh, pgoff_t tlast) { struct pagevec pvec; @@ -815,7 +810,7 @@ xfs_cluster_write( for (i = 0; i < pagevec_count(&pvec); i++) { done = xfs_convert_page(inode, pvec.pages[i], tindex++, - imap, ioendp, wbc, all_bh); + imap, ioendp, wbc); if (done) break; } @@ -929,7 +924,6 @@ xfs_vm_writepage( ssize_t len; int err, imap_valid = 0, uptodate = 1; int count = 0; - int all_bh = 0; int nonblocking = 0; trace_xfs_writepage(inode, page, 0); @@ -1065,7 +1059,6 @@ xfs_vm_writepage( } if (imap_valid) { - all_bh = 1; lock_buffer(bh); xfs_add_to_ioend(inode, bh, offset, type, &ioend, new_ioend); @@ -1102,7 +1095,7 @@ xfs_vm_writepage( end_index = last_index; xfs_cluster_write(inode, page->index + 1, &imap, &ioend, - wbc, all_bh, end_index); + wbc, end_index); } if (iohead) -- cgit v1.2.2 From aeea1b1f81800e362a3aca86d769d02e137a8fa7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Dec 2010 08:42:24 +0000 Subject: xfs: refactor xfs_vm_writepage After the last patches the code for overwrites is the same as 
for delayed and unwritten extents except that it doesn't need to call xfs_map_at_offset. Take care of that fact to simplify xfs_vm_writepage. The buffer loop now first checks the type of buffer and checks/sets the ioend type, or continues to the next buffer if it's not interesting to us. Only after that we validate the iomap and perform the block mapping if needed, all in common code for the cases where we have to do work. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_aops.c | 97 ++++++++++++++++++--------------------------- 1 file changed, 39 insertions(+), 58 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 86f57f61939b..4d982dc8b862 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -999,74 +999,55 @@ xfs_vm_writepage( continue; } - if (imap_valid) - imap_valid = xfs_imap_valid(inode, &imap, offset); - - if (buffer_unwritten(bh) || buffer_delay(bh)) { - if (buffer_unwritten(bh)) { - if (type != IO_UNWRITTEN) { - type = IO_UNWRITTEN; - imap_valid = 0; - } - } else if (buffer_delay(bh)) { - if (type != IO_DELALLOC) { - type = IO_DELALLOC; - imap_valid = 0; - } - } - - if (!imap_valid) { - /* - * If we didn't have a valid mapping then we - * need to ensure that we put the new mapping - * in a new ioend structure. This needs to be - * done to ensure that the ioends correctly - * reflect the block mappings at io completion - * for unwritten extent conversion. - */ - new_ioend = 1; - err = xfs_map_blocks(inode, offset, &imap, - type, nonblocking); - if (err) - goto error; - imap_valid = xfs_imap_valid(inode, &imap, - offset); + if (buffer_unwritten(bh)) { + if (type != IO_UNWRITTEN) { + type = IO_UNWRITTEN; + imap_valid = 0; } - if (imap_valid) { - xfs_map_at_offset(inode, bh, &imap, offset); - xfs_add_to_ioend(inode, bh, offset, type, - &ioend, new_ioend); - count++; + } else if (buffer_delay(bh)) { + if (type != IO_DELALLOC) { + type = IO_DELALLOC; + imap_valid = 0; } } else if (buffer_uptodate(bh)) { - /* - * we got here because the buffer is already mapped. - * That means it must already have extents allocated - * underneath it. Map the extent by reading it. - */ if (type != IO_OVERWRITE) { type = IO_OVERWRITE; imap_valid = 0; } - if (!imap_valid) { - new_ioend = 1; - err = xfs_map_blocks(inode, offset, - &imap, type, nonblocking); - if (err) - goto error; - imap_valid = xfs_imap_valid(inode, &imap, - offset); + } else { + if (PageUptodate(page)) { + ASSERT(buffer_mapped(bh)); + imap_valid = 0; } + continue; + } - if (imap_valid) { + if (imap_valid) + imap_valid = xfs_imap_valid(inode, &imap, offset); + if (!imap_valid) { + /* + * If we didn't have a valid mapping then we need to + * put the new mapping into a separate ioend structure. + * This ensures non-contiguous extents always have + * separate ioends, which is particularly important + * for unwritten extent conversion at I/O completion + * time. 
+ */ + new_ioend = 1; + err = xfs_map_blocks(inode, offset, &imap, type, + nonblocking); + if (err) + goto error; + imap_valid = xfs_imap_valid(inode, &imap, offset); + } + if (imap_valid) { + if (type == IO_OVERWRITE) lock_buffer(bh); - xfs_add_to_ioend(inode, bh, offset, type, - &ioend, new_ioend); - count++; - } - } else if (PageUptodate(page)) { - ASSERT(buffer_mapped(bh)); - imap_valid = 0; + else + xfs_map_at_offset(inode, bh, &imap, offset); + xfs_add_to_ioend(inode, bh, offset, type, &ioend, + new_ioend); + count++; } if (!iohead) -- cgit v1.2.2 From ecff71e677c6d469f525dcf31ada709d5858307c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Dec 2010 08:42:25 +0000 Subject: xfs: simplify xfs_map_at_offset Move the buffer locking into the callers as they need to do it wether they call xfs_map_at_offset or not. Remove the b_bdev assignment, which is already done by get_blocks. Remove the duplicate extent type asserts in xfs_convert_page just before calling xfs_map_at_offset. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_aops.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 4d982dc8b862..ec7bbb5645b6 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c @@ -626,9 +626,7 @@ xfs_map_at_offset( ASSERT(imap->br_startblock != HOLESTARTBLOCK); ASSERT(imap->br_startblock != DELAYSTARTBLOCK); - lock_buffer(bh); xfs_map_buffer(inode, bh, imap, offset); - bh->b_bdev = xfs_find_bdev_for_inode(inode); set_buffer_mapped(bh); clear_buffer_delay(bh); clear_buffer_unwritten(bh); @@ -751,12 +749,8 @@ xfs_convert_page( continue; } - ASSERT(imap->br_startblock != HOLESTARTBLOCK); - ASSERT(imap->br_startblock != DELAYSTARTBLOCK); - - if (type == IO_OVERWRITE) - lock_buffer(bh); - else + lock_buffer(bh); + if (type != IO_OVERWRITE) xfs_map_at_offset(inode, bh, imap, offset); xfs_add_to_ioend(inode, bh, offset, type, ioendp, done); @@ -1041,9 +1035,8 @@ xfs_vm_writepage( imap_valid = xfs_imap_valid(inode, &imap, offset); } if (imap_valid) { - if (type == IO_OVERWRITE) - lock_buffer(bh); - else + lock_buffer(bh); + if (type != IO_OVERWRITE) xfs_map_at_offset(inode, bh, &imap, offset); xfs_add_to_ioend(inode, bh, offset, type, &ioend, new_ioend); -- cgit v1.2.2 From 9f9baab38dacd11fe6095a1e59f3783a305f7020 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Dec 2010 15:03:57 +0000 Subject: xfs: clean up xfs_alloc_ag_vextent_exact Use a goto label to consolidate all block not found cases, and add a tracepoint for them. Also clean up a few whitespace issues. Based on an earlier patch from Dave Chinner. 
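The three "block not found" cases (no record at or before bno, freespace too short, minleft fixup failure) now share one exit label; sketched from the hunk below, with the intervening code omitted:

	if (!i)
		goto not_found;
	/* ... */
	if (fend < minend)
		goto not_found;
	/* ... */
	if (!xfs_alloc_fix_minleft(args))
		goto not_found;
	/* ... */
not_found:
	/* Didn't find it, return null. */
	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
	args->agbno = NULLAGBLOCK;
	trace_xfs_alloc_exact_notfound(args);
	return 0;
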
Signed-off-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_trace.h | 1 + fs/xfs/xfs_alloc.c | 58 ++++++++++++++++++++++++-------------------- 2 files changed, 33 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index f56431c916a0..83e8760159ef 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -1422,6 +1422,7 @@ DEFINE_EVENT(xfs_alloc_class, name, \ TP_PROTO(struct xfs_alloc_arg *args), \ TP_ARGS(args)) DEFINE_ALLOC_EVENT(xfs_alloc_exact_done); +DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound); DEFINE_ALLOC_EVENT(xfs_alloc_exact_error); DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft); DEFINE_ALLOC_EVENT(xfs_alloc_near_first); diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index 112abc439ca5..d9133f10d2b1 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -577,61 +577,58 @@ xfs_alloc_ag_vextent_exact( xfs_extlen_t rlen; /* length of returned extent */ ASSERT(args->alignment == 1); + /* * Allocate/initialize a cursor for the by-number freespace btree. */ bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp, - args->agno, XFS_BTNUM_BNO); + args->agno, XFS_BTNUM_BNO); + /* * Lookup bno and minlen in the btree (minlen is irrelevant, really). * Look for the closest free block <= bno, it must contain bno * if any free block does. */ - if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i))) + error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i); + if (error) goto error0; - if (!i) { - /* - * Didn't find it, return null. - */ - xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); - args->agbno = NULLAGBLOCK; - return 0; - } + if (!i) + goto not_found; + /* * Grab the freespace record. */ - if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i))) + error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i); + if (error) goto error0; XFS_WANT_CORRUPTED_GOTO(i == 1, error0); ASSERT(fbno <= args->agbno); minend = args->agbno + args->minlen; maxend = args->agbno + args->maxlen; fend = fbno + flen; + /* * Give up if the freespace isn't long enough for the minimum request. */ - if (fend < minend) { - xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); - args->agbno = NULLAGBLOCK; - return 0; - } + if (fend < minend) + goto not_found; + /* * End of extent will be smaller of the freespace end and the * maximal requested end. - */ - end = XFS_AGBLOCK_MIN(fend, maxend); - /* + * * Fix the length according to mod and prod if given. */ + end = XFS_AGBLOCK_MIN(fend, maxend); args->len = end - args->agbno; xfs_alloc_fix_len(args); - if (!xfs_alloc_fix_minleft(args)) { - xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); - return 0; - } + if (!xfs_alloc_fix_minleft(args)) + goto not_found; + rlen = args->len; ASSERT(args->agbno + rlen <= fend); end = args->agbno + rlen; + /* * We are allocating agbno for rlen [agbno .. end] * Allocate/initialize a cursor for the by-size btree. 
@@ -640,16 +637,25 @@ xfs_alloc_ag_vextent_exact( args->agno, XFS_BTNUM_CNT); ASSERT(args->agbno + args->len <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length)); - if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, - args->agbno, args->len, XFSA_FIXUP_BNO_OK))) { + error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno, + args->len, XFSA_FIXUP_BNO_OK); + if (error) { xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR); goto error0; } + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); - trace_xfs_alloc_exact_done(args); args->wasfromfl = 0; + trace_xfs_alloc_exact_done(args); + return 0; + +not_found: + /* Didn't find it, return null. */ + xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR); + args->agbno = NULLAGBLOCK; + trace_xfs_alloc_exact_notfound(args); return 0; error0: -- cgit v1.2.2 From 489a150f6454e2cd93d9e0ee6d7c5a361844f62a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 10 Dec 2010 15:04:11 +0000 Subject: xfs: factor duplicate code in xfs_alloc_ag_vextent_near into a helper Add a new xfs_alloc_find_best_extent that does a forward/backward search in the allocation btree. That code previously was existed two times in xfs_alloc_ag_vextent_near, once for each search direction. Based on an earlier patch from Dave Chinner. Signed-off-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/xfs_alloc.c | 293 +++++++++++++++++++++-------------------------------- 1 file changed, 113 insertions(+), 180 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index d9133f10d2b1..fa8723f5870a 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -664,6 +664,95 @@ error0: return error; } +/* + * Search the btree in a given direction via the search cursor and compare + * the records found against the good extent we've already found. + */ +STATIC int +xfs_alloc_find_best_extent( + struct xfs_alloc_arg *args, /* allocation argument structure */ + struct xfs_btree_cur **gcur, /* good cursor */ + struct xfs_btree_cur **scur, /* searching cursor */ + xfs_agblock_t gdiff, /* difference for search comparison */ + xfs_agblock_t *sbno, /* extent found by search */ + xfs_extlen_t *slen, + xfs_extlen_t *slena, /* aligned length */ + int dir) /* 0 = search right, 1 = search left */ +{ + xfs_agblock_t bno; + xfs_agblock_t new; + xfs_agblock_t sdiff; + int error; + int i; + + /* The good extent is perfect, no need to search. */ + if (!gdiff) + goto out_use_good; + + /* + * Look until we find a better one, run out of space or run off the end. + */ + do { + error = xfs_alloc_get_rec(*scur, sbno, slen, &i); + if (error) + goto error0; + XFS_WANT_CORRUPTED_GOTO(i == 1, error0); + xfs_alloc_compute_aligned(*sbno, *slen, args->alignment, + args->minlen, &bno, slena); + + /* + * The good extent is closer than this one. + */ + if (!dir) { + if (bno >= args->agbno + gdiff) + goto out_use_good; + } else { + if (bno <= args->agbno - gdiff) + goto out_use_good; + } + + /* + * Same distance, compare length and pick the best. + */ + if (*slena >= args->minlen) { + args->len = XFS_EXTLEN_MIN(*slena, args->maxlen); + xfs_alloc_fix_len(args); + + sdiff = xfs_alloc_compute_diff(args->agbno, args->len, + args->alignment, *sbno, + *slen, &new); + + /* + * Choose closer size and invalidate other cursor. 
+ */ + if (sdiff < gdiff) + goto out_use_search; + goto out_use_good; + } + + if (!dir) + error = xfs_btree_increment(*scur, 0, &i); + else + error = xfs_btree_decrement(*scur, 0, &i); + if (error) + goto error0; + } while (i); + +out_use_good: + xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR); + *scur = NULL; + return 0; + +out_use_search: + xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR); + *gcur = NULL; + return 0; + +error0: + /* caller invalidates cursors */ + return error; +} + /* * Allocate a variable extent near bno in the allocation group agno. * Extent's length (returned in len) will be between minlen and maxlen, @@ -931,203 +1020,45 @@ xfs_alloc_ag_vextent_near( } } } while (bno_cur_lt || bno_cur_gt); + /* * Got both cursors still active, need to find better entry. */ if (bno_cur_lt && bno_cur_gt) { - /* - * Left side is long enough, look for a right side entry. - */ if (ltlena >= args->minlen) { /* - * Fix up the length. + * Left side is good, look for a right side entry. */ args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); xfs_alloc_fix_len(args); - rlen = args->len; - ltdiff = xfs_alloc_compute_diff(args->agbno, rlen, + ltdiff = xfs_alloc_compute_diff(args->agbno, args->len, args->alignment, ltbno, ltlen, <new); + + error = xfs_alloc_find_best_extent(args, + &bno_cur_lt, &bno_cur_gt, + ltdiff, >bno, >len, >lena, + 0 /* search right */); + } else { + ASSERT(gtlena >= args->minlen); + /* - * Not perfect. - */ - if (ltdiff) { - /* - * Look until we find a better one, run out of - * space, or run off the end. - */ - while (bno_cur_lt && bno_cur_gt) { - if ((error = xfs_alloc_get_rec( - bno_cur_gt, >bno, - >len, &i))) - goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - xfs_alloc_compute_aligned(gtbno, gtlen, - args->alignment, args->minlen, - >bnoa, >lena); - /* - * The left one is clearly better. - */ - if (gtbnoa >= args->agbno + ltdiff) { - xfs_btree_del_cursor( - bno_cur_gt, - XFS_BTREE_NOERROR); - bno_cur_gt = NULL; - break; - } - /* - * If we reach a big enough entry, - * compare the two and pick the best. - */ - if (gtlena >= args->minlen) { - args->len = - XFS_EXTLEN_MIN(gtlena, - args->maxlen); - xfs_alloc_fix_len(args); - rlen = args->len; - gtdiff = xfs_alloc_compute_diff( - args->agbno, rlen, - args->alignment, - gtbno, gtlen, >new); - /* - * Right side is better. - */ - if (gtdiff < ltdiff) { - xfs_btree_del_cursor( - bno_cur_lt, - XFS_BTREE_NOERROR); - bno_cur_lt = NULL; - } - /* - * Left side is better. - */ - else { - xfs_btree_del_cursor( - bno_cur_gt, - XFS_BTREE_NOERROR); - bno_cur_gt = NULL; - } - break; - } - /* - * Fell off the right end. - */ - if ((error = xfs_btree_increment( - bno_cur_gt, 0, &i))) - goto error0; - if (!i) { - xfs_btree_del_cursor( - bno_cur_gt, - XFS_BTREE_NOERROR); - bno_cur_gt = NULL; - break; - } - } - } - /* - * The left side is perfect, trash the right side. - */ - else { - xfs_btree_del_cursor(bno_cur_gt, - XFS_BTREE_NOERROR); - bno_cur_gt = NULL; - } - } - /* - * It's the right side that was found first, look left. - */ - else { - /* - * Fix up the length. + * Right side is good, look for a left side entry. */ args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen); xfs_alloc_fix_len(args); - rlen = args->len; - gtdiff = xfs_alloc_compute_diff(args->agbno, rlen, + gtdiff = xfs_alloc_compute_diff(args->agbno, args->len, args->alignment, gtbno, gtlen, >new); - /* - * Right side entry isn't perfect. - */ - if (gtdiff) { - /* - * Look until we find a better one, run out of - * space, or run off the end. 
- */ - while (bno_cur_lt && bno_cur_gt) { - if ((error = xfs_alloc_get_rec( - bno_cur_lt, <bno, - <len, &i))) - goto error0; - XFS_WANT_CORRUPTED_GOTO(i == 1, error0); - xfs_alloc_compute_aligned(ltbno, ltlen, - args->alignment, args->minlen, - <bnoa, <lena); - /* - * The right one is clearly better. - */ - if (ltbnoa <= args->agbno - gtdiff) { - xfs_btree_del_cursor( - bno_cur_lt, - XFS_BTREE_NOERROR); - bno_cur_lt = NULL; - break; - } - /* - * If we reach a big enough entry, - * compare the two and pick the best. - */ - if (ltlena >= args->minlen) { - args->len = XFS_EXTLEN_MIN( - ltlena, args->maxlen); - xfs_alloc_fix_len(args); - rlen = args->len; - ltdiff = xfs_alloc_compute_diff( - args->agbno, rlen, - args->alignment, - ltbno, ltlen, <new); - /* - * Left side is better. - */ - if (ltdiff < gtdiff) { - xfs_btree_del_cursor( - bno_cur_gt, - XFS_BTREE_NOERROR); - bno_cur_gt = NULL; - } - /* - * Right side is better. - */ - else { - xfs_btree_del_cursor( - bno_cur_lt, - XFS_BTREE_NOERROR); - bno_cur_lt = NULL; - } - break; - } - /* - * Fell off the left end. - */ - if ((error = xfs_btree_decrement( - bno_cur_lt, 0, &i))) - goto error0; - if (!i) { - xfs_btree_del_cursor(bno_cur_lt, - XFS_BTREE_NOERROR); - bno_cur_lt = NULL; - break; - } - } - } - /* - * The right side is perfect, trash the left side. - */ - else { - xfs_btree_del_cursor(bno_cur_lt, - XFS_BTREE_NOERROR); - bno_cur_lt = NULL; - } + + error = xfs_alloc_find_best_extent(args, + &bno_cur_gt, &bno_cur_lt, + gtdiff, <bno, <len, <lena, + 1 /* search left */); } + + if (error) + goto error0; } + /* * If we couldn't get anything, give up. */ @@ -1136,6 +1067,7 @@ xfs_alloc_ag_vextent_near( args->agbno = NULLAGBLOCK; return 0; } + /* * At this point we have selected a freespace entry, either to the * left or to the right. If it's on the right, copy all the @@ -1152,6 +1084,7 @@ xfs_alloc_ag_vextent_near( j = 1; } else j = 0; + /* * Fix up the length and compute the useful address. */ -- cgit v1.2.2 From dcfcf20512cb517ac18b9433b676183fa1257911 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 23 Dec 2010 11:57:13 +1100 Subject: xfs: provide a inode iolock lockdep class The XFS iolock needs to be re-initialised to a new lock class before it enters reclaim to prevent lockdep false positives. Unfortunately, this is not sufficient protection as inodes in the XFS_IRECLAIMABLE state can be recycled and not re-initialised before being reused. We need to re-initialise the lock state when transfering out of XFS_IRECLAIMABLE state to XFS_INEW, but we need to keep the same class as if the inode was just allocated. Hence we need a specific lockdep class variable for the iolock so that both initialisations use the same class. While there, add a specific class for inodes in the reclaim state so that it is easy to tell from lockdep reports what state the inode was in that generated the report. 
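The pairing described above, sketched from the patch below: one class shared by freshly allocated and recycled inodes, and a separate class for inodes heading into reclaim.

	static struct lock_class_key xfs_iolock_active;	/* new and recycled inodes */
	struct lock_class_key xfs_iolock_reclaimable;		/* inodes entering reclaim */

	/* e.g. when recycling an XFS_IRECLAIMABLE inode back to XFS_INEW: */
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_active, "xfs_iolock_active");
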
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_super.c | 2 ++ fs/xfs/xfs_iget.c | 19 +++++++++++++++++++ fs/xfs/xfs_inode.h | 2 ++ 3 files changed, 23 insertions(+) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 064f964d4f3c..c45b3233d486 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1118,6 +1118,8 @@ xfs_fs_evict_inode( */ ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock)); mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); + lockdep_set_class_and_name(&ip->i_iolock.mr_lock, + &xfs_iolock_reclaimable, "xfs_iolock_reclaimable"); xfs_inactive(ip); } diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 0cdd26932d8e..cdb1c2505fc6 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -42,6 +42,17 @@ #include "xfs_trace.h" +/* + * Define xfs inode iolock lockdep classes. We need to ensure that all active + * inodes are considered the same for lockdep purposes, including inodes that + * are recycled through the XFS_IRECLAIMABLE state. This is the the only way to + * guarantee the locks are considered the same when there are multiple lock + * initialisation siteѕ. Also, define a reclaimable inode class so it is + * obvious in lockdep reports which class the report is against. + */ +static struct lock_class_key xfs_iolock_active; +struct lock_class_key xfs_iolock_reclaimable; + /* * Allocate and initialise an xfs_inode. */ @@ -71,6 +82,8 @@ xfs_inode_alloc( ASSERT(completion_done(&ip->i_flush)); mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); + lockdep_set_class_and_name(&ip->i_iolock.mr_lock, + &xfs_iolock_active, "xfs_iolock_active"); /* initialise the xfs inode */ ip->i_ino = ino; @@ -218,6 +231,12 @@ xfs_iget_cache_hit( ip->i_flags |= XFS_INEW; __xfs_inode_clear_reclaim_tag(mp, pag, ip); inode->i_state = I_NEW; + + ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock)); + mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); + lockdep_set_class_and_name(&ip->i_iolock.mr_lock, + &xfs_iolock_active, "xfs_iolock_active"); + spin_unlock(&ip->i_flags_lock); write_unlock(&pag->pag_ici_lock); } else { diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index fb2ca2e4cdc9..1c6514d73dc8 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -438,6 +438,8 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) #define XFS_IOLOCK_DEP(flags) (((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT) #define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT) +extern struct lock_class_key xfs_iolock_reclaimable; + /* * Flags for xfs_itruncate_start(). */ -- cgit v1.2.2 From 622d81494fa32343a4b97b607619656c7a4a6d1a Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 23 Dec 2010 11:57:37 +1100 Subject: xfs: use KM_NOFS for allocations during attribute list operations When listing attributes, we are doiing memory allocations under the inode ilock using only KM_SLEEP. This allows memory allocation to recurse back into the filesystem and do writeback, which may the ilock we already hold on the current inode. THis will deadlock. Hence use KM_NOFS for such allocations outside of transaction context to ensure that reclaim recursion does not occur. 
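The change boils down to adding KM_NOFS at the two allocation sites, e.g. in xfs_attr_shortform_list() (sketch of the hunk below):

	/* allocation happens under the ilock: don't let reclaim recurse into the fs */
	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
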
Reported-by: Nick Piggin Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_attr_leaf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index a6cff8edcdb6..71e90dc2aeb1 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c @@ -637,7 +637,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context) * It didn't all fit, so we have to sort everything on hashval. */ sbsize = sf->hdr.count * sizeof(*sbuf); - sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP); + sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS); /* * Scan the attribute list for the rest of the entries, storing @@ -2386,7 +2386,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context) args.dp = context->dp; args.whichfork = XFS_ATTR_FORK; args.valuelen = valuelen; - args.value = kmem_alloc(valuelen, KM_SLEEP); + args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS); args.rmtblkno = be32_to_cpu(name_rmt->valueblk); args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen); retval = xfs_attr_rmtval_get(&args); -- cgit v1.2.2 From 055388a3188f56676c21e92962fc366ac8b5cb72 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 4 Jan 2011 11:35:03 +1100 Subject: xfs: dynamic speculative EOF preallocation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently the size of the speculative preallocation during delayed allocation is fixed by either the allocsize mount option or a default size. We are seeing a lot of cases where we need to recommend using the allocsize mount option to prevent fragmentation when buffered writes land in the same AG. Rather than using a fixed preallocation size by default (up to 64k), make it dynamic by basing it on the current inode size. That way the EOF preallocation will increase as the file size increases. Hence for streaming writes we are much more likely to get large preallocations exactly when we need it to reduce fragmentation. For default settings, the size of the initial extents is determined by the number of parallel writers and the amount of memory in the machine. 
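Before the example extent layouts that follow, here is a hedged sketch of the sizing idea in plain C (invented name; the MAXEXTLEN cap and the m_writeio_blocks floor of the real xfs_iomap_prealloc_size() in the diff below are omitted): take the current file size rounded down to a power of two, then halve it once for each free-space band crossed in the threshold table further below.

#include <stdint.h>

static uint64_t demo_prealloc_blocks(uint64_t isize_blocks,
				     uint64_t free_blocks,
				     uint64_t total_blocks)
{
	uint64_t alloc = isize_blocks;
	int shift = 0;
	int pct;

	/* round down to a power of two, like rounddown_pow_of_two() */
	while (alloc & (alloc - 1))
		alloc &= alloc - 1;

	/* count the 5%..1% free-space thresholds we have fallen below */
	for (pct = 5; pct >= 1; pct--)
		if (free_blocks < total_blocks / 100 * pct)
			shift++;
	if (shift)
		shift++;	/* mirrors the shift = 2 starting point */

	return alloc >> shift;
}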
For 4GB RAM and 4 concurrent 32GB file writes: EXT: FILE-OFFSET BLOCK-RANGE AG AG-OFFSET TOTAL 0: [0..1048575]: 1048672..2097247 0 (1048672..2097247) 1048576 1: [1048576..2097151]: 5242976..6291551 0 (5242976..6291551) 1048576 2: [2097152..4194303]: 12583008..14680159 0 (12583008..14680159) 2097152 3: [4194304..8388607]: 25165920..29360223 0 (25165920..29360223) 4194304 4: [8388608..16777215]: 58720352..67108959 0 (58720352..67108959) 8388608 5: [16777216..33554423]: 117440584..134217791 0 (117440584..134217791) 16777208 6: [33554424..50331511]: 184549056..201326143 0 (184549056..201326143) 16777088 7: [50331512..67108599]: 251657408..268434495 0 (251657408..268434495) 16777088 and for 16 concurrent 16GB file writes: EXT: FILE-OFFSET BLOCK-RANGE AG AG-OFFSET TOTAL 0: [0..262143]: 2490472..2752615 0 (2490472..2752615) 262144 1: [262144..524287]: 6291560..6553703 0 (6291560..6553703) 262144 2: [524288..1048575]: 13631592..14155879 0 (13631592..14155879) 524288 3: [1048576..2097151]: 30408808..31457383 0 (30408808..31457383) 1048576 4: [2097152..4194303]: 52428904..54526055 0 (52428904..54526055) 2097152 5: [4194304..8388607]: 104857704..109052007 0 (104857704..109052007) 4194304 6: [8388608..16777215]: 209715304..218103911 0 (209715304..218103911) 8388608 7: [16777216..33554423]: 452984848..469762055 0 (452984848..469762055) 16777208 Because it is hard to take back specualtive preallocation, cases where there are large slow growing log files on a nearly full filesystem may cause premature ENOSPC. Hence as the filesystem nears full, the maximum dynamic prealloc size іs reduced according to this table (based on 4k block size): freespace max prealloc size >5% full extent (8GB) 4-5% 2GB (8GB >> 2) 3-4% 1GB (8GB >> 3) 2-3% 512MB (8GB >> 4) 1-2% 256MB (8GB >> 5) <1% 128MB (8GB >> 6) This should reduce the amount of space held in speculative preallocation for such cases. The allocsize mount option turns off the dynamic behaviour and fixes the prealloc size to whatever the mount option specifies. i.e. the behaviour is unchanged. Signed-off-by: Dave Chinner --- fs/xfs/xfs_fsops.c | 1 + fs/xfs/xfs_iomap.c | 84 +++++++++++++++++++++++++++++++++++++++++++++++------- fs/xfs/xfs_mount.c | 21 ++++++++++++++ fs/xfs/xfs_mount.h | 14 +++++++++ 4 files changed, 110 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index a7c116e814af..f56d30e8040c 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -374,6 +374,7 @@ xfs_growfs_data_private( mp->m_maxicount = icount << mp->m_sb.sb_inopblog; } else mp->m_maxicount = 0; + xfs_set_low_space_thresholds(mp); /* update secondary superblocks. */ for (agno = 1; agno < nagcount; agno++) { diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 22b62a179e89..55582bd66659 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -267,6 +267,9 @@ error_out: * If the caller is doing a write at the end of the file, then extend the * allocation out to the file system's write iosize. We clean up any extra * space left over when the file is closed in xfs_inactive(). + * + * If we find we already have delalloc preallocation beyond EOF, don't do more + * preallocation as it it not needed. 
*/ STATIC int xfs_iomap_eof_want_preallocate( @@ -282,6 +285,7 @@ xfs_iomap_eof_want_preallocate( xfs_filblks_t count_fsb; xfs_fsblock_t firstblock; int n, error, imaps; + int found_delalloc = 0; *prealloc = 0; if ((offset + count) <= ip->i_size) @@ -306,12 +310,60 @@ xfs_iomap_eof_want_preallocate( return 0; start_fsb += imap[n].br_blockcount; count_fsb -= imap[n].br_blockcount; + + if (imap[n].br_startblock == DELAYSTARTBLOCK) + found_delalloc = 1; } } - *prealloc = 1; + if (!found_delalloc) + *prealloc = 1; return 0; } +/* + * If we don't have a user specified preallocation size, dynamically increase + * the preallocation size as the size of the file grows. Cap the maximum size + * at a single extent or less if the filesystem is near full. The closer the + * filesystem is to full, the smaller the maximum prealocation. + */ +STATIC xfs_fsblock_t +xfs_iomap_prealloc_size( + struct xfs_mount *mp, + struct xfs_inode *ip) +{ + xfs_fsblock_t alloc_blocks = 0; + + if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { + int shift = 0; + int64_t freesp; + + alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size); + alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN, + rounddown_pow_of_two(alloc_blocks)); + + xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); + freesp = mp->m_sb.sb_fdblocks; + if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) { + shift = 2; + if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT]) + shift++; + if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT]) + shift++; + if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT]) + shift++; + if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT]) + shift++; + } + if (shift) + alloc_blocks >>= shift; + } + + if (alloc_blocks < mp->m_writeio_blocks) + alloc_blocks = mp->m_writeio_blocks; + + return alloc_blocks; +} + int xfs_iomap_write_delay( xfs_inode_t *ip, @@ -344,6 +396,7 @@ xfs_iomap_write_delay( extsz = xfs_get_extsz_hint(ip); offset_fsb = XFS_B_TO_FSBT(mp, offset); + error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count, imap, XFS_WRITE_IMAPS, &prealloc); if (error) @@ -351,9 +404,11 @@ xfs_iomap_write_delay( retry: if (prealloc) { + xfs_fsblock_t alloc_blocks = xfs_iomap_prealloc_size(mp, ip); + aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1)); ioalign = XFS_B_TO_FSBT(mp, aligned_offset); - last_fsb = ioalign + mp->m_writeio_blocks; + last_fsb = ioalign + alloc_blocks; } else { last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); } @@ -371,22 +426,31 @@ retry: XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | XFS_BMAPI_ENTIRE, &firstblock, 1, imap, &nimaps, NULL); - if (error && (error != ENOSPC)) + switch (error) { + case 0: + case ENOSPC: + case EDQUOT: + break; + default: return XFS_ERROR(error); + } /* - * If bmapi returned us nothing, and if we didn't get back EDQUOT, - * then we must have run out of space - flush all other inodes with - * delalloc blocks and retry without EOF preallocation. + * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. For + * ENOSPC, * flush all other inodes with delalloc blocks to free up + * some of the excess reserved metadata space. For both cases, retry + * without EOF preallocation. */ if (nimaps == 0) { trace_xfs_delalloc_enospc(ip, offset, count); if (flushed) - return XFS_ERROR(ENOSPC); + return XFS_ERROR(error ? 
error : ENOSPC); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - xfs_flush_inodes(ip); - xfs_ilock(ip, XFS_ILOCK_EXCL); + if (error == ENOSPC) { + xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_flush_inodes(ip); + xfs_ilock(ip, XFS_ILOCK_EXCL); + } flushed = 1; error = 0; diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 19e9dfa1c254..40579fdf0d0a 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -974,6 +974,24 @@ xfs_set_rw_sizes(xfs_mount_t *mp) mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog); } +/* + * precalculate the low space thresholds for dynamic speculative preallocation. + */ +void +xfs_set_low_space_thresholds( + struct xfs_mount *mp) +{ + int i; + + for (i = 0; i < XFS_LOWSP_MAX; i++) { + __uint64_t space = mp->m_sb.sb_dblocks; + + do_div(space, 100); + mp->m_low_space[i] = space * (i + 1); + } +} + + /* * Set whether we're using inode alignment. */ @@ -1196,6 +1214,9 @@ xfs_mountfs( */ xfs_set_rw_sizes(mp); + /* set the low space thresholds for dynamic preallocation */ + xfs_set_low_space_thresholds(mp); + /* * Set the inode cluster size. * This may still be overridden by the file system diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 5861b4980740..a62e8971539d 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -103,6 +103,16 @@ extern int xfs_icsb_modify_counters(struct xfs_mount *, xfs_sb_field_t, xfs_mod_incore_sb(mp, field, delta, rsvd) #endif +/* dynamic preallocation free space thresholds, 5% down to 1% */ +enum { + XFS_LOWSP_1_PCNT = 0, + XFS_LOWSP_2_PCNT, + XFS_LOWSP_3_PCNT, + XFS_LOWSP_4_PCNT, + XFS_LOWSP_5_PCNT, + XFS_LOWSP_MAX, +}; + typedef struct xfs_mount { struct super_block *m_super; xfs_tid_t m_tid; /* next unused tid for fs */ @@ -202,6 +212,8 @@ typedef struct xfs_mount { __int64_t m_update_flags; /* sb flags we need to update on the next remount,rw */ struct shrinker m_inode_shrink; /* inode reclaim shrinker */ + int64_t m_low_space[XFS_LOWSP_MAX]; + /* low free space thresholds */ } xfs_mount_t; /* @@ -379,6 +391,8 @@ extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t); extern int xfs_dev_is_read_only(struct xfs_mount *, char *); +extern void xfs_set_low_space_thresholds(struct xfs_mount *); + #endif /* __KERNEL__ */ extern void xfs_mod_sb(struct xfs_trans *, __int64_t); -- cgit v1.2.2 From 6e857567dbbfe14dd6cc3f7414671b047b1ff5c7 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 23 Dec 2010 12:02:31 +1100 Subject: xfs: don't truncate prealloc from frequently accessed inodes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A long standing problem for streaming writeѕ through the NFS server has been that the NFS server opens and closes file descriptors on an inode for every write. The result of this behaviour is that the ->release() function is called on every close and that results in XFS truncating speculative preallocation beyond the EOF. This has an adverse effect on file layout when multiple files are being written at the same time - they interleave their extents and can result in severe fragmentation. To avoid this problem, keep track of ->release calls made on a dirty inode. For most cases, an inode is only going to be opened once for writing and then closed again during it's lifetime in cache. Hence if there are multiple ->release calls when the inode is dirty, there is a good chance that the inode is being accessed by the NFS server. Hence set a flag the first time ->release is called while there are delalloc blocks still outstanding on the inode. 
If this flag is set when ->release is next called, then do no truncate away the speculative preallocation - leave it there so that subsequent writes do not need to reallocate the delalloc space. This will prevent interleaving of extents of different inodes written concurrently to the same AG. If we get this wrong, it is not a big deal as we truncate speculative allocation beyond EOF anyway in xfs_inactive() when the inode is thrown out of the cache. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_inode.h | 13 ++++++----- fs/xfs/xfs_vnodeops.c | 61 +++++++++++++++++++++++++++++++++------------------ 2 files changed, 47 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 1c6514d73dc8..5c95fa8ec11d 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -376,12 +376,13 @@ static inline void xfs_ifunlock(xfs_inode_t *ip) /* * In-core inode flags. */ -#define XFS_IRECLAIM 0x0001 /* we have started reclaiming this inode */ -#define XFS_ISTALE 0x0002 /* inode has been staled */ -#define XFS_IRECLAIMABLE 0x0004 /* inode can be reclaimed */ -#define XFS_INEW 0x0008 /* inode has just been allocated */ -#define XFS_IFILESTREAM 0x0010 /* inode is in a filestream directory */ -#define XFS_ITRUNCATED 0x0020 /* truncated down so flush-on-close */ +#define XFS_IRECLAIM 0x0001 /* started reclaiming this inode */ +#define XFS_ISTALE 0x0002 /* inode has been staled */ +#define XFS_IRECLAIMABLE 0x0004 /* inode can be reclaimed */ +#define XFS_INEW 0x0008 /* inode has just been allocated */ +#define XFS_IFILESTREAM 0x0010 /* inode is in a filestream directory */ +#define XFS_ITRUNCATED 0x0020 /* truncated down so flush-on-close */ +#define XFS_IDIRTY_RELEASE 0x0040 /* dirty release already seen */ /* * Flags for inode locking. diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 8e4a63c4151a..d8e6f8cd6f0c 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -964,29 +964,48 @@ xfs_release( xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE); } - if (ip->i_d.di_nlink != 0) { - if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && - ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 || - ip->i_delayed_blks > 0)) && - (ip->i_df.if_flags & XFS_IFEXTENTS)) && - (!(ip->i_d.di_flags & - (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) { + if (ip->i_d.di_nlink == 0) + return 0; - /* - * If we can't get the iolock just skip truncating - * the blocks past EOF because we could deadlock - * with the mmap_sem otherwise. We'll get another - * chance to drop them once the last reference to - * the inode is dropped, so we'll never leak blocks - * permanently. - */ - error = xfs_free_eofblocks(mp, ip, - XFS_FREE_EOF_TRYLOCK); - if (error) - return error; - } - } + if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) && + ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 || + ip->i_delayed_blks > 0)) && + (ip->i_df.if_flags & XFS_IFEXTENTS)) && + (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) { + /* + * If we can't get the iolock just skip truncating the blocks + * past EOF because we could deadlock with the mmap_sem + * otherwise. We'll get another chance to drop them once the + * last reference to the inode is dropped, so we'll never leak + * blocks permanently. + * + * Further, check if the inode is being opened, written and + * closed frequently and we have delayed allocation blocks + * oustanding (e.g. streaming writes from the NFS server), + * truncating the blocks past EOF will cause fragmentation to + * occur. 
+ * + * In this case don't do the truncation, either, but we have to + * be careful how we detect this case. Blocks beyond EOF show + * up as i_delayed_blks even when the inode is clean, so we + * need to truncate them away first before checking for a dirty + * release. Hence on the first dirty close we will still remove + * the speculative allocation, but after that we will leave it + * in place. + */ + if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE)) + return 0; + + error = xfs_free_eofblocks(mp, ip, + XFS_FREE_EOF_TRYLOCK); + if (error) + return error; + + /* delalloc blocks after truncation means it really is dirty */ + if (ip->i_delayed_blks) + xfs_iflags_set(ip, XFS_IDIRTY_RELEASE); + } return 0; } -- cgit v1.2.2 From d95b7aaf9ab6738bef1ebcc52ab66563085e44ac Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 16 Dec 2010 16:41:39 +1100 Subject: xfs: rcu free inodes Introduce RCU freeing of XFS inodes so that we can convert lookup traversals to use rcu_read_lock() protection. This patch only introduces the RCU freeing to minimise the potential conflicts with mainline if this is merged into mainline via a VFS patchset. It abuses the i_dentry list for the RCU callback structure because the VFS patches make this a union so it is safe to use like this and simplifies and merge issues. This patch uses basic RCU freeing rather than SLAB_DESTROY_BY_RCU. The later lookup patches need the same "found free inode" protection regardless of the RCU freeing method used, so once again the RCU freeing method can be dealt with apprpriately at merge time without affecting any other code. Signed-off-by: Dave Chinner Reviewed-by: Paul E. McKenney --- fs/xfs/xfs_iget.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index cdb1c2505fc6..9fae47556604 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -104,6 +104,18 @@ xfs_inode_alloc( return ip; } +void +__xfs_inode_free( + struct rcu_head *head) +{ + struct inode *inode = container_of((void *)head, + struct inode, i_dentry); + struct xfs_inode *ip = XFS_I(inode); + + INIT_LIST_HEAD(&inode->i_dentry); + kmem_zone_free(xfs_inode_zone, ip); +} + void xfs_inode_free( struct xfs_inode *ip) @@ -147,7 +159,7 @@ xfs_inode_free( ASSERT(!spin_is_locked(&ip->i_flags_lock)); ASSERT(completion_done(&ip->i_flush)); - kmem_zone_free(xfs_inode_zone, ip); + call_rcu((struct rcu_head *)&VFS_I(ip)->i_dentry, __xfs_inode_free); } /* -- cgit v1.2.2 From 1a3e8f3da09c7082d25b512a0ffe569391e4c09a Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 17 Dec 2010 17:29:43 +1100 Subject: xfs: convert inode cache lookups to use RCU locking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With delayed logging greatly increasing the sustained parallelism of inode operations, the inode cache locking is showing significant read vs write contention when inode reclaim runs at the same time as lookups. There is also a lot more write lock acquistions than there are read locks (4:1 ratio) so the read locking is not really buying us much in the way of parallelism. To avoid the read vs write contention, change the cache to use RCU locking on the read side. To avoid needing to RCU free every single inode, use the built in slab RCU freeing mechanism. This requires us to be able to detect lookups of freed inodes, so enѕure that ever freed inode has an inode number of zero and the XFS_IRECLAIM flag set. 
We already check the XFS_IRECLAIM flag in cache hit lookup path, but also add a check for a zero inode number as well. We canthen convert all the read locking lockups to use RCU read side locking and hence remove all read side locking. Signed-off-by: Dave Chinner Reviewed-by: Alex Elder --- fs/xfs/linux-2.6/xfs_sync.c | 84 +++++++++++++++++++++++++++++++++++---------- fs/xfs/xfs_iget.c | 47 ++++++++++++++++++------- fs/xfs/xfs_inode.c | 52 +++++++++++++++++++++------- 3 files changed, 141 insertions(+), 42 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index afb0d7cfad1c..fd38682da851 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -53,14 +53,30 @@ xfs_inode_ag_walk_grab( { struct inode *inode = VFS_I(ip); + ASSERT(rcu_read_lock_held()); + + /* + * check for stale RCU freed inode + * + * If the inode has been reallocated, it doesn't matter if it's not in + * the AG we are walking - we are walking for writeback, so if it + * passes all the "valid inode" checks and is dirty, then we'll write + * it back anyway. If it has been reallocated and still being + * initialised, the XFS_INEW check below will catch it. + */ + spin_lock(&ip->i_flags_lock); + if (!ip->i_ino) + goto out_unlock_noent; + + /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ + if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM)) + goto out_unlock_noent; + spin_unlock(&ip->i_flags_lock); + /* nothing to sync during shutdown */ if (XFS_FORCED_SHUTDOWN(ip->i_mount)) return EFSCORRUPTED; - /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ - if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM)) - return ENOENT; - /* If we can't grab the inode, it must on it's way to reclaim. */ if (!igrab(inode)) return ENOENT; @@ -72,6 +88,10 @@ xfs_inode_ag_walk_grab( /* inode is valid */ return 0; + +out_unlock_noent: + spin_unlock(&ip->i_flags_lock); + return ENOENT; } STATIC int @@ -98,12 +118,12 @@ restart: int error = 0; int i; - read_lock(&pag->pag_ici_lock); + rcu_read_lock(); nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void **)batch, first_index, XFS_LOOKUP_BATCH); if (!nr_found) { - read_unlock(&pag->pag_ici_lock); + rcu_read_unlock(); break; } @@ -118,18 +138,26 @@ restart: batch[i] = NULL; /* - * Update the index for the next lookup. Catch overflows - * into the next AG range which can occur if we have inodes - * in the last block of the AG and we are currently - * pointing to the last inode. + * Update the index for the next lookup. Catch + * overflows into the next AG range which can occur if + * we have inodes in the last block of the AG and we + * are currently pointing to the last inode. + * + * Because we may see inodes that are from the wrong AG + * due to RCU freeing and reallocation, only update the + * index if it lies in this AG. It was a race that lead + * us to see this inode, so another lookup from the + * same index will not find it again. */ + if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) + continue; first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) done = 1; } /* unlock now we've grabbed the inodes. 
*/ - read_unlock(&pag->pag_ici_lock); + rcu_read_unlock(); for (i = 0; i < nr_found; i++) { if (!batch[i]) @@ -639,9 +667,14 @@ xfs_reclaim_inode_grab( struct xfs_inode *ip, int flags) { + ASSERT(rcu_read_lock_held()); + + /* quick check for stale RCU freed inode */ + if (!ip->i_ino) + return 1; /* - * do some unlocked checks first to avoid unnecceary lock traffic. + * do some unlocked checks first to avoid unnecessary lock traffic. * The first is a flush lock check, the second is a already in reclaim * check. Only do these checks if we are not going to block on locks. */ @@ -654,11 +687,16 @@ xfs_reclaim_inode_grab( * The radix tree lock here protects a thread in xfs_iget from racing * with us starting reclaim on the inode. Once we have the * XFS_IRECLAIM flag set it will not touch us. + * + * Due to RCU lookup, we may find inodes that have been freed and only + * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that + * aren't candidates for reclaim at all, so we must check the + * XFS_IRECLAIMABLE is set first before proceeding to reclaim. */ spin_lock(&ip->i_flags_lock); - ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE)); - if (__xfs_iflags_test(ip, XFS_IRECLAIM)) { - /* ignore as it is already under reclaim */ + if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) || + __xfs_iflags_test(ip, XFS_IRECLAIM)) { + /* not a reclaim candidate. */ spin_unlock(&ip->i_flags_lock); return 1; } @@ -864,14 +902,14 @@ restart: struct xfs_inode *batch[XFS_LOOKUP_BATCH]; int i; - write_lock(&pag->pag_ici_lock); + rcu_read_lock(); nr_found = radix_tree_gang_lookup_tag( &pag->pag_ici_root, (void **)batch, first_index, XFS_LOOKUP_BATCH, XFS_ICI_RECLAIM_TAG); if (!nr_found) { - write_unlock(&pag->pag_ici_lock); + rcu_read_unlock(); break; } @@ -891,14 +929,24 @@ restart: * occur if we have inodes in the last block of * the AG and we are currently pointing to the * last inode. + * + * Because we may see inodes that are from the + * wrong AG due to RCU freeing and + * reallocation, only update the index if it + * lies in this AG. It was a race that lead us + * to see this inode, so another lookup from + * the same index will not find it again. */ + if (XFS_INO_TO_AGNO(mp, ip->i_ino) != + pag->pag_agno) + continue; first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) done = 1; } /* unlock now we've grabbed the inodes. */ - write_unlock(&pag->pag_ici_lock); + rcu_read_unlock(); for (i = 0; i < nr_found; i++) { if (!batch[i]) diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 9fae47556604..04ed09b907b8 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -80,6 +80,7 @@ xfs_inode_alloc( ASSERT(atomic_read(&ip->i_pincount) == 0); ASSERT(!spin_is_locked(&ip->i_flags_lock)); ASSERT(completion_done(&ip->i_flush)); + ASSERT(ip->i_ino == 0); mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); lockdep_set_class_and_name(&ip->i_iolock.mr_lock, @@ -98,9 +99,6 @@ xfs_inode_alloc( ip->i_size = 0; ip->i_new_size = 0; - /* prevent anyone from using this yet */ - VFS_I(ip)->i_state = I_NEW; - return ip; } @@ -159,6 +157,16 @@ xfs_inode_free( ASSERT(!spin_is_locked(&ip->i_flags_lock)); ASSERT(completion_done(&ip->i_flush)); + /* + * Because we use RCU freeing we need to ensure the inode always + * appears to be reclaimed with an invalid inode number when in the + * free state. The ip->i_flags_lock provides the barrier against lookup + * races. 
+ */ + spin_lock(&ip->i_flags_lock); + ip->i_flags = XFS_IRECLAIM; + ip->i_ino = 0; + spin_unlock(&ip->i_flags_lock); call_rcu((struct rcu_head *)&VFS_I(ip)->i_dentry, __xfs_inode_free); } @@ -169,14 +177,29 @@ static int xfs_iget_cache_hit( struct xfs_perag *pag, struct xfs_inode *ip, + xfs_ino_t ino, int flags, - int lock_flags) __releases(pag->pag_ici_lock) + int lock_flags) __releases(RCU) { struct inode *inode = VFS_I(ip); struct xfs_mount *mp = ip->i_mount; int error; + /* + * check for re-use of an inode within an RCU grace period due to the + * radix tree nodes not being updated yet. We monitor for this by + * setting the inode number to zero before freeing the inode structure. + * If the inode has been reallocated and set up, then the inode number + * will not match, so check for that, too. + */ spin_lock(&ip->i_flags_lock); + if (ip->i_ino != ino) { + trace_xfs_iget_skip(ip); + XFS_STATS_INC(xs_ig_frecycle); + error = EAGAIN; + goto out_error; + } + /* * If we are racing with another cache hit that is currently @@ -219,7 +242,7 @@ xfs_iget_cache_hit( ip->i_flags |= XFS_IRECLAIM; spin_unlock(&ip->i_flags_lock); - read_unlock(&pag->pag_ici_lock); + rcu_read_unlock(); error = -inode_init_always(mp->m_super, inode); if (error) { @@ -227,7 +250,7 @@ xfs_iget_cache_hit( * Re-initializing the inode failed, and we are in deep * trouble. Try to re-add it to the reclaim list. */ - read_lock(&pag->pag_ici_lock); + rcu_read_lock(); spin_lock(&ip->i_flags_lock); ip->i_flags &= ~XFS_INEW; @@ -261,7 +284,7 @@ xfs_iget_cache_hit( /* We've got a live one. */ spin_unlock(&ip->i_flags_lock); - read_unlock(&pag->pag_ici_lock); + rcu_read_unlock(); trace_xfs_iget_hit(ip); } @@ -275,7 +298,7 @@ xfs_iget_cache_hit( out_error: spin_unlock(&ip->i_flags_lock); - read_unlock(&pag->pag_ici_lock); + rcu_read_unlock(); return error; } @@ -397,7 +420,7 @@ xfs_iget( xfs_agino_t agino; /* reject inode numbers outside existing AGs */ - if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) + if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) return EINVAL; /* get the perag structure and ensure that it's inode capable */ @@ -406,15 +429,15 @@ xfs_iget( again: error = 0; - read_lock(&pag->pag_ici_lock); + rcu_read_lock(); ip = radix_tree_lookup(&pag->pag_ici_root, agino); if (ip) { - error = xfs_iget_cache_hit(pag, ip, flags, lock_flags); + error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags); if (error) goto out_error_or_again; } else { - read_unlock(&pag->pag_ici_lock); + rcu_read_unlock(); XFS_STATS_INC(xs_ig_missed); error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 108c7a085f94..43ffd9079106 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2000,16 +2000,32 @@ xfs_ifree_cluster( */ for (i = 0; i < ninodes; i++) { retry: - read_lock(&pag->pag_ici_lock); + rcu_read_lock(); ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, (inum + i))); - /* Inode not in memory or stale, nothing to do */ - if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) { - read_unlock(&pag->pag_ici_lock); + /* Inode not in memory, nothing to do */ + if (!ip) { + rcu_read_unlock(); continue; } + /* + * because this is an RCU protected lookup, we could + * find a recently freed or even reallocated inode + * during the lookup. We need to check under the + * i_flags_lock for a valid inode here. Skip it if it + * is not valid, the wrong inode or stale. 
+ */ + spin_lock(&ip->i_flags_lock); + if (ip->i_ino != inum + i || + __xfs_iflags_test(ip, XFS_ISTALE)) { + spin_unlock(&ip->i_flags_lock); + rcu_read_unlock(); + continue; + } + spin_unlock(&ip->i_flags_lock); + /* * Don't try to lock/unlock the current inode, but we * _cannot_ skip the other inodes that we did not find @@ -2019,11 +2035,11 @@ retry: */ if (ip != free_ip && !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { - read_unlock(&pag->pag_ici_lock); + rcu_read_unlock(); delay(1); goto retry; } - read_unlock(&pag->pag_ici_lock); + rcu_read_unlock(); xfs_iflock(ip); xfs_iflags_set(ip, XFS_ISTALE); @@ -2629,7 +2645,7 @@ xfs_iflush_cluster( mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1); first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; - read_lock(&pag->pag_ici_lock); + rcu_read_lock(); /* really need a gang lookup range call here */ nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist, first_index, inodes_per_cluster); @@ -2640,9 +2656,21 @@ xfs_iflush_cluster( iq = ilist[i]; if (iq == ip) continue; - /* if the inode lies outside this cluster, we're done. */ - if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) - break; + + /* + * because this is an RCU protected lookup, we could find a + * recently freed or even reallocated inode during the lookup. + * We need to check under the i_flags_lock for a valid inode + * here. Skip it if it is not valid or the wrong inode. + */ + spin_lock(&ip->i_flags_lock); + if (!ip->i_ino || + (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) { + spin_unlock(&ip->i_flags_lock); + continue; + } + spin_unlock(&ip->i_flags_lock); + /* * Do an un-protected check to see if the inode is dirty and * is a candidate for flushing. These checks will be repeated @@ -2692,7 +2720,7 @@ xfs_iflush_cluster( } out_free: - read_unlock(&pag->pag_ici_lock); + rcu_read_unlock(); kmem_free(ilist); out_put: xfs_perag_put(pag); @@ -2704,7 +2732,7 @@ cluster_corrupt_out: * Corruption detected in the clustering loop. Invalidate the * inode buffer and shut down the filesystem. */ - read_unlock(&pag->pag_ici_lock); + rcu_read_unlock(); /* * Clean up the buffer. If it was B_DELWRI, just release it -- * brelse can handle it with no problems. If not, shut down the -- cgit v1.2.2 From 1a427ab0c1b205d1bda8da0b77ea9d295ac23c57 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 16 Dec 2010 17:08:41 +1100 Subject: xfs: convert pag_ici_lock to a spin lock now that we are using RCU protection for the inode cache lookups, the lock is only needed on the modification side. Hence it is not necessary for the lock to be a rwlock as there are no read side holders anymore. Convert it to a spin lock to reflect it's exclusive nature. 
Signed-off-by: Dave Chinner Reviewed-by: Alex Elder Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_sync.c | 8 ++++---- fs/xfs/xfs_ag.h | 2 +- fs/xfs/xfs_iget.c | 10 +++++----- fs/xfs/xfs_mount.c | 2 +- 4 files changed, 11 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index fd38682da851..a02480de9759 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -620,12 +620,12 @@ xfs_inode_set_reclaim_tag( struct xfs_perag *pag; pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); - write_lock(&pag->pag_ici_lock); + spin_lock(&pag->pag_ici_lock); spin_lock(&ip->i_flags_lock); __xfs_inode_set_reclaim_tag(pag, ip); __xfs_iflags_set(ip, XFS_IRECLAIMABLE); spin_unlock(&ip->i_flags_lock); - write_unlock(&pag->pag_ici_lock); + spin_unlock(&pag->pag_ici_lock); xfs_perag_put(pag); } @@ -833,12 +833,12 @@ reclaim: * added to the tree assert that it's been there before to catch * problems with the inode life time early on. */ - write_lock(&pag->pag_ici_lock); + spin_lock(&pag->pag_ici_lock); if (!radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino))) ASSERT(0); __xfs_inode_clear_reclaim(pag, ip); - write_unlock(&pag->pag_ici_lock); + spin_unlock(&pag->pag_ici_lock); /* * Here we do an (almost) spurious inode lock in order to coordinate diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index 63c7a1a6c022..58632cc17f2d 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h @@ -227,7 +227,7 @@ typedef struct xfs_perag { atomic_t pagf_fstrms; /* # of filestreams active in this AG */ - rwlock_t pag_ici_lock; /* incore inode lock */ + spinlock_t pag_ici_lock; /* incore inode cache lock */ struct radix_tree_root pag_ici_root; /* incore inode cache root */ int pag_ici_reclaimable; /* reclaimable inodes */ struct mutex pag_ici_reclaim_lock; /* serialisation point */ diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index 04ed09b907b8..3ecad00e8409 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -260,7 +260,7 @@ xfs_iget_cache_hit( goto out_error; } - write_lock(&pag->pag_ici_lock); + spin_lock(&pag->pag_ici_lock); spin_lock(&ip->i_flags_lock); ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM); ip->i_flags |= XFS_INEW; @@ -273,7 +273,7 @@ xfs_iget_cache_hit( &xfs_iolock_active, "xfs_iolock_active"); spin_unlock(&ip->i_flags_lock); - write_unlock(&pag->pag_ici_lock); + spin_unlock(&pag->pag_ici_lock); } else { /* If the VFS inode is being torn down, pause and try again. 
*/ if (!igrab(inode)) { @@ -351,7 +351,7 @@ xfs_iget_cache_miss( BUG(); } - write_lock(&pag->pag_ici_lock); + spin_lock(&pag->pag_ici_lock); /* insert the new inode */ error = radix_tree_insert(&pag->pag_ici_root, agino, ip); @@ -366,14 +366,14 @@ xfs_iget_cache_miss( ip->i_udquot = ip->i_gdquot = NULL; xfs_iflags_set(ip, XFS_INEW); - write_unlock(&pag->pag_ici_lock); + spin_unlock(&pag->pag_ici_lock); radix_tree_preload_end(); *ipp = ip; return 0; out_preload_end: - write_unlock(&pag->pag_ici_lock); + spin_unlock(&pag->pag_ici_lock); radix_tree_preload_end(); if (lock_flags) xfs_iunlock(ip, lock_flags); diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 40579fdf0d0a..d447aef84bc3 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -472,7 +472,7 @@ xfs_initialize_perag( goto out_unwind; pag->pag_agno = index; pag->pag_mount = mp; - rwlock_init(&pag->pag_ici_lock); + spin_lock_init(&pag->pag_ici_lock); mutex_init(&pag->pag_ici_reclaim_lock); INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); spin_lock_init(&pag->pag_buf_lock); -- cgit v1.2.2 From ff57ab21995a8636cfc72efeebb09cc6034d756f Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 30 Nov 2010 17:27:57 +1100 Subject: xfs: convert xfsbud shrinker to a per-buftarg shrinker. Before we introduce per-buftarg LRU lists, split the shrinker implementation into per-buftarg shrinker callbacks. At the moment we wake all the xfsbufds to run the delayed write queues to free the dirty buffers and make their pages available for reclaim. However, with an LRU, we want to be able to free clean, unused buffers as well, so we need to separate the xfsbufd from the shrinker callbacks. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Alex Elder --- fs/xfs/linux-2.6/xfs_buf.c | 89 +++++++++++++--------------------------------- fs/xfs/linux-2.6/xfs_buf.h | 4 ++- 2 files changed, 27 insertions(+), 66 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 4c5deb6e9e31..0a00d7a2fc23 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -44,12 +44,7 @@ static kmem_zone_t *xfs_buf_zone; STATIC int xfsbufd(void *); -STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t); STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int); -static struct shrinker xfs_buf_shake = { - .shrink = xfsbufd_wakeup, - .seeks = DEFAULT_SEEKS, -}; static struct workqueue_struct *xfslogd_workqueue; struct workqueue_struct *xfsdatad_workqueue; @@ -337,7 +332,6 @@ _xfs_buf_lookup_pages( __func__, gfp_mask); XFS_STATS_INC(xb_page_retries); - xfsbufd_wakeup(NULL, 0, gfp_mask); congestion_wait(BLK_RW_ASYNC, HZ/50); goto retry; } @@ -1461,28 +1455,23 @@ xfs_wait_buftarg( } } -/* - * buftarg list for delwrite queue processing - */ -static LIST_HEAD(xfs_buftarg_list); -static DEFINE_SPINLOCK(xfs_buftarg_lock); - -STATIC void -xfs_register_buftarg( - xfs_buftarg_t *btp) -{ - spin_lock(&xfs_buftarg_lock); - list_add(&btp->bt_list, &xfs_buftarg_list); - spin_unlock(&xfs_buftarg_lock); -} - -STATIC void -xfs_unregister_buftarg( - xfs_buftarg_t *btp) +int +xfs_buftarg_shrink( + struct shrinker *shrink, + int nr_to_scan, + gfp_t mask) { - spin_lock(&xfs_buftarg_lock); - list_del(&btp->bt_list); - spin_unlock(&xfs_buftarg_lock); + struct xfs_buftarg *btp = container_of(shrink, + struct xfs_buftarg, bt_shrinker); + if (nr_to_scan) { + if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags)) + return -1; + if (list_empty(&btp->bt_delwrite_queue)) + return -1; + set_bit(XBT_FORCE_FLUSH, &btp->bt_flags); + 
wake_up_process(btp->bt_task); + } + return list_empty(&btp->bt_delwrite_queue) ? -1 : 1; } void @@ -1490,17 +1479,14 @@ xfs_free_buftarg( struct xfs_mount *mp, struct xfs_buftarg *btp) { + unregister_shrinker(&btp->bt_shrinker); + xfs_flush_buftarg(btp, 1); if (mp->m_flags & XFS_MOUNT_BARRIER) xfs_blkdev_issue_flush(btp); iput(btp->bt_mapping->host); - /* Unregister the buftarg first so that we don't get a - * wakeup finding a non-existent task - */ - xfs_unregister_buftarg(btp); kthread_stop(btp->bt_task); - kmem_free(btp); } @@ -1597,20 +1583,13 @@ xfs_alloc_delwrite_queue( xfs_buftarg_t *btp, const char *fsname) { - int error = 0; - - INIT_LIST_HEAD(&btp->bt_list); INIT_LIST_HEAD(&btp->bt_delwrite_queue); spin_lock_init(&btp->bt_delwrite_lock); btp->bt_flags = 0; btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname); - if (IS_ERR(btp->bt_task)) { - error = PTR_ERR(btp->bt_task); - goto out_error; - } - xfs_register_buftarg(btp); -out_error: - return error; + if (IS_ERR(btp->bt_task)) + return PTR_ERR(btp->bt_task); + return 0; } xfs_buftarg_t * @@ -1633,6 +1612,9 @@ xfs_alloc_buftarg( goto error; if (xfs_alloc_delwrite_queue(btp, fsname)) goto error; + btp->bt_shrinker.shrink = xfs_buftarg_shrink; + btp->bt_shrinker.seeks = DEFAULT_SEEKS; + register_shrinker(&btp->bt_shrinker); return btp; error: @@ -1737,27 +1719,6 @@ xfs_buf_runall_queues( flush_workqueue(queue); } -STATIC int -xfsbufd_wakeup( - struct shrinker *shrink, - int priority, - gfp_t mask) -{ - xfs_buftarg_t *btp; - - spin_lock(&xfs_buftarg_lock); - list_for_each_entry(btp, &xfs_buftarg_list, bt_list) { - if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags)) - continue; - if (list_empty(&btp->bt_delwrite_queue)) - continue; - set_bit(XBT_FORCE_FLUSH, &btp->bt_flags); - wake_up_process(btp->bt_task); - } - spin_unlock(&xfs_buftarg_lock); - return 0; -} - /* * Move as many buffers as specified to the supplied list * idicating if we skipped any buffers to prevent deadlocks. @@ -1952,7 +1913,6 @@ xfs_buf_init(void) if (!xfsconvertd_workqueue) goto out_destroy_xfsdatad_workqueue; - register_shrinker(&xfs_buf_shake); return 0; out_destroy_xfsdatad_workqueue: @@ -1968,7 +1928,6 @@ xfs_buf_init(void) void xfs_buf_terminate(void) { - unregister_shrinker(&xfs_buf_shake); destroy_workqueue(xfsconvertd_workqueue); destroy_workqueue(xfsdatad_workqueue); destroy_workqueue(xfslogd_workqueue); diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 383a3f37cf98..9344103e77d6 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -128,10 +128,12 @@ typedef struct xfs_buftarg { /* per device delwri queue */ struct task_struct *bt_task; - struct list_head bt_list; struct list_head bt_delwrite_queue; spinlock_t bt_delwrite_lock; unsigned long bt_flags; + + /* LRU control structures */ + struct shrinker bt_shrinker; } xfs_buftarg_t; /* -- cgit v1.2.2 From 430cbeb86fdcbbdabea7d4aa65307de8de425350 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 2 Dec 2010 16:30:55 +1100 Subject: xfs: add a lru to the XFS buffer cache Introduce a per-buftarg LRU for memory reclaim to operate on. This is the last piece we need to put in place so that we can fully control the buffer lifecycle. This allows XFS to be responsibile for maintaining the working set of buffers under memory pressure instead of relying on the VM reclaim not to take pages we need out from underneath us. The implementation introduces a b_lru_ref counter into the buffer. 
This is currently set to 1 whenever the buffer is referenced and so is used to determine if the buffer should be added to the LRU or not when freed. Effectively it allows lazy LRU initialisation of the buffer so we do not need to touch the LRU list and locks in xfs_buf_find(). Instead, when the buffer is being released and we drop the last reference to it, we check the b_lru_ref count and if it is none zero we re-add the buffer reference and add the inode to the LRU. The b_lru_ref counter is decremented by the shrinker, and whenever the shrinker comes across a buffer with a zero b_lru_ref counter, if released the LRU reference on the buffer. In the absence of a lookup race, this will result in the buffer being freed. This counting mechanism is used instead of a reference flag so that it is simple to re-introduce buffer-type specific reclaim reference counts to prioritise reclaim more effectively. We still have all those hooks in the XFS code, so this will provide the infrastructure to re-implement that functionality. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_buf.c | 164 +++++++++++++++++++++++++++++++++++++++------ fs/xfs/linux-2.6/xfs_buf.h | 8 ++- 2 files changed, 150 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 0a00d7a2fc23..92f1f2acc6ab 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -163,8 +163,79 @@ test_page_region( } /* - * Internal xfs_buf_t object manipulation + * xfs_buf_lru_add - add a buffer to the LRU. + * + * The LRU takes a new reference to the buffer so that it will only be freed + * once the shrinker takes the buffer off the LRU. */ +STATIC void +xfs_buf_lru_add( + struct xfs_buf *bp) +{ + struct xfs_buftarg *btp = bp->b_target; + + spin_lock(&btp->bt_lru_lock); + if (list_empty(&bp->b_lru)) { + atomic_inc(&bp->b_hold); + list_add_tail(&bp->b_lru, &btp->bt_lru); + btp->bt_lru_nr++; + } + spin_unlock(&btp->bt_lru_lock); +} + +/* + * xfs_buf_lru_del - remove a buffer from the LRU + * + * The unlocked check is safe here because it only occurs when there are not + * b_lru_ref counts left on the inode under the pag->pag_buf_lock. it is there + * to optimise the shrinker removing the buffer from the LRU and calling + * xfs_buf_free(). i.e. it removes an unneccessary round trip on the + * bt_lru_lock. + */ +STATIC void +xfs_buf_lru_del( + struct xfs_buf *bp) +{ + struct xfs_buftarg *btp = bp->b_target; + + if (list_empty(&bp->b_lru)) + return; + + spin_lock(&btp->bt_lru_lock); + if (!list_empty(&bp->b_lru)) { + list_del_init(&bp->b_lru); + btp->bt_lru_nr--; + } + spin_unlock(&btp->bt_lru_lock); +} + +/* + * When we mark a buffer stale, we remove the buffer from the LRU and clear the + * b_lru_ref count so that the buffer is freed immediately when the buffer + * reference count falls to zero. If the buffer is already on the LRU, we need + * to remove the reference that LRU holds on the buffer. + * + * This prevents build-up of stale buffers on the LRU. 
+ */ +void +xfs_buf_stale( + struct xfs_buf *bp) +{ + bp->b_flags |= XBF_STALE; + atomic_set(&(bp)->b_lru_ref, 0); + if (!list_empty(&bp->b_lru)) { + struct xfs_buftarg *btp = bp->b_target; + + spin_lock(&btp->bt_lru_lock); + if (!list_empty(&bp->b_lru)) { + list_del_init(&bp->b_lru); + btp->bt_lru_nr--; + atomic_dec(&bp->b_hold); + } + spin_unlock(&btp->bt_lru_lock); + } + ASSERT(atomic_read(&bp->b_hold) >= 1); +} STATIC void _xfs_buf_initialize( @@ -181,7 +252,9 @@ _xfs_buf_initialize( memset(bp, 0, sizeof(xfs_buf_t)); atomic_set(&bp->b_hold, 1); + atomic_set(&bp->b_lru_ref, 1); init_completion(&bp->b_iowait); + INIT_LIST_HEAD(&bp->b_lru); INIT_LIST_HEAD(&bp->b_list); RB_CLEAR_NODE(&bp->b_rbnode); sema_init(&bp->b_sema, 0); /* held, no waiters */ @@ -257,6 +330,8 @@ xfs_buf_free( { trace_xfs_buf_free(bp, _RET_IP_); + ASSERT(list_empty(&bp->b_lru)); + if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) { uint i; @@ -822,6 +897,7 @@ xfs_buf_rele( if (!pag) { ASSERT(!bp->b_relse); + ASSERT(list_empty(&bp->b_lru)); ASSERT(RB_EMPTY_NODE(&bp->b_rbnode)); if (atomic_dec_and_test(&bp->b_hold)) xfs_buf_free(bp); @@ -829,13 +905,19 @@ xfs_buf_rele( } ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode)); + ASSERT(atomic_read(&bp->b_hold) > 0); if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) { if (bp->b_relse) { atomic_inc(&bp->b_hold); spin_unlock(&pag->pag_buf_lock); bp->b_relse(bp); + } else if (!(bp->b_flags & XBF_STALE) && + atomic_read(&bp->b_lru_ref)) { + xfs_buf_lru_add(bp); + spin_unlock(&pag->pag_buf_lock); } else { + xfs_buf_lru_del(bp); ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q))); rb_erase(&bp->b_rbnode, &pag->pag_buf_tree); spin_unlock(&pag->pag_buf_lock); @@ -1432,27 +1514,35 @@ xfs_buf_iomove( */ /* - * Wait for any bufs with callbacks that have been submitted but - * have not yet returned... walk the hash list for the target. + * Wait for any bufs with callbacks that have been submitted but have not yet + * returned. These buffers will have an elevated hold count, so wait on those + * while freeing all the buffers only held by the LRU. */ void xfs_wait_buftarg( struct xfs_buftarg *btp) { - struct xfs_perag *pag; - uint i; + struct xfs_buf *bp; - for (i = 0; i < btp->bt_mount->m_sb.sb_agcount; i++) { - pag = xfs_perag_get(btp->bt_mount, i); - spin_lock(&pag->pag_buf_lock); - while (rb_first(&pag->pag_buf_tree)) { - spin_unlock(&pag->pag_buf_lock); +restart: + spin_lock(&btp->bt_lru_lock); + while (!list_empty(&btp->bt_lru)) { + bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru); + if (atomic_read(&bp->b_hold) > 1) { + spin_unlock(&btp->bt_lru_lock); delay(100); - spin_lock(&pag->pag_buf_lock); + goto restart; } - spin_unlock(&pag->pag_buf_lock); - xfs_perag_put(pag); + /* + * clear the LRU reference count so the bufer doesn't get + * ignored in xfs_buf_rele(). 
+ */ + atomic_set(&bp->b_lru_ref, 0); + spin_unlock(&btp->bt_lru_lock); + xfs_buf_rele(bp); + spin_lock(&btp->bt_lru_lock); } + spin_unlock(&btp->bt_lru_lock); } int @@ -1463,15 +1553,45 @@ xfs_buftarg_shrink( { struct xfs_buftarg *btp = container_of(shrink, struct xfs_buftarg, bt_shrinker); - if (nr_to_scan) { - if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags)) - return -1; - if (list_empty(&btp->bt_delwrite_queue)) - return -1; - set_bit(XBT_FORCE_FLUSH, &btp->bt_flags); - wake_up_process(btp->bt_task); + struct xfs_buf *bp; + LIST_HEAD(dispose); + + if (!nr_to_scan) + return btp->bt_lru_nr; + + spin_lock(&btp->bt_lru_lock); + while (!list_empty(&btp->bt_lru)) { + if (nr_to_scan-- <= 0) + break; + + bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru); + + /* + * Decrement the b_lru_ref count unless the value is already + * zero. If the value is already zero, we need to reclaim the + * buffer, otherwise it gets another trip through the LRU. + */ + if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) { + list_move_tail(&bp->b_lru, &btp->bt_lru); + continue; + } + + /* + * remove the buffer from the LRU now to avoid needing another + * lock round trip inside xfs_buf_rele(). + */ + list_move(&bp->b_lru, &dispose); + btp->bt_lru_nr--; } - return list_empty(&btp->bt_delwrite_queue) ? -1 : 1; + spin_unlock(&btp->bt_lru_lock); + + while (!list_empty(&dispose)) { + bp = list_first_entry(&dispose, struct xfs_buf, b_lru); + list_del_init(&bp->b_lru); + xfs_buf_rele(bp); + } + + return btp->bt_lru_nr; } void @@ -1606,6 +1726,8 @@ xfs_alloc_buftarg( btp->bt_mount = mp; btp->bt_dev = bdev->bd_dev; btp->bt_bdev = bdev; + INIT_LIST_HEAD(&btp->bt_lru); + spin_lock_init(&btp->bt_lru_lock); if (xfs_setsize_buftarg_early(btp, bdev)) goto error; if (xfs_mapping_buftarg(btp, bdev)) diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 9344103e77d6..4601eabd0da0 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -134,6 +134,9 @@ typedef struct xfs_buftarg { /* LRU control structures */ struct shrinker bt_shrinker; + struct list_head bt_lru; + spinlock_t bt_lru_lock; + unsigned int bt_lru_nr; } xfs_buftarg_t; /* @@ -166,9 +169,11 @@ typedef struct xfs_buf { xfs_off_t b_file_offset; /* offset in file */ size_t b_buffer_length;/* size of buffer in bytes */ atomic_t b_hold; /* reference count */ + atomic_t b_lru_ref; /* lru reclaim ref count */ xfs_buf_flags_t b_flags; /* status flags */ struct semaphore b_sema; /* semaphore for lockables */ + struct list_head b_lru; /* lru list */ wait_queue_head_t b_waiters; /* unpin waiters */ struct list_head b_list; struct xfs_perag *b_pag; /* contains rbtree root */ @@ -266,7 +271,8 @@ extern void xfs_buf_terminate(void); #define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \ ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED)) -#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XBF_STALE) +void xfs_buf_stale(struct xfs_buf *bp); +#define XFS_BUF_STALE(bp) xfs_buf_stale(bp); #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE) #define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE) #define XFS_BUF_SUPER_STALE(bp) do { \ -- cgit v1.2.2 From 821eb21d97a8b686649c08b7284d0b9f34d0e138 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 2 Dec 2010 16:31:13 +1100 Subject: xfs: connect up buffer reclaim priority hooks Now that the buffer reclaim infrastructure can handle different reclaim priorities for different types of buffers, reconnect the hooks in the XFS code that has been sitting dormant since it was ported to Linux. 
This should finally give use reclaim prioritisation that is on a par with the functionality that Irix provided XFS 15 years ago. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_buf.h | 10 ++++++++-- fs/xfs/xfs_btree.c | 9 ++++----- fs/xfs/xfs_inode.c | 2 +- fs/xfs/xfs_trans.h | 2 +- 4 files changed, 14 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index 4601eabd0da0..a76c2428faff 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -336,9 +336,15 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length) #define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt)) -#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) do { } while (0) +static inline void +xfs_buf_set_ref( + struct xfs_buf *bp, + int lru_ref) +{ + atomic_set(&bp->b_lru_ref, lru_ref); +} +#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) xfs_buf_set_ref(bp, ref) #define XFS_BUF_SET_VTYPE(bp, type) do { } while (0) -#define XFS_BUF_SET_REF(bp, ref) do { } while (0) #define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count)) diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c index 04f9cca8da7e..2f9e97c128a0 100644 --- a/fs/xfs/xfs_btree.c +++ b/fs/xfs/xfs_btree.c @@ -634,9 +634,8 @@ xfs_btree_read_bufl( return error; } ASSERT(!bp || !XFS_BUF_GETERROR(bp)); - if (bp != NULL) { + if (bp) XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval); - } *bpp = bp; return 0; } @@ -944,13 +943,13 @@ xfs_btree_set_refs( switch (cur->bc_btnum) { case XFS_BTNUM_BNO: case XFS_BTNUM_CNT: - XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_ALLOC_BTREE_REF); + XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_ALLOC_BTREE_REF); break; case XFS_BTNUM_INO: - XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_INOMAP, XFS_INO_BTREE_REF); + XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, XFS_INO_BTREE_REF); break; case XFS_BTNUM_BMAP: - XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_BMAP_BTREE_REF); + XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_BMAP_BTREE_REF); break; default: ASSERT(0); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 43ffd9079106..be7cf625421f 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -887,7 +887,7 @@ xfs_iread( * around for a while. This helps to keep recently accessed * meta-data in-core longer. */ - XFS_BUF_SET_REF(bp, XFS_INO_REF); + xfs_buf_set_ref(bp, XFS_INO_REF); /* * Use xfs_trans_brelse() to release the buffer containing the diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 246286b77a86..c2042b736b81 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -294,8 +294,8 @@ struct xfs_log_item_desc { #define XFS_ALLOC_BTREE_REF 2 #define XFS_BMAP_BTREE_REF 2 #define XFS_DIR_BTREE_REF 2 +#define XFS_INO_REF 2 #define XFS_ATTR_BTREE_REF 1 -#define XFS_INO_REF 1 #define XFS_DQUOT_REF 1 #ifdef __KERNEL__ -- cgit v1.2.2 From 9c5f8414efd5eeed9f498d4170337a3eb126341f Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 20 Dec 2010 11:57:24 +1100 Subject: xfs: fix EFI transaction cancellation. XFS_EFI_CANCELED has not been set in the code base since xfs_efi_cancel() was removed back in 2006 by commit 065d312e15902976d256ddaf396a7950ec0350a8 ("[XFS] Remove unused iop_abort log item operation), and even then xfs_efi_cancel() was never called. I haven't tracked it back further than that (beyond git history), but it indicates that the handling of EFIs in cancelled transactions has been broken for a long time. Basically, when we get an IOP_UNPIN(lip, 1); call from xfs_trans_uncommit() (i.e. 
remove == 1), if we don't free the log item descriptor we leak it. Fix the behaviour to be correct and kill the XFS_EFI_CANCELED flag. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_extfree_item.c | 20 +++++++++----------- fs/xfs/xfs_extfree_item.h | 1 - 2 files changed, 9 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index a55e687bf562..5997efae05dc 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -99,10 +99,11 @@ xfs_efi_item_pin( } /* - * While EFIs cannot really be pinned, the unpin operation is the - * last place at which the EFI is manipulated during a transaction. - * Here we coordinate with xfs_efi_cancel() to determine who gets to - * free the EFI. + * While EFIs cannot really be pinned, the unpin operation is the last place at + * which the EFI is manipulated during a transaction. If we are being asked to + * remove the EFI it's because the transaction has been cancelled and by + * definition that means the EFI cannot be in the AIL so remove it from the + * transaction and free it. */ STATIC void xfs_efi_item_unpin( @@ -113,17 +114,14 @@ xfs_efi_item_unpin( struct xfs_ail *ailp = lip->li_ailp; spin_lock(&ailp->xa_lock); - if (efip->efi_flags & XFS_EFI_CANCELED) { - if (remove) - xfs_trans_del_item(lip); - - /* xfs_trans_ail_delete() drops the AIL lock. */ - xfs_trans_ail_delete(ailp, lip); + if (remove) { + ASSERT(!(lip->li_flags & XFS_LI_IN_AIL)); + xfs_trans_del_item(lip); xfs_efi_item_free(efip); } else { efip->efi_flags |= XFS_EFI_COMMITTED; - spin_unlock(&ailp->xa_lock); } + spin_unlock(&ailp->xa_lock); } /* diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h index 0d22c56fdf64..f7834ec8efad 100644 --- a/fs/xfs/xfs_extfree_item.h +++ b/fs/xfs/xfs_extfree_item.h @@ -115,7 +115,6 @@ typedef struct xfs_efd_log_format_64 { */ #define XFS_EFI_RECOVERED 0x1 #define XFS_EFI_COMMITTED 0x2 -#define XFS_EFI_CANCELED 0x4 /* * This is the "extent free intention" log item. It is used -- cgit v1.2.2 From b199c8a4ba11879df87daad496ceee41fdc6aa82 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 20 Dec 2010 11:59:49 +1100 Subject: xfs: Pull EFI/EFD handling out from under the AIL lock EFI/EFD interactions are protected from races by the AIL lock. They are the only type of log items that require the AIL lock to serialise internal state, so they need to be separated from the AIL lock before we can do bulk insert operations on the AIL. To achieve this, convert the counter of the number of extents in the EFI to an atomic so it can be safely manipulated by EFD processing without locks. Also, convert the EFI state flag manipulations to use atomic bit operations so no locks are needed to record state changes. Finally, use the state bits to determine when it is safe to free the EFI and clean up the code to do this neatly. 
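The locking change is easier to see in a stripped-down, hedged form (invented demo_* names; the AIL removal and the committed-bit handshake of the real code are omitted): the outstanding-extent count becomes an atomic_t so EFD processing can drop its share without taking any lock, and EFI state is recorded with atomic bit operations.

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/slab.h>

#define DEMO_EFI_COMMITTED	2	/* bit number, like XFS_EFI_COMMITTED */

struct demo_efi {
	atomic_t	next_extent;	/* extents still to be released */
	unsigned long	flags;
};

static void demo_efi_free(struct demo_efi *efip)
{
	kfree(efip);
}

/* transaction commit records its progress with a lock-free bit operation */
static void demo_efi_committed(struct demo_efi *efip)
{
	set_bit(DEMO_EFI_COMMITTED, &efip->flags);
}

/* each EFD releases the extents it logged; whoever reaches zero frees it */
static void demo_efi_release(struct demo_efi *efip, unsigned int nextents)
{
	if (atomic_sub_and_test(nextents, &efip->next_extent))
		demo_efi_free(efip);
}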
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_extfree_item.c | 81 +++++++++++++++++++++++++--------------------- fs/xfs/xfs_extfree_item.h | 10 +++--- fs/xfs/xfs_log_recover.c | 9 +++--- fs/xfs/xfs_trans_extfree.c | 8 +++-- 4 files changed, 59 insertions(+), 49 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index 5997efae05dc..75f2ef60e579 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -47,6 +47,28 @@ xfs_efi_item_free( kmem_zone_free(xfs_efi_zone, efip); } +/* + * Freeing the efi requires that we remove it from the AIL if it has already + * been placed there. However, the EFI may not yet have been placed in the AIL + * when called by xfs_efi_release() from EFD processing due to the ordering of + * committed vs unpin operations in bulk insert operations. Hence the + * test_and_clear_bit(XFS_EFI_COMMITTED) to ensure only the last caller frees + * the EFI. + */ +STATIC void +__xfs_efi_release( + struct xfs_efi_log_item *efip) +{ + struct xfs_ail *ailp = efip->efi_item.li_ailp; + + if (!test_and_clear_bit(XFS_EFI_COMMITTED, &efip->efi_flags)) { + spin_lock(&ailp->xa_lock); + /* xfs_trans_ail_delete() drops the AIL lock. */ + xfs_trans_ail_delete(ailp, &efip->efi_item); + xfs_efi_item_free(efip); + } +} + /* * This returns the number of iovecs needed to log the given efi item. * We only need 1 iovec for an efi item. It just logs the efi_log_format @@ -74,7 +96,8 @@ xfs_efi_item_format( struct xfs_efi_log_item *efip = EFI_ITEM(lip); uint size; - ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents); + ASSERT(atomic_read(&efip->efi_next_extent) == + efip->efi_format.efi_nextents); efip->efi_format.efi_type = XFS_LI_EFI; @@ -103,7 +126,8 @@ xfs_efi_item_pin( * which the EFI is manipulated during a transaction. If we are being asked to * remove the EFI it's because the transaction has been cancelled and by * definition that means the EFI cannot be in the AIL so remove it from the - * transaction and free it. + * transaction and free it. Otherwise coordinate with xfs_efi_release() (via + * XFS_EFI_COMMITTED) to determine who gets to free the EFI. */ STATIC void xfs_efi_item_unpin( @@ -111,17 +135,14 @@ xfs_efi_item_unpin( int remove) { struct xfs_efi_log_item *efip = EFI_ITEM(lip); - struct xfs_ail *ailp = lip->li_ailp; - spin_lock(&ailp->xa_lock); if (remove) { ASSERT(!(lip->li_flags & XFS_LI_IN_AIL)); xfs_trans_del_item(lip); xfs_efi_item_free(efip); - } else { - efip->efi_flags |= XFS_EFI_COMMITTED; + return; } - spin_unlock(&ailp->xa_lock); + __xfs_efi_release(efip); } /* @@ -150,16 +171,20 @@ xfs_efi_item_unlock( } /* - * The EFI is logged only once and cannot be moved in the log, so - * simply return the lsn at which it's been logged. The canceled - * flag is not paid any attention here. Checking for that is delayed - * until the EFI is unpinned. + * The EFI is logged only once and cannot be moved in the log, so simply return + * the lsn at which it's been logged. For bulk transaction committed + * processing, the EFI may be processed but not yet unpinned prior to the EFD + * being processed. Set the XFS_EFI_COMMITTED flag so this case can be detected + * when processing the EFD. 
*/ STATIC xfs_lsn_t xfs_efi_item_committed( struct xfs_log_item *lip, xfs_lsn_t lsn) { + struct xfs_efi_log_item *efip = EFI_ITEM(lip); + + set_bit(XFS_EFI_COMMITTED, &efip->efi_flags); return lsn; } @@ -228,6 +253,7 @@ xfs_efi_init( xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops); efip->efi_format.efi_nextents = nextents; efip->efi_format.efi_id = (__psint_t)(void*)efip; + atomic_set(&efip->efi_next_extent, 0); return efip; } @@ -287,37 +313,18 @@ xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt) } /* - * This is called by the efd item code below to release references to - * the given efi item. Each efd calls this with the number of - * extents that it has logged, and when the sum of these reaches - * the total number of extents logged by this efi item we can free - * the efi item. - * - * Freeing the efi item requires that we remove it from the AIL. - * We'll use the AIL lock to protect our counters as well as - * the removal from the AIL. + * This is called by the efd item code below to release references to the given + * efi item. Each efd calls this with the number of extents that it has + * logged, and when the sum of these reaches the total number of extents logged + * by this efi item we can free the efi item. */ void xfs_efi_release(xfs_efi_log_item_t *efip, uint nextents) { - struct xfs_ail *ailp = efip->efi_item.li_ailp; - int extents_left; - - ASSERT(efip->efi_next_extent > 0); - ASSERT(efip->efi_flags & XFS_EFI_COMMITTED); - - spin_lock(&ailp->xa_lock); - ASSERT(efip->efi_next_extent >= nextents); - efip->efi_next_extent -= nextents; - extents_left = efip->efi_next_extent; - if (extents_left == 0) { - /* xfs_trans_ail_delete() drops the AIL lock. */ - xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip); - xfs_efi_item_free(efip); - } else { - spin_unlock(&ailp->xa_lock); - } + ASSERT(atomic_read(&efip->efi_next_extent) >= nextents); + if (atomic_sub_and_test(nextents, &efip->efi_next_extent)) + __xfs_efi_release(efip); } static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip) diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h index f7834ec8efad..375f68e42531 100644 --- a/fs/xfs/xfs_extfree_item.h +++ b/fs/xfs/xfs_extfree_item.h @@ -111,10 +111,10 @@ typedef struct xfs_efd_log_format_64 { #define XFS_EFI_MAX_FAST_EXTENTS 16 /* - * Define EFI flags. + * Define EFI flag bits. Manipulated by set/clear/test_bit operators. */ -#define XFS_EFI_RECOVERED 0x1 -#define XFS_EFI_COMMITTED 0x2 +#define XFS_EFI_RECOVERED 1 +#define XFS_EFI_COMMITTED 2 /* * This is the "extent free intention" log item. 
It is used @@ -124,8 +124,8 @@ typedef struct xfs_efd_log_format_64 { */ typedef struct xfs_efi_log_item { xfs_log_item_t efi_item; - uint efi_flags; /* misc flags */ - uint efi_next_extent; + atomic_t efi_next_extent; + unsigned long efi_flags; /* misc flags */ xfs_efi_log_format_t efi_format; } xfs_efi_log_item_t; diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 4ab4f6ff48aa..d7219e29d9ab 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2567,8 +2567,7 @@ xlog_recover_efi_pass2( xfs_efi_item_free(efip); return error; } - efip->efi_next_extent = efi_formatp->efi_nextents; - efip->efi_flags |= XFS_EFI_COMMITTED; + atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents); spin_lock(&log->l_ailp->xa_lock); /* @@ -2878,7 +2877,7 @@ xlog_recover_process_efi( xfs_extent_t *extp; xfs_fsblock_t startblock_fsb; - ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED)); + ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)); /* * First check the validity of the extents described by the @@ -2917,7 +2916,7 @@ xlog_recover_process_efi( extp->ext_len); } - efip->efi_flags |= XFS_EFI_RECOVERED; + set_bit(XFS_EFI_RECOVERED, &efip->efi_flags); error = xfs_trans_commit(tp, 0); return error; @@ -2974,7 +2973,7 @@ xlog_recover_process_efis( * Skip EFIs that we've already processed. */ efip = (xfs_efi_log_item_t *)lip; - if (efip->efi_flags & XFS_EFI_RECOVERED) { + if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) { lip = xfs_trans_ail_cursor_next(ailp, &cur); continue; } diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c index f783d5e9fa70..f7590f5badea 100644 --- a/fs/xfs/xfs_trans_extfree.c +++ b/fs/xfs/xfs_trans_extfree.c @@ -69,12 +69,16 @@ xfs_trans_log_efi_extent(xfs_trans_t *tp, tp->t_flags |= XFS_TRANS_DIRTY; efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY; - next_extent = efip->efi_next_extent; + /* + * atomic_inc_return gives us the value after the increment; + * we want to use it as an array index so we need to subtract 1 from + * it. + */ + next_extent = atomic_inc_return(&efip->efi_next_extent) - 1; ASSERT(next_extent < efip->efi_format.efi_nextents); extp = &(efip->efi_format.efi_extents[next_extent]); extp->ext_start = start_block; extp->ext_len = ext_len; - efip->efi_next_extent++; } -- cgit v1.2.2 From eb3efa1249b6413be930bdf13d10b6238028a440 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 3 Dec 2010 16:42:57 +1100 Subject: xfs: clean up xfs_ail_delete() xfs_ail_delete() has a needlessly complex interface. It returns the log item that was passed in for deletion (which the callers then assert is identical to the one passed in), and callers of xfs_ail_delete() still need to invalidate current traversal cursors. Make xfs_ail_delete() return void, move the cursor invalidation inside it, and clean up the callers just to use the log item pointer they passed in. While cleaning up, remove the messy and unnecessary "/* ARGUSED */" comments around all these functions. 
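A compact sketch of the shape of this cleanup, using a generic doubly-linked list rather than the real AIL structures (names are illustrative only, not the XFS code):

    #include <stddef.h>

    struct item { struct item *prev, *next; };

    /* Before: the helper handed the item straight back, so every caller
     * asserted ret == item and then invalidated traversal cursors itself. */
    static struct item *ail_delete_old(struct item *ip)
    {
            ip->prev->next = ip->next;
            ip->next->prev = ip->prev;
            return ip;
    }

    /* After: return void and fold the follow-up work into one place. */
    static void ail_delete(struct item *ip)
    {
            ip->prev->next = ip->next;
            ip->next->prev = ip->prev;
            /* cursor invalidation lives here now, next to the unlink */
    }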
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_trans_ail.c | 27 +++++++-------------------- 1 file changed, 7 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index dc9069568ff7..645928cab42d 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -29,7 +29,7 @@ #include "xfs_error.h" STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *); -STATIC xfs_log_item_t * xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *); +STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *); STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *); STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *); @@ -468,16 +468,13 @@ xfs_trans_ail_update( xfs_log_item_t *lip, xfs_lsn_t lsn) __releases(ailp->xa_lock) { - xfs_log_item_t *dlip = NULL; xfs_log_item_t *mlip; /* ptr to minimum lip */ xfs_lsn_t tail_lsn; mlip = xfs_ail_min(ailp); if (lip->li_flags & XFS_LI_IN_AIL) { - dlip = xfs_ail_delete(ailp, lip); - ASSERT(dlip == lip); - xfs_trans_ail_cursor_clear(ailp, dlip); + xfs_ail_delete(ailp, lip); } else { lip->li_flags |= XFS_LI_IN_AIL; } @@ -485,7 +482,7 @@ xfs_trans_ail_update( lip->li_lsn = lsn; xfs_ail_insert(ailp, lip); - if (mlip == dlip) { + if (mlip == lip) { mlip = xfs_ail_min(ailp); /* * It is not safe to access mlip after the AIL lock is @@ -524,21 +521,18 @@ xfs_trans_ail_delete( struct xfs_ail *ailp, xfs_log_item_t *lip) __releases(ailp->xa_lock) { - xfs_log_item_t *dlip; xfs_log_item_t *mlip; xfs_lsn_t tail_lsn; if (lip->li_flags & XFS_LI_IN_AIL) { mlip = xfs_ail_min(ailp); - dlip = xfs_ail_delete(ailp, lip); - ASSERT(dlip == lip); - xfs_trans_ail_cursor_clear(ailp, dlip); + xfs_ail_delete(ailp, lip); lip->li_flags &= ~XFS_LI_IN_AIL; lip->li_lsn = 0; - if (mlip == dlip) { + if (mlip == lip) { mlip = xfs_ail_min(ailp); /* * It is not safe to access mlip after the AIL lock @@ -632,7 +626,6 @@ STATIC void xfs_ail_insert( struct xfs_ail *ailp, xfs_log_item_t *lip) -/* ARGSUSED */ { xfs_log_item_t *next_lip; @@ -661,18 +654,14 @@ xfs_ail_insert( /* * Delete the given item from the AIL. Return a pointer to the item. */ -/*ARGSUSED*/ -STATIC xfs_log_item_t * +STATIC void xfs_ail_delete( struct xfs_ail *ailp, xfs_log_item_t *lip) -/* ARGSUSED */ { xfs_ail_check(ailp, lip); - list_del(&lip->li_ail); - - return lip; + xfs_trans_ail_cursor_clear(ailp, lip); } /* @@ -682,7 +671,6 @@ xfs_ail_delete( STATIC xfs_log_item_t * xfs_ail_min( struct xfs_ail *ailp) -/* ARGSUSED */ { if (list_empty(&ailp->xa_ail)) return NULL; @@ -699,7 +687,6 @@ STATIC xfs_log_item_t * xfs_ail_next( struct xfs_ail *ailp, xfs_log_item_t *lip) -/* ARGSUSED */ { if (lip->li_ail.next == &ailp->xa_ail) return NULL; -- cgit v1.2.2 From 0e57f6a36f9be03e5abb755f524ee91c4aebe854 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 20 Dec 2010 12:02:19 +1100 Subject: xfs: bulk AIL insertion during transaction commit When inserting items into the AIL from the transaction committed callbacks, we take the AIL lock for every single item that is to be inserted. For a CIL checkpoint commit, this can be tens of thousands of individual inserts, yet almost all of the items will be inserted at the same point in the AIL because they have the same index. To reduce the overhead and contention on the AIL lock for such operations, introduce a "bulk insert" operation which allows a list of log items with the same LSN to be inserted in a single operation via a list splice. 
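The core of the idea -- take the lock once per batch and splice a pre-built, same-LSN list into place -- can be sketched in standalone C with pthreads. The types and names below are simplified stand-ins, not the real AIL structures, and the sketch only covers the common bulk path (items whose LSN does not match the batch are handled one at a time, as described below):

    #include <pthread.h>
    #include <stdint.h>

    struct node { struct node *prev, *next; uint64_t lsn; };

    struct ail {
            pthread_mutex_t lock;
            struct node     head;   /* circular sentinel; list sorted by ascending lsn,
                                     * initialised with head.next = head.prev = &head */
    };

    /* Splice the already-linked batch 'first..last' (all items share 'lsn')
     * into the list after the last node whose lsn is <= lsn.  The lock is
     * taken once per batch instead of once per item. */
    static void ail_splice_batch(struct ail *a, struct node *first,
                                 struct node *last, uint64_t lsn)
    {
            struct node *pos;

            pthread_mutex_lock(&a->lock);
            for (pos = a->head.prev; pos != &a->head; pos = pos->prev)
                    if (pos->lsn <= lsn)
                            break;
            /* link the whole batch in after 'pos' in one go */
            last->next = pos->next;
            pos->next->prev = last;
            pos->next = first;
            first->prev = pos;
            pthread_mutex_unlock(&a->lock);
    }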
To do this, we need to pre-sort the log items being committed into a temporary list for insertion. The complexity is that not every log item will end up with the same LSN, and not every item is actually inserted into the AIL. Items that don't match the commit LSN will be inserted and unpinned as per the current one-at-a-time method (relatively rare), while items that are not to be inserted will be unpinned and freed immediately. Items that are to be inserted at the given commit lsn are placed in a temporary array and inserted into the AIL in bulk each time the array fills up. As a result of this, we trade off AIL hold time for a significant reduction in traffic. lock_stat output shows that the worst case hold time is unchanged, but contention from AIL inserts drops by an order of magnitude and the number of lock traversal decreases significantly. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log_cil.c | 9 +--- fs/xfs/xfs_trans.c | 79 ++++++++++++++++++++++++++++++++++- fs/xfs/xfs_trans_ail.c | 109 +++++++++++++++++++++++++++++++++++++++++++++++- fs/xfs/xfs_trans_priv.h | 10 ++++- 4 files changed, 195 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index 23d6ceb5e97b..f36f1a2f4dc1 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c @@ -361,15 +361,10 @@ xlog_cil_committed( int abort) { struct xfs_cil_ctx *ctx = args; - struct xfs_log_vec *lv; - int abortflag = abort ? XFS_LI_ABORTED : 0; struct xfs_busy_extent *busyp, *n; - /* unpin all the log items */ - for (lv = ctx->lv_chain; lv; lv = lv->lv_next ) { - xfs_trans_item_committed(lv->lv_item, ctx->start_lsn, - abortflag); - } + xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain, + ctx->start_lsn, abort); list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list) xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp); diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index f6d956b7711e..f80a067a4658 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -1350,7 +1350,7 @@ xfs_trans_fill_vecs( * they could be immediately flushed and we'd have to race with the flusher * trying to pull the item from the AIL as we add it. */ -void +static void xfs_trans_item_committed( struct xfs_log_item *lip, xfs_lsn_t commit_lsn, @@ -1425,6 +1425,83 @@ xfs_trans_committed( xfs_trans_free(tp); } +static inline void +xfs_log_item_batch_insert( + struct xfs_ail *ailp, + struct xfs_log_item **log_items, + int nr_items, + xfs_lsn_t commit_lsn) +{ + int i; + + spin_lock(&ailp->xa_lock); + /* xfs_trans_ail_update_bulk drops ailp->xa_lock */ + xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn); + + for (i = 0; i < nr_items; i++) + IOP_UNPIN(log_items[i], 0); +} + +/* + * Bulk operation version of xfs_trans_committed that takes a log vector of + * items to insert into the AIL. This uses bulk AIL insertion techniques to + * minimise lock traffic. 
+ */ +void +xfs_trans_committed_bulk( + struct xfs_ail *ailp, + struct xfs_log_vec *log_vector, + xfs_lsn_t commit_lsn, + int aborted) +{ +#define LOG_ITEM_BATCH_SIZE 32 + struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE]; + struct xfs_log_vec *lv; + int i = 0; + + /* unpin all the log items */ + for (lv = log_vector; lv; lv = lv->lv_next ) { + struct xfs_log_item *lip = lv->lv_item; + xfs_lsn_t item_lsn; + + if (aborted) + lip->li_flags |= XFS_LI_ABORTED; + item_lsn = IOP_COMMITTED(lip, commit_lsn); + + /* item_lsn of -1 means the item was freed */ + if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) + continue; + + if (item_lsn != commit_lsn) { + + /* + * Not a bulk update option due to unusual item_lsn. + * Push into AIL immediately, rechecking the lsn once + * we have the ail lock. Then unpin the item. + */ + spin_lock(&ailp->xa_lock); + if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) + xfs_trans_ail_update(ailp, lip, item_lsn); + else + spin_unlock(&ailp->xa_lock); + IOP_UNPIN(lip, 0); + continue; + } + + /* Item is a candidate for bulk AIL insert. */ + log_items[i++] = lv->lv_item; + if (i >= LOG_ITEM_BATCH_SIZE) { + xfs_log_item_batch_insert(ailp, log_items, + LOG_ITEM_BATCH_SIZE, commit_lsn); + i = 0; + } + } + + /* make sure we insert the remainder! */ + if (i) + xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn); +} + /* * Called from the trans_commit code when we notice that * the filesystem is in the middle of a forced shutdown. diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 645928cab42d..fe991a76bf14 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -29,6 +29,7 @@ #include "xfs_error.h" STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *); +STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t); STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *); STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *); STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *); @@ -501,6 +502,79 @@ xfs_trans_ail_update( } /* xfs_trans_update_ail */ +/* + * xfs_trans_ail_update - bulk AIL insertion operation. + * + * @xfs_trans_ail_update takes an array of log items that all need to be + * positioned at the same LSN in the AIL. If an item is not in the AIL, it will + * be added. Otherwise, it will be repositioned by removing it and re-adding + * it to the AIL. If we move the first item in the AIL, update the log tail to + * match the new minimum LSN in the AIL. + * + * This function takes the AIL lock once to execute the update operations on + * all the items in the array, and as such should not be called with the AIL + * lock held. As a result, once we have the AIL lock, we need to check each log + * item LSN to confirm it needs to be moved forward in the AIL. + * + * To optimise the insert operation, we delete all the items from the AIL in + * the first pass, moving them into a temporary list, then splice the temporary + * list into the correct position in the AIL. This avoids needing to do an + * insert operation on every item. + * + * This function must be called with the AIL lock held. The lock is dropped + * before returning. 
+ */ +void +xfs_trans_ail_update_bulk( + struct xfs_ail *ailp, + struct xfs_log_item **log_items, + int nr_items, + xfs_lsn_t lsn) __releases(ailp->xa_lock) +{ + xfs_log_item_t *mlip; + xfs_lsn_t tail_lsn; + int mlip_changed = 0; + int i; + LIST_HEAD(tmp); + + mlip = xfs_ail_min(ailp); + + for (i = 0; i < nr_items; i++) { + struct xfs_log_item *lip = log_items[i]; + if (lip->li_flags & XFS_LI_IN_AIL) { + /* check if we really need to move the item */ + if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0) + continue; + + xfs_ail_delete(ailp, lip); + if (mlip == lip) + mlip_changed = 1; + } else { + lip->li_flags |= XFS_LI_IN_AIL; + } + lip->li_lsn = lsn; + list_add(&lip->li_ail, &tmp); + } + + xfs_ail_splice(ailp, &tmp, lsn); + + if (!mlip_changed) { + spin_unlock(&ailp->xa_lock); + return; + } + + /* + * It is not safe to access mlip after the AIL lock is dropped, so we + * must get a copy of li_lsn before we do so. This is especially + * important on 32-bit platforms where accessing and updating 64-bit + * values like li_lsn is not atomic. + */ + mlip = xfs_ail_min(ailp); + tail_lsn = mlip->li_lsn; + spin_unlock(&ailp->xa_lock); + xfs_log_move_tail(ailp->xa_mount, tail_lsn); +} + /* * Delete the given item from the AIL. It must already be in * the AIL. @@ -642,8 +716,8 @@ xfs_ail_insert( break; } - ASSERT((&next_lip->li_ail == &ailp->xa_ail) || - (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)); + ASSERT(&next_lip->li_ail == &ailp->xa_ail || + XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0); list_add(&lip->li_ail, &next_lip->li_ail); @@ -651,6 +725,37 @@ xfs_ail_insert( return; } +/* + * splice the log item list into the AIL at the given LSN. + */ +STATIC void +xfs_ail_splice( + struct xfs_ail *ailp, + struct list_head *list, + xfs_lsn_t lsn) +{ + xfs_log_item_t *next_lip; + + /* + * If the list is empty, just insert the item. + */ + if (list_empty(&ailp->xa_ail)) { + list_splice(list, &ailp->xa_ail); + return; + } + + list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) { + if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0) + break; + } + + ASSERT((&next_lip->li_ail == &ailp->xa_ail) || + (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)); + + list_splice_init(list, &next_lip->li_ail); + return; +} + /* * Delete the given item from the AIL. Return a pointer to the item. */ diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 62da86c90de5..e039729186e9 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -22,15 +22,17 @@ struct xfs_log_item; struct xfs_log_item_desc; struct xfs_mount; struct xfs_trans; +struct xfs_ail; +struct xfs_log_vec; void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *); void xfs_trans_del_item(struct xfs_log_item *); void xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn, int flags); -void xfs_trans_item_committed(struct xfs_log_item *lip, - xfs_lsn_t commit_lsn, int aborted); void xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp); +void xfs_trans_committed_bulk(struct xfs_ail *ailp, struct xfs_log_vec *lv, + xfs_lsn_t commit_lsn, int aborted); /* * AIL traversal cursor. 
* @@ -76,6 +78,10 @@ struct xfs_ail { void xfs_trans_ail_update(struct xfs_ail *ailp, struct xfs_log_item *lip, xfs_lsn_t lsn) __releases(ailp->xa_lock); +void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, + struct xfs_log_item **log_items, + int nr_items, xfs_lsn_t lsn) + __releases(ailp->xa_lock); void xfs_trans_ail_delete(struct xfs_ail *ailp, struct xfs_log_item *lip) __releases(ailp->xa_lock); -- cgit v1.2.2 From e677d0f9548e2245ee3c2977661ca8ca165af188 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 17 Dec 2010 20:08:04 +1100 Subject: xfs: reduce the number of AIL push wakeups The xfaild often tries to rest to wait for congestion to pass of for IO to complete, but is regularly woken in tail-pushing situations. In severe cases, the xfsaild is getting woken tens of thousands of times a second. Reduce the number needless wakeups by only waking the xfsaild if the new target is larger than the old one. Further make short sleeps uninterruptible as they occur when the xfsaild has decided it needs to back off to allow some IO to complete and being woken early is counter-productive. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_super.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index c45b3233d486..c51faaa5e291 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -834,8 +834,11 @@ xfsaild_wakeup( struct xfs_ail *ailp, xfs_lsn_t threshold_lsn) { - ailp->xa_target = threshold_lsn; - wake_up_process(ailp->xa_task); + /* only ever move the target forwards */ + if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) { + ailp->xa_target = threshold_lsn; + wake_up_process(ailp->xa_task); + } } STATIC int @@ -847,8 +850,17 @@ xfsaild( long tout = 0; /* milliseconds */ while (!kthread_should_stop()) { - schedule_timeout_interruptible(tout ? - msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT); + /* + * for short sleeps indicating congestion, don't allow us to + * get woken early. Otherwise all we do is bang on the AIL lock + * without making progress. + */ + if (tout && tout <= 20) + __set_current_state(TASK_KILLABLE); + else + __set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(tout ? + msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT); /* swsusp */ try_to_freeze(); -- cgit v1.2.2 From c90821a26a8c90ad1e3116393b8a8260ab46bffb Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 3 Dec 2010 17:00:52 +1100 Subject: xfs: consume iodone callback items on buffers as they are processed To allow buffer iodone callbacks to consume multiple items off the callback list, first we need to convert the xfs_buf_do_callbacks() to consume items and always pull the next item from the head of the list. The means the item list walk is never dependent on knowing the next item on the list and hence allows callbacks to remove items from the list as well. This allows callbacks to do bulk operations by scanning the list for identical callbacks, consuming them all and then processing them in bulk, negating the need for multiple callbacks of that type. 
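The "always pop from the head" consumption loop is a general pattern for running callbacks that are allowed to edit the list they are on; a minimal standalone sketch (invented names, not the buffer item code):

    #include <stddef.h>

    struct work {
            struct work *next;
            void (*fn)(struct work *w, void *ctx);
    };

    /* Run every callback on the list.  Because the head is detached before
     * its callback runs, a callback is free to scan and unlink further
     * entries from *headp (e.g. to batch items of its own type) without
     * the loop ever holding a stale 'next' pointer. */
    static void run_callbacks(struct work **headp, void *ctx)
    {
            struct work *w;

            while ((w = *headp) != NULL) {
                    *headp = w->next;       /* consume from the head */
                    w->next = NULL;
                    w->fn(w, ctx);          /* may modify *headp */
            }
    }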
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_buf_item.c | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 2686d0d54c5b..ed2b65f3f8b9 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -142,7 +142,7 @@ xfs_buf_item_log_check( #endif STATIC void xfs_buf_error_relse(xfs_buf_t *bp); -STATIC void xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip); +STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp); /* * This returns the number of log iovecs needed to log the @@ -450,7 +450,7 @@ xfs_buf_item_unpin( * xfs_trans_ail_delete() drops the AIL lock. */ if (bip->bli_flags & XFS_BLI_STALE_INODE) { - xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip); + xfs_buf_do_callbacks(bp); XFS_BUF_SET_FSPRIVATE(bp, NULL); XFS_BUF_CLR_IODONE_FUNC(bp); } else { @@ -918,15 +918,26 @@ xfs_buf_attach_iodone( XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks); } +/* + * We can have many callbacks on a buffer. Running the callbacks individually + * can cause a lot of contention on the AIL lock, so we allow for a single + * callback to be able to scan the remaining lip->li_bio_list for other items + * of the same type and callback to be processed in the first call. + * + * As a result, the loop walking the callback list below will also modify the + * list. it removes the first item from the list and then runs the callback. + * The loop then restarts from the new head of the list. This allows the + * callback to scan and modify the list attached to the buffer and we don't + * have to care about maintaining a next item pointer. + */ STATIC void xfs_buf_do_callbacks( - xfs_buf_t *bp, - xfs_log_item_t *lip) + struct xfs_buf *bp) { - xfs_log_item_t *nlip; + struct xfs_log_item *lip; - while (lip != NULL) { - nlip = lip->li_bio_list; + while ((lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *)) != NULL) { + XFS_BUF_SET_FSPRIVATE(bp, lip->li_bio_list); ASSERT(lip->li_cb != NULL); /* * Clear the next pointer so we don't have any @@ -936,7 +947,6 @@ xfs_buf_do_callbacks( */ lip->li_bio_list = NULL; lip->li_cb(bp, lip); - lip = nlip; } } @@ -970,7 +980,7 @@ xfs_buf_iodone_callbacks( ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp); XFS_BUF_SUPER_STALE(bp); trace_xfs_buf_item_iodone(bp, _RET_IP_); - xfs_buf_do_callbacks(bp, lip); + xfs_buf_do_callbacks(bp); XFS_BUF_SET_FSPRIVATE(bp, NULL); XFS_BUF_CLR_IODONE_FUNC(bp); xfs_buf_ioend(bp, 0); @@ -1029,7 +1039,7 @@ xfs_buf_iodone_callbacks( return; } - xfs_buf_do_callbacks(bp, lip); + xfs_buf_do_callbacks(bp); XFS_BUF_SET_FSPRIVATE(bp, NULL); XFS_BUF_CLR_IODONE_FUNC(bp); xfs_buf_ioend(bp, 0); @@ -1063,7 +1073,7 @@ xfs_buf_error_relse( * We have to unpin the pinned buffers so do the * callbacks. */ - xfs_buf_do_callbacks(bp, lip); + xfs_buf_do_callbacks(bp); XFS_BUF_SET_FSPRIVATE(bp, NULL); XFS_BUF_CLR_IODONE_FUNC(bp); XFS_BUF_SET_BRELSE_FUNC(bp,NULL); -- cgit v1.2.2 From 3013683253ad04f67d8cfaa25be708353686b90a Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 20 Dec 2010 12:03:17 +1100 Subject: xfs: remove all the inodes on a buffer from the AIL in bulk When inode buffer IO completes, usually all of the inodes are removed from the AIL. This involves processing them one at a time and taking the AIL lock once for every inode. When all CPUs are processing inode IO completions, this causes excessive amount sof contention on the AIL lock. Instead, change the way we process inode IO completion in the buffer IO done callback. 
Allow the inode IO done callback to walk the list of IO done callbacks and pull all the inodes off the buffer in one go and then process them as a batch. Once all the inodes for removal are collected, take the AIL lock once and do a bulk removal operation to minimise traffic on the AIL lock. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_inode_item.c | 90 ++++++++++++++++++++++++++++++++++++++++--------- fs/xfs/xfs_trans_ail.c | 73 +++++++++++++++++++++++++++++++++++++++ fs/xfs/xfs_trans_priv.h | 4 +++ 3 files changed, 151 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 7c8d30c453c3..fd4f398bd6f1 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -842,15 +842,64 @@ xfs_inode_item_destroy( * flushed to disk. It is responsible for removing the inode item * from the AIL if it has not been re-logged, and unlocking the inode's * flush lock. + * + * To reduce AIL lock traffic as much as possible, we scan the buffer log item + * list for other inodes that will run this function. We remove them from the + * buffer list so we can process all the inode IO completions in one AIL lock + * traversal. */ void xfs_iflush_done( struct xfs_buf *bp, struct xfs_log_item *lip) { - struct xfs_inode_log_item *iip = INODE_ITEM(lip); - xfs_inode_t *ip = iip->ili_inode; + struct xfs_inode_log_item *iip; + struct xfs_log_item *blip; + struct xfs_log_item *next; + struct xfs_log_item *prev; struct xfs_ail *ailp = lip->li_ailp; + int need_ail = 0; + + /* + * Scan the buffer IO completions for other inodes being completed and + * attach them to the current inode log item. + */ + blip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + prev = NULL; + while (blip != NULL) { + if (lip->li_cb != xfs_iflush_done) { + prev = blip; + blip = blip->li_bio_list; + continue; + } + + /* remove from list */ + next = blip->li_bio_list; + if (!prev) { + XFS_BUF_SET_FSPRIVATE(bp, next); + } else { + prev->li_bio_list = next; + } + + /* add to current list */ + blip->li_bio_list = lip->li_bio_list; + lip->li_bio_list = blip; + + /* + * while we have the item, do the unlocked check for needing + * the AIL lock. + */ + iip = INODE_ITEM(blip); + if (iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) + need_ail++; + + blip = next; + } + + /* make sure we capture the state of the initial inode. */ + iip = INODE_ITEM(lip); + if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) + need_ail++; /* * We only want to pull the item from the AIL if it is @@ -861,28 +910,37 @@ xfs_iflush_done( * the lock since it's cheaper, and then we recheck while * holding the lock before removing the inode from the AIL. */ - if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) { + if (need_ail) { + struct xfs_log_item *log_items[need_ail]; + int i = 0; spin_lock(&ailp->xa_lock); - if (lip->li_lsn == iip->ili_flush_lsn) { - /* xfs_trans_ail_delete() drops the AIL lock. */ - xfs_trans_ail_delete(ailp, lip); - } else { - spin_unlock(&ailp->xa_lock); + for (blip = lip; blip; blip = blip->li_bio_list) { + iip = INODE_ITEM(blip); + if (iip->ili_logged && + blip->li_lsn == iip->ili_flush_lsn) { + log_items[i++] = blip; + } + ASSERT(i <= need_ail); } + /* xfs_trans_ail_delete_bulk() drops the AIL lock. */ + xfs_trans_ail_delete_bulk(ailp, log_items, i); } - iip->ili_logged = 0; /* - * Clear the ili_last_fields bits now that we know that the - * data corresponding to them is safely on disk. + * clean up and unlock the flush lock now we are done. 
We can clear the + * ili_last_fields bits now that we know that the data corresponding to + * them is safely on disk. */ - iip->ili_last_fields = 0; + for (blip = lip; blip; blip = next) { + next = blip->li_bio_list; + blip->li_bio_list = NULL; - /* - * Release the inode's flush lock since we're done with it. - */ - xfs_ifunlock(ip); + iip = INODE_ITEM(blip); + iip->ili_logged = 0; + iip->ili_last_fields = 0; + xfs_ifunlock(iip->ili_inode); + } } /* diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index fe991a76bf14..218f96861c80 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -639,6 +639,79 @@ xfs_trans_ail_delete( } } +/* + * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL + * + * @xfs_trans_ail_delete_bulk takes an array of log items that all need to + * removed from the AIL. The caller is already holding the AIL lock, and done + * all the checks necessary to ensure the items passed in via @log_items are + * ready for deletion. This includes checking that the items are in the AIL. + * + * For each log item to be removed, unlink it from the AIL, clear the IN_AIL + * flag from the item and reset the item's lsn to 0. If we remove the first + * item in the AIL, update the log tail to match the new minimum LSN in the + * AIL. + * + * This function will not drop the AIL lock until all items are removed from + * the AIL to minimise the amount of lock traffic on the AIL. This does not + * greatly increase the AIL hold time, but does significantly reduce the amount + * of traffic on the lock, especially during IO completion. + * + * This function must be called with the AIL lock held. The lock is dropped + * before returning. + */ +void +xfs_trans_ail_delete_bulk( + struct xfs_ail *ailp, + struct xfs_log_item **log_items, + int nr_items) __releases(ailp->xa_lock) +{ + xfs_log_item_t *mlip; + xfs_lsn_t tail_lsn; + int mlip_changed = 0; + int i; + + mlip = xfs_ail_min(ailp); + + for (i = 0; i < nr_items; i++) { + struct xfs_log_item *lip = log_items[i]; + if (!(lip->li_flags & XFS_LI_IN_AIL)) { + struct xfs_mount *mp = ailp->xa_mount; + + spin_unlock(&ailp->xa_lock); + if (!XFS_FORCED_SHUTDOWN(mp)) { + xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, + "%s: attempting to delete a log item that is not in the AIL", + __func__); + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); + } + return; + } + + xfs_ail_delete(ailp, lip); + lip->li_flags &= ~XFS_LI_IN_AIL; + lip->li_lsn = 0; + if (mlip == lip) + mlip_changed = 1; + } + + if (!mlip_changed) { + spin_unlock(&ailp->xa_lock); + return; + } + + /* + * It is not safe to access mlip after the AIL lock is dropped, so we + * must get a copy of li_lsn before we do so. This is especially + * important on 32-bit platforms where accessing and updating 64-bit + * values like li_lsn is not atomic. It is possible we've emptied the + * AIL here, so if that is the case, pass an LSN of 0 to the tail move. + */ + mlip = xfs_ail_min(ailp); + tail_lsn = mlip ? 
mlip->li_lsn : 0; + spin_unlock(&ailp->xa_lock); + xfs_log_move_tail(ailp->xa_mount, tail_lsn); +} /* diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index e039729186e9..246ca4dcb5c4 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -85,6 +85,10 @@ void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, void xfs_trans_ail_delete(struct xfs_ail *ailp, struct xfs_log_item *lip) __releases(ailp->xa_lock); +void xfs_trans_ail_delete_bulk(struct xfs_ail *ailp, + struct xfs_log_item **log_items, + int nr_items) + __releases(ailp->xa_lock); void xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t); void xfs_trans_unlocked_item(struct xfs_ail *, xfs_log_item_t *); -- cgit v1.2.2 From e60599492990d1b52c70e9ed2f8e062fe11ca937 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 20 Dec 2010 12:34:26 +1100 Subject: xfs: use AIL bulk update function to implement single updates We now have two copies of AIL insert operations that are mostly duplicate functionality. The single log item updates can be implemented via the bulk updates by turning xfs_trans_ail_update() into a simple wrapper. This removes all the duplicate insert functionality and associated helpers. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log_recover.c | 2 +- fs/xfs/xfs_trans_ail.c | 88 ------------------------------------------------ fs/xfs/xfs_trans_priv.h | 19 +++++++---- 3 files changed, 13 insertions(+), 96 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index d7219e29d9ab..4abe7a9b380e 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2573,7 +2573,7 @@ xlog_recover_efi_pass2( /* * xfs_trans_ail_update() drops the AIL lock. */ - xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn); + xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn); return 0; } diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 218f96861c80..8481a5a6d6c2 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -28,7 +28,6 @@ #include "xfs_trans_priv.h" #include "xfs_error.h" -STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *); STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t); STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *); STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *); @@ -450,58 +449,6 @@ xfs_trans_unlocked_item( xfs_log_move_tail(ailp->xa_mount, 1); } /* xfs_trans_unlocked_item */ - -/* - * Update the position of the item in the AIL with the new - * lsn. If it is not yet in the AIL, add it. Otherwise, move - * it to its new position by removing it and re-adding it. - * - * Wakeup anyone with an lsn less than the item's lsn. If the item - * we move in the AIL is the minimum one, update the tail lsn in the - * log manager. - * - * This function must be called with the AIL lock held. The lock - * is dropped before returning. - */ -void -xfs_trans_ail_update( - struct xfs_ail *ailp, - xfs_log_item_t *lip, - xfs_lsn_t lsn) __releases(ailp->xa_lock) -{ - xfs_log_item_t *mlip; /* ptr to minimum lip */ - xfs_lsn_t tail_lsn; - - mlip = xfs_ail_min(ailp); - - if (lip->li_flags & XFS_LI_IN_AIL) { - xfs_ail_delete(ailp, lip); - } else { - lip->li_flags |= XFS_LI_IN_AIL; - } - - lip->li_lsn = lsn; - xfs_ail_insert(ailp, lip); - - if (mlip == lip) { - mlip = xfs_ail_min(ailp); - /* - * It is not safe to access mlip after the AIL lock is - * dropped, so we must get a copy of li_lsn before we do - * so. 
This is especially important on 32-bit platforms - * where accessing and updating 64-bit values like li_lsn - * is not atomic. - */ - tail_lsn = mlip->li_lsn; - spin_unlock(&ailp->xa_lock); - xfs_log_move_tail(ailp->xa_mount, tail_lsn); - } else { - spin_unlock(&ailp->xa_lock); - } - - -} /* xfs_trans_update_ail */ - /* * xfs_trans_ail_update - bulk AIL insertion operation. * @@ -763,41 +710,6 @@ xfs_trans_ail_destroy( kmem_free(ailp); } -/* - * Insert the given log item into the AIL. - * We almost always insert at the end of the list, so on inserts - * we search from the end of the list to find where the - * new item belongs. - */ -STATIC void -xfs_ail_insert( - struct xfs_ail *ailp, - xfs_log_item_t *lip) -{ - xfs_log_item_t *next_lip; - - /* - * If the list is empty, just insert the item. - */ - if (list_empty(&ailp->xa_ail)) { - list_add(&lip->li_ail, &ailp->xa_ail); - return; - } - - list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) { - if (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0) - break; - } - - ASSERT(&next_lip->li_ail == &ailp->xa_ail || - XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0); - - list_add(&lip->li_ail, &next_lip->li_ail); - - xfs_ail_check(ailp, lip); - return; -} - /* * splice the log item list into the AIL at the given LSN. */ diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 246ca4dcb5c4..f46920589ca5 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -75,13 +75,18 @@ struct xfs_ail { /* * From xfs_trans_ail.c */ -void xfs_trans_ail_update(struct xfs_ail *ailp, - struct xfs_log_item *lip, xfs_lsn_t lsn) - __releases(ailp->xa_lock); -void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, - struct xfs_log_item **log_items, - int nr_items, xfs_lsn_t lsn) - __releases(ailp->xa_lock); +void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, + struct xfs_log_item **log_items, int nr_items, + xfs_lsn_t lsn) __releases(ailp->xa_lock); +static inline void +xfs_trans_ail_update( + struct xfs_ail *ailp, + struct xfs_log_item *lip, + xfs_lsn_t lsn) __releases(ailp->xa_lock) +{ + xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn); +} + void xfs_trans_ail_delete(struct xfs_ail *ailp, struct xfs_log_item *lip) __releases(ailp->xa_lock); -- cgit v1.2.2 From 9552e7f2f3dd13a7580e488a7a3582332daad4f5 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 20 Dec 2010 12:36:15 +1100 Subject: xfs: use AIL bulk delete function to implement single delete We now have two copies of AIL delete operations that are mostly duplicate functionality. The single log item deletes can be implemented via the bulk updates by turning xfs_trans_ail_delete() into a simple wrapper. This removes all the duplicate delete functionality and associated helpers. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_trans_ail.c | 65 ------------------------------------------------- fs/xfs/xfs_trans_priv.h | 18 ++++++++------ 2 files changed, 11 insertions(+), 72 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 8481a5a6d6c2..c5bbbc45db91 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -522,70 +522,6 @@ xfs_trans_ail_update_bulk( xfs_log_move_tail(ailp->xa_mount, tail_lsn); } -/* - * Delete the given item from the AIL. It must already be in - * the AIL. - * - * Wakeup anyone with an lsn less than item's lsn. If the item - * we delete in the AIL is the minimum one, update the tail lsn in the - * log manager. 
- * - * Clear the IN_AIL flag from the item, reset its lsn to 0, and - * bump the AIL's generation count to indicate that the tree - * has changed. - * - * This function must be called with the AIL lock held. The lock - * is dropped before returning. - */ -void -xfs_trans_ail_delete( - struct xfs_ail *ailp, - xfs_log_item_t *lip) __releases(ailp->xa_lock) -{ - xfs_log_item_t *mlip; - xfs_lsn_t tail_lsn; - - if (lip->li_flags & XFS_LI_IN_AIL) { - mlip = xfs_ail_min(ailp); - xfs_ail_delete(ailp, lip); - - - lip->li_flags &= ~XFS_LI_IN_AIL; - lip->li_lsn = 0; - - if (mlip == lip) { - mlip = xfs_ail_min(ailp); - /* - * It is not safe to access mlip after the AIL lock - * is dropped, so we must get a copy of li_lsn - * before we do so. This is especially important - * on 32-bit platforms where accessing and updating - * 64-bit values like li_lsn is not atomic. - */ - tail_lsn = mlip ? mlip->li_lsn : 0; - spin_unlock(&ailp->xa_lock); - xfs_log_move_tail(ailp->xa_mount, tail_lsn); - } else { - spin_unlock(&ailp->xa_lock); - } - } - else { - /* - * If the file system is not being shutdown, we are in - * serious trouble if we get to this stage. - */ - struct xfs_mount *mp = ailp->xa_mount; - - spin_unlock(&ailp->xa_lock); - if (!XFS_FORCED_SHUTDOWN(mp)) { - xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, - "%s: attempting to delete a log item that is not in the AIL", - __func__); - xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); - } - } -} - /* * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL * @@ -660,7 +596,6 @@ xfs_trans_ail_delete_bulk( xfs_log_move_tail(ailp->xa_mount, tail_lsn); } - /* * The active item list (AIL) is a doubly linked list of log * items sorted by ascending lsn. The base of the list is diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index f46920589ca5..35162c238fa3 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h @@ -87,13 +87,17 @@ xfs_trans_ail_update( xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn); } -void xfs_trans_ail_delete(struct xfs_ail *ailp, - struct xfs_log_item *lip) - __releases(ailp->xa_lock); -void xfs_trans_ail_delete_bulk(struct xfs_ail *ailp, - struct xfs_log_item **log_items, - int nr_items) - __releases(ailp->xa_lock); +void xfs_trans_ail_delete_bulk(struct xfs_ail *ailp, + struct xfs_log_item **log_items, int nr_items) + __releases(ailp->xa_lock); +static inline void +xfs_trans_ail_delete( + struct xfs_ail *ailp, + xfs_log_item_t *lip) __releases(ailp->xa_lock) +{ + xfs_trans_ail_delete_bulk(ailp, &lip, 1); +} + void xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t); void xfs_trans_unlocked_item(struct xfs_ail *, xfs_log_item_t *); -- cgit v1.2.2 From 1054794198e39103cb986618c4c10ec2252b7089 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 21 Dec 2010 12:02:25 +1100 Subject: xfs: convert log grant ticket queues to list heads The grant write and reserve queues use a roll-your-own double linked list, so convert it to a standard list_head structure and convert all the list traversals to use list_for_each_entry(). We can also get rid of the XLOG_TIC_IN_Q flag as we can use the list_empty() check to tell if the ticket is in a list or not. 
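The "membership flag becomes list_empty()" trick relies on list_del_init() leaving a removed node pointing at itself; a minimal standalone rendition of the idiom (a stripped-down list_head, not the kernel header):

    #include <stdbool.h>

    struct list_head { struct list_head *prev, *next; };

    static void list_init(struct list_head *n) { n->prev = n->next = n; }

    static void list_add_tail(struct list_head *n, struct list_head *head)
    {
            n->prev = head->prev;
            n->next = head;
            head->prev->next = n;
            head->prev = n;
    }

    /* Unlink and re-point the node at itself, so "empty" doubles as a
     * "not on any queue" flag -- no separate TIC_IN_Q-style bit needed. */
    static void list_del_init(struct list_head *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
            list_init(n);
    }

    static bool on_queue(struct list_head *n) { return n->next != n; }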
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_trace.h | 16 +++--- fs/xfs/xfs_log.c | 123 +++++++++++++++---------------------------- fs/xfs/xfs_log_priv.h | 11 ++-- 3 files changed, 53 insertions(+), 97 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index 83e8760159ef..69b9e1f1baaf 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -766,8 +766,8 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class, __field(int, curr_res) __field(int, unit_res) __field(unsigned int, flags) - __field(void *, reserve_headq) - __field(void *, write_headq) + __field(int, reserveq) + __field(int, writeq) __field(int, grant_reserve_cycle) __field(int, grant_reserve_bytes) __field(int, grant_write_cycle) @@ -784,8 +784,8 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class, __entry->curr_res = tic->t_curr_res; __entry->unit_res = tic->t_unit_res; __entry->flags = tic->t_flags; - __entry->reserve_headq = log->l_reserve_headq; - __entry->write_headq = log->l_write_headq; + __entry->reserveq = list_empty(&log->l_reserveq); + __entry->writeq = list_empty(&log->l_writeq); __entry->grant_reserve_cycle = log->l_grant_reserve_cycle; __entry->grant_reserve_bytes = log->l_grant_reserve_bytes; __entry->grant_write_cycle = log->l_grant_write_cycle; @@ -795,8 +795,8 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class, __entry->tail_lsn = log->l_tail_lsn; ), TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " - "t_unit_res %u t_flags %s reserve_headq 0x%p " - "write_headq 0x%p grant_reserve_cycle %d " + "t_unit_res %u t_flags %s reserveq %s " + "writeq %s grant_reserve_cycle %d " "grant_reserve_bytes %d grant_write_cycle %d " "grant_write_bytes %d curr_cycle %d curr_block %d " "tail_cycle %d tail_block %d", @@ -807,8 +807,8 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class, __entry->curr_res, __entry->unit_res, __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), - __entry->reserve_headq, - __entry->write_headq, + __entry->reserveq ? "empty" : "active", + __entry->writeq ? 
"empty" : "active", __entry->grant_reserve_cycle, __entry->grant_reserve_bytes, __entry->grant_write_cycle, diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index cee4ab9f8a9e..1b82735471ab 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -95,38 +95,6 @@ STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, STATIC int xlog_iclogs_empty(xlog_t *log); - -static void -xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic) -{ - if (*qp) { - tic->t_next = (*qp); - tic->t_prev = (*qp)->t_prev; - (*qp)->t_prev->t_next = tic; - (*qp)->t_prev = tic; - } else { - tic->t_prev = tic->t_next = tic; - *qp = tic; - } - - tic->t_flags |= XLOG_TIC_IN_Q; -} - -static void -xlog_del_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic) -{ - if (tic == tic->t_next) { - *qp = NULL; - } else { - *qp = tic->t_next; - tic->t_next->t_prev = tic->t_prev; - tic->t_prev->t_next = tic->t_next; - } - - tic->t_next = tic->t_prev = NULL; - tic->t_flags &= ~XLOG_TIC_IN_Q; -} - static void xlog_grant_sub_space(struct log *log, int bytes) { @@ -724,7 +692,7 @@ xfs_log_move_tail(xfs_mount_t *mp, log->l_tail_lsn = tail_lsn; } - if ((tic = log->l_write_headq)) { + if (!list_empty(&log->l_writeq)) { #ifdef DEBUG if (log->l_flags & XLOG_ACTIVE_RECOVERY) panic("Recovery problem"); @@ -732,7 +700,7 @@ xfs_log_move_tail(xfs_mount_t *mp, cycle = log->l_grant_write_cycle; bytes = log->l_grant_write_bytes; free_bytes = xlog_space_left(log, cycle, bytes); - do { + list_for_each_entry(tic, &log->l_writeq, t_queue) { ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); if (free_bytes < tic->t_unit_res && tail_lsn != 1) @@ -740,10 +708,10 @@ xfs_log_move_tail(xfs_mount_t *mp, tail_lsn = 0; free_bytes -= tic->t_unit_res; sv_signal(&tic->t_wait); - tic = tic->t_next; - } while (tic != log->l_write_headq); + } } - if ((tic = log->l_reserve_headq)) { + + if (!list_empty(&log->l_reserveq)) { #ifdef DEBUG if (log->l_flags & XLOG_ACTIVE_RECOVERY) panic("Recovery problem"); @@ -751,7 +719,7 @@ xfs_log_move_tail(xfs_mount_t *mp, cycle = log->l_grant_reserve_cycle; bytes = log->l_grant_reserve_bytes; free_bytes = xlog_space_left(log, cycle, bytes); - do { + list_for_each_entry(tic, &log->l_reserveq, t_queue) { if (tic->t_flags & XLOG_TIC_PERM_RESERV) need_bytes = tic->t_unit_res*tic->t_cnt; else @@ -761,8 +729,7 @@ xfs_log_move_tail(xfs_mount_t *mp, tail_lsn = 0; free_bytes -= need_bytes; sv_signal(&tic->t_wait); - tic = tic->t_next; - } while (tic != log->l_reserve_headq); + } } spin_unlock(&log->l_grant_lock); } /* xfs_log_move_tail */ @@ -1053,6 +1020,8 @@ xlog_alloc_log(xfs_mount_t *mp, log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ log->l_grant_reserve_cycle = 1; log->l_grant_write_cycle = 1; + INIT_LIST_HEAD(&log->l_reserveq); + INIT_LIST_HEAD(&log->l_writeq); error = EFSCORRUPTED; if (xfs_sb_version_hassector(&mp->m_sb)) { @@ -2550,8 +2519,8 @@ xlog_grant_log_space(xlog_t *log, trace_xfs_log_grant_enter(log, tic); /* something is already sleeping; insert new transaction at end */ - if (log->l_reserve_headq) { - xlog_ins_ticketq(&log->l_reserve_headq, tic); + if (!list_empty(&log->l_reserveq)) { + list_add_tail(&tic->t_queue, &log->l_reserveq); trace_xfs_log_grant_sleep1(log, tic); @@ -2583,8 +2552,8 @@ redo: free_bytes = xlog_space_left(log, log->l_grant_reserve_cycle, log->l_grant_reserve_bytes); if (free_bytes < need_bytes) { - if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) - xlog_ins_ticketq(&log->l_reserve_headq, tic); + if (list_empty(&tic->t_queue)) + list_add_tail(&tic->t_queue, 
&log->l_reserveq); trace_xfs_log_grant_sleep2(log, tic); @@ -2602,8 +2571,9 @@ redo: trace_xfs_log_grant_wake2(log, tic); goto redo; - } else if (tic->t_flags & XLOG_TIC_IN_Q) - xlog_del_ticketq(&log->l_reserve_headq, tic); + } + + list_del_init(&tic->t_queue); /* we've got enough space */ xlog_grant_add_space(log, need_bytes); @@ -2626,9 +2596,7 @@ redo: return 0; error_return: - if (tic->t_flags & XLOG_TIC_IN_Q) - xlog_del_ticketq(&log->l_reserve_headq, tic); - + list_del_init(&tic->t_queue); trace_xfs_log_grant_error(log, tic); /* @@ -2653,7 +2621,6 @@ xlog_regrant_write_log_space(xlog_t *log, xlog_ticket_t *tic) { int free_bytes, need_bytes; - xlog_ticket_t *ntic; #ifdef DEBUG xfs_lsn_t tail_lsn; #endif @@ -2683,22 +2650,23 @@ xlog_regrant_write_log_space(xlog_t *log, * this transaction. */ need_bytes = tic->t_unit_res; - if ((ntic = log->l_write_headq)) { + if (!list_empty(&log->l_writeq)) { + struct xlog_ticket *ntic; free_bytes = xlog_space_left(log, log->l_grant_write_cycle, log->l_grant_write_bytes); - do { + list_for_each_entry(ntic, &log->l_writeq, t_queue) { ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV); if (free_bytes < ntic->t_unit_res) break; free_bytes -= ntic->t_unit_res; sv_signal(&ntic->t_wait); - ntic = ntic->t_next; - } while (ntic != log->l_write_headq); + } - if (ntic != log->l_write_headq) { - if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) - xlog_ins_ticketq(&log->l_write_headq, tic); + if (ntic != list_first_entry(&log->l_writeq, + struct xlog_ticket, t_queue)) { + if (list_empty(&tic->t_queue)) + list_add_tail(&tic->t_queue, &log->l_writeq); trace_xfs_log_regrant_write_sleep1(log, tic); @@ -2727,8 +2695,8 @@ redo: free_bytes = xlog_space_left(log, log->l_grant_write_cycle, log->l_grant_write_bytes); if (free_bytes < need_bytes) { - if ((tic->t_flags & XLOG_TIC_IN_Q) == 0) - xlog_ins_ticketq(&log->l_write_headq, tic); + if (list_empty(&tic->t_queue)) + list_add_tail(&tic->t_queue, &log->l_writeq); spin_unlock(&log->l_grant_lock); xlog_grant_push_ail(log->l_mp, need_bytes); spin_lock(&log->l_grant_lock); @@ -2745,8 +2713,9 @@ redo: trace_xfs_log_regrant_write_wake2(log, tic); goto redo; - } else if (tic->t_flags & XLOG_TIC_IN_Q) - xlog_del_ticketq(&log->l_write_headq, tic); + } + + list_del_init(&tic->t_queue); /* we've got enough space */ xlog_grant_add_space_write(log, need_bytes); @@ -2766,9 +2735,7 @@ redo: error_return: - if (tic->t_flags & XLOG_TIC_IN_Q) - xlog_del_ticketq(&log->l_reserve_headq, tic); - + list_del_init(&tic->t_queue); trace_xfs_log_regrant_write_error(log, tic); /* @@ -3435,6 +3402,7 @@ xlog_ticket_alloc( } atomic_set(&tic->t_ref, 1); + INIT_LIST_HEAD(&tic->t_queue); tic->t_unit_res = unit_bytes; tic->t_curr_res = unit_bytes; tic->t_cnt = cnt; @@ -3742,26 +3710,17 @@ xfs_log_force_umount( spin_unlock(&log->l_icloglock); /* - * We don't want anybody waiting for log reservations - * after this. That means we have to wake up everybody - * queued up on reserve_headq as well as write_headq. - * In addition, we make sure in xlog_{re}grant_log_space - * that we don't enqueue anything once the SHUTDOWN flag - * is set, and this action is protected by the GRANTLOCK. + * We don't want anybody waiting for log reservations after this. That + * means we have to wake up everybody queued up on reserveq as well as + * writeq. In addition, we make sure in xlog_{re}grant_log_space that + * we don't enqueue anything once the SHUTDOWN flag is set, and this + * action is protected by the GRANTLOCK. 
*/ - if ((tic = log->l_reserve_headq)) { - do { - sv_signal(&tic->t_wait); - tic = tic->t_next; - } while (tic != log->l_reserve_headq); - } + list_for_each_entry(tic, &log->l_reserveq, t_queue) + sv_signal(&tic->t_wait); - if ((tic = log->l_write_headq)) { - do { - sv_signal(&tic->t_wait); - tic = tic->t_next; - } while (tic != log->l_write_headq); - } + list_for_each_entry(tic, &log->l_writeq, t_queue) + sv_signal(&tic->t_wait); spin_unlock(&log->l_grant_lock); if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index c1ce505313e9..a5b3c021a406 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -132,12 +132,10 @@ static inline uint xlog_get_client_id(__be32 i) */ #define XLOG_TIC_INITED 0x1 /* has been initialized */ #define XLOG_TIC_PERM_RESERV 0x2 /* permanent reservation */ -#define XLOG_TIC_IN_Q 0x4 #define XLOG_TIC_FLAGS \ { XLOG_TIC_INITED, "XLOG_TIC_INITED" }, \ - { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" }, \ - { XLOG_TIC_IN_Q, "XLOG_TIC_IN_Q" } + { XLOG_TIC_PERM_RESERV, "XLOG_TIC_PERM_RESERV" } #endif /* __KERNEL__ */ @@ -244,8 +242,7 @@ typedef struct xlog_res { typedef struct xlog_ticket { sv_t t_wait; /* ticket wait queue : 20 */ - struct xlog_ticket *t_next; /* :4|8 */ - struct xlog_ticket *t_prev; /* :4|8 */ + struct list_head t_queue; /* reserve/write queue */ xlog_tid_t t_tid; /* transaction identifier : 4 */ atomic_t t_ref; /* ticket reference count : 4 */ int t_curr_res; /* current reservation in bytes : 4 */ @@ -519,8 +516,8 @@ typedef struct log { /* The following block of fields are changed while holding grant_lock */ spinlock_t l_grant_lock ____cacheline_aligned_in_smp; - xlog_ticket_t *l_reserve_headq; - xlog_ticket_t *l_write_headq; + struct list_head l_reserveq; + struct list_head l_writeq; int l_grant_reserve_cycle; int l_grant_reserve_bytes; int l_grant_write_cycle; -- cgit v1.2.2 From 3f336c6fa17c2b3d14b3dd1bd6e64e9cc97b6359 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 21 Dec 2010 12:02:52 +1100 Subject: xfs: fact out common grant head/log tail verification code Factor repeated debug code out of grant head manipulation functions into a separate function. This removes ifdef DEBUG spagetti from the code and makes the code easier to follow. 
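Mechanically this is the usual pairing of a DEBUG-only checker with a no-op macro so the call sites stay unconditional; a generic sketch of the pattern, with illustrative field names rather than the real log structures:

    #include <assert.h>

    struct log_state { int tail_cycle, tail_bytes, grant_cycle, grant_bytes; };

    #if defined(DEBUG)
    /* One checker, called unconditionally from the grant fast paths. */
    static void verify_grant_tail(const struct log_state *l)
    {
            /* If the cycles differ they must differ by exactly one, and the
             * grant bytes must not have overtaken the tail. */
            if (l->tail_cycle != l->grant_cycle) {
                    assert(l->grant_cycle - 1 == l->tail_cycle);
                    assert(l->grant_bytes <= l->tail_bytes);
            }
    }
    #else
    #define verify_grant_tail(l)    ((void)0)
    #endif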
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log.c | 51 ++++++++++++++++++++++----------------------------- 1 file changed, 22 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 1b82735471ab..99c62855432e 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -82,6 +82,7 @@ STATIC void xlog_ungrant_log_space(xlog_t *log, #if defined(DEBUG) STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr); STATIC void xlog_verify_grant_head(xlog_t *log, int equals); +STATIC void xlog_verify_grant_tail(struct log *log); STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog, int count, boolean_t syncing); STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, @@ -89,6 +90,7 @@ STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, #else #define xlog_verify_dest_ptr(a,b) #define xlog_verify_grant_head(a,b) +#define xlog_verify_grant_tail(a) #define xlog_verify_iclog(a,b,c,d) #define xlog_verify_tail_lsn(a,b,c) #endif @@ -2503,10 +2505,6 @@ xlog_grant_log_space(xlog_t *log, { int free_bytes; int need_bytes; -#ifdef DEBUG - xfs_lsn_t tail_lsn; -#endif - #ifdef DEBUG if (log->l_flags & XLOG_ACTIVE_RECOVERY) @@ -2577,21 +2575,9 @@ redo: /* we've got enough space */ xlog_grant_add_space(log, need_bytes); -#ifdef DEBUG - tail_lsn = log->l_tail_lsn; - /* - * Check to make sure the grant write head didn't just over lap the - * tail. If the cycles are the same, we can't be overlapping. - * Otherwise, make sure that the cycles differ by exactly one and - * check the byte count. - */ - if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) { - ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn)); - ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn))); - } -#endif trace_xfs_log_grant_exit(log, tic); xlog_verify_grant_head(log, 1); + xlog_verify_grant_tail(log); spin_unlock(&log->l_grant_lock); return 0; @@ -2621,9 +2607,6 @@ xlog_regrant_write_log_space(xlog_t *log, xlog_ticket_t *tic) { int free_bytes, need_bytes; -#ifdef DEBUG - xfs_lsn_t tail_lsn; -#endif tic->t_curr_res = tic->t_unit_res; xlog_tic_reset_res(tic); @@ -2719,17 +2702,9 @@ redo: /* we've got enough space */ xlog_grant_add_space_write(log, need_bytes); -#ifdef DEBUG - tail_lsn = log->l_tail_lsn; - if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) { - ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn)); - ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn))); - } -#endif - trace_xfs_log_regrant_write_exit(log, tic); - xlog_verify_grant_head(log, 1); + xlog_verify_grant_tail(log); spin_unlock(&log->l_grant_lock); return 0; @@ -3465,6 +3440,24 @@ xlog_verify_grant_head(xlog_t *log, int equals) } } /* xlog_verify_grant_head */ +STATIC void +xlog_verify_grant_tail( + struct log *log) +{ + xfs_lsn_t tail_lsn = log->l_tail_lsn; + + /* + * Check to make sure the grant write head didn't just over lap the + * tail. If the cycles are the same, we can't be overlapping. + * Otherwise, make sure that the cycles differ by exactly one and + * check the byte count. 
+ */ + if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) { + ASSERT(log->l_grant_write_cycle - 1 == CYCLE_LSN(tail_lsn)); + ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn))); + } +} + /* check if it will fit */ STATIC void xlog_verify_tail_lsn(xlog_t *log, -- cgit v1.2.2 From 663e496a720a3a9fc08ea70b29724e8906b34e43 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 21 Dec 2010 12:06:05 +1100 Subject: xfs: rework log grant space calculations The log grant space calculations are repeated for both write and reserve grant heads. To make it simpler to convert the calculations toa different algorithm, factor them so both the gratn heads use the same calculation functions. Once this is done we can drop the wrappers that are used in only a couple of place to update both grant heads at once as they don't provide any particular value. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log.c | 95 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 48 insertions(+), 47 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 99c62855432e..9a4b9edad847 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -98,53 +98,34 @@ STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, STATIC int xlog_iclogs_empty(xlog_t *log); static void -xlog_grant_sub_space(struct log *log, int bytes) -{ - log->l_grant_write_bytes -= bytes; - if (log->l_grant_write_bytes < 0) { - log->l_grant_write_bytes += log->l_logsize; - log->l_grant_write_cycle--; - } - - log->l_grant_reserve_bytes -= bytes; - if ((log)->l_grant_reserve_bytes < 0) { - log->l_grant_reserve_bytes += log->l_logsize; - log->l_grant_reserve_cycle--; - } - -} - -static void -xlog_grant_add_space_write(struct log *log, int bytes) +xlog_grant_sub_space( + struct log *log, + int *cycle, + int *space, + int bytes) { - int tmp = log->l_logsize - log->l_grant_write_bytes; - if (tmp > bytes) - log->l_grant_write_bytes += bytes; - else { - log->l_grant_write_cycle++; - log->l_grant_write_bytes = bytes - tmp; + *space -= bytes; + if (*space < 0) { + *space += log->l_logsize; + (*cycle)--; } } static void -xlog_grant_add_space_reserve(struct log *log, int bytes) +xlog_grant_add_space( + struct log *log, + int *cycle, + int *space, + int bytes) { - int tmp = log->l_logsize - log->l_grant_reserve_bytes; + int tmp = log->l_logsize - *space; if (tmp > bytes) - log->l_grant_reserve_bytes += bytes; + *space += bytes; else { - log->l_grant_reserve_cycle++; - log->l_grant_reserve_bytes = bytes - tmp; + *space = bytes - tmp; + (*cycle)++; } } - -static inline void -xlog_grant_add_space(struct log *log, int bytes) -{ - xlog_grant_add_space_write(log, bytes); - xlog_grant_add_space_reserve(log, bytes); -} - static void xlog_tic_reset_res(xlog_ticket_t *tic) { @@ -1344,7 +1325,10 @@ xlog_sync(xlog_t *log, /* move grant heads by roundoff in sync */ spin_lock(&log->l_grant_lock); - xlog_grant_add_space(log, roundoff); + xlog_grant_add_space(log, &log->l_grant_reserve_cycle, + &log->l_grant_reserve_bytes, roundoff); + xlog_grant_add_space(log, &log->l_grant_write_cycle, + &log->l_grant_write_bytes, roundoff); spin_unlock(&log->l_grant_lock); /* put cycle number in every block */ @@ -2574,7 +2558,10 @@ redo: list_del_init(&tic->t_queue); /* we've got enough space */ - xlog_grant_add_space(log, need_bytes); + xlog_grant_add_space(log, &log->l_grant_reserve_cycle, + &log->l_grant_reserve_bytes, need_bytes); + xlog_grant_add_space(log, &log->l_grant_write_cycle, + 
&log->l_grant_write_bytes, need_bytes); trace_xfs_log_grant_exit(log, tic); xlog_verify_grant_head(log, 1); xlog_verify_grant_tail(log); @@ -2701,7 +2688,8 @@ redo: list_del_init(&tic->t_queue); /* we've got enough space */ - xlog_grant_add_space_write(log, need_bytes); + xlog_grant_add_space(log, &log->l_grant_write_cycle, + &log->l_grant_write_bytes, need_bytes); trace_xfs_log_regrant_write_exit(log, tic); xlog_verify_grant_head(log, 1); xlog_verify_grant_tail(log); @@ -2742,7 +2730,12 @@ xlog_regrant_reserve_log_space(xlog_t *log, ticket->t_cnt--; spin_lock(&log->l_grant_lock); - xlog_grant_sub_space(log, ticket->t_curr_res); + xlog_grant_sub_space(log, &log->l_grant_reserve_cycle, + &log->l_grant_reserve_bytes, + ticket->t_curr_res); + xlog_grant_sub_space(log, &log->l_grant_write_cycle, + &log->l_grant_write_bytes, + ticket->t_curr_res); ticket->t_curr_res = ticket->t_unit_res; xlog_tic_reset_res(ticket); @@ -2756,7 +2749,9 @@ xlog_regrant_reserve_log_space(xlog_t *log, return; } - xlog_grant_add_space_reserve(log, ticket->t_unit_res); + xlog_grant_add_space(log, &log->l_grant_reserve_cycle, + &log->l_grant_reserve_bytes, + ticket->t_unit_res); trace_xfs_log_regrant_reserve_exit(log, ticket); @@ -2785,24 +2780,30 @@ STATIC void xlog_ungrant_log_space(xlog_t *log, xlog_ticket_t *ticket) { + int bytes; + if (ticket->t_cnt > 0) ticket->t_cnt--; spin_lock(&log->l_grant_lock); trace_xfs_log_ungrant_enter(log, ticket); - - xlog_grant_sub_space(log, ticket->t_curr_res); - trace_xfs_log_ungrant_sub(log, ticket); - /* If this is a permanent reservation ticket, we may be able to free + /* + * If this is a permanent reservation ticket, we may be able to free * up more space based on the remaining count. */ + bytes = ticket->t_curr_res; if (ticket->t_cnt > 0) { ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV); - xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt); + bytes += ticket->t_unit_res*ticket->t_cnt; } + xlog_grant_sub_space(log, &log->l_grant_reserve_cycle, + &log->l_grant_reserve_bytes, bytes); + xlog_grant_sub_space(log, &log->l_grant_write_cycle, + &log->l_grant_write_bytes, bytes); + trace_xfs_log_ungrant_exit(log, ticket); xlog_verify_grant_head(log, 1); -- cgit v1.2.2 From a69ed03c24d4a336c23b7116127713d5a8c5ac4d Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 21 Dec 2010 12:08:20 +1100 Subject: xfs: combine grant heads into a single 64 bit integer Prepare for switching the grant heads to atomic variables by combining the two 32 bit values that make up the grant head into a single 64 bit variable. Provide wrapper functions to combine and split the grant heads appropriately for calculations and use them as necessary. 
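[Illustrative aside, not part of the patch: the combined grant head packs the cycle number into the upper 32 bits and the byte offset into the lower 32 bits of one 64-bit value, mirroring the xlog_assign_grant_head()/xlog_crack_grant_head() helpers added below. A runnable userspace model of that packing, with hypothetical names:]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* cycle in the upper 32 bits, byte offset ("space") in the lower 32 bits */
static int64_t assign_grant_head(int cycle, int space)
{
	return ((int64_t)cycle << 32) | space;
}

static void crack_grant_head(int64_t head, int *cycle, int *space)
{
	*cycle = head >> 32;
	*space = head & 0xffffffff;
}

int main(void)
{
	int cycle, space;
	int64_t head = assign_grant_head(3, 8192);

	crack_grant_head(head, &cycle, &space);
	assert(cycle == 3 && space == 8192);
	printf("cycle=%d space=%d\n", cycle, space);
	return 0;
}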
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_trace.h | 10 +-- fs/xfs/xfs_log.c | 166 +++++++++++++++++++++++-------------------- fs/xfs/xfs_log_priv.h | 26 +++++-- fs/xfs/xfs_log_recover.c | 8 +-- 4 files changed, 119 insertions(+), 91 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index 69b9e1f1baaf..3ff6b35f9207 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -786,10 +786,12 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class, __entry->flags = tic->t_flags; __entry->reserveq = list_empty(&log->l_reserveq); __entry->writeq = list_empty(&log->l_writeq); - __entry->grant_reserve_cycle = log->l_grant_reserve_cycle; - __entry->grant_reserve_bytes = log->l_grant_reserve_bytes; - __entry->grant_write_cycle = log->l_grant_write_cycle; - __entry->grant_write_bytes = log->l_grant_write_bytes; + xlog_crack_grant_head(&log->l_grant_reserve_head, + &__entry->grant_reserve_cycle, + &__entry->grant_reserve_bytes); + xlog_crack_grant_head(&log->l_grant_write_head, + &__entry->grant_write_cycle, + &__entry->grant_write_bytes); __entry->curr_cycle = log->l_curr_cycle; __entry->curr_block = log->l_curr_block; __entry->tail_lsn = log->l_tail_lsn; diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 9a4b9edad847..6bba8b4b8596 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -47,7 +47,7 @@ STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, xfs_buftarg_t *log_target, xfs_daddr_t blk_offset, int num_bblks); -STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes); +STATIC int xlog_space_left(struct log *log, int64_t *head); STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); STATIC void xlog_dealloc_log(xlog_t *log); @@ -100,32 +100,44 @@ STATIC int xlog_iclogs_empty(xlog_t *log); static void xlog_grant_sub_space( struct log *log, - int *cycle, - int *space, + int64_t *head, int bytes) { - *space -= bytes; - if (*space < 0) { - *space += log->l_logsize; - (*cycle)--; + int cycle, space; + + xlog_crack_grant_head(head, &cycle, &space); + + space -= bytes; + if (space < 0) { + space += log->l_logsize; + cycle--; } + + xlog_assign_grant_head(head, cycle, space); } static void xlog_grant_add_space( struct log *log, - int *cycle, - int *space, + int64_t *head, int bytes) { - int tmp = log->l_logsize - *space; + int tmp; + int cycle, space; + + xlog_crack_grant_head(head, &cycle, &space); + + tmp = log->l_logsize - space; if (tmp > bytes) - *space += bytes; + space += bytes; else { - *space = bytes - tmp; - (*cycle)++; + space = bytes - tmp; + cycle++; } + + xlog_assign_grant_head(head, cycle, space); } + static void xlog_tic_reset_res(xlog_ticket_t *tic) { @@ -654,7 +666,7 @@ xfs_log_move_tail(xfs_mount_t *mp, { xlog_ticket_t *tic; xlog_t *log = mp->m_log; - int need_bytes, free_bytes, cycle, bytes; + int need_bytes, free_bytes; if (XLOG_FORCED_SHUTDOWN(log)) return; @@ -680,9 +692,7 @@ xfs_log_move_tail(xfs_mount_t *mp, if (log->l_flags & XLOG_ACTIVE_RECOVERY) panic("Recovery problem"); #endif - cycle = log->l_grant_write_cycle; - bytes = log->l_grant_write_bytes; - free_bytes = xlog_space_left(log, cycle, bytes); + free_bytes = xlog_space_left(log, &log->l_grant_write_head); list_for_each_entry(tic, &log->l_writeq, t_queue) { ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); @@ -699,9 +709,7 @@ xfs_log_move_tail(xfs_mount_t *mp, if (log->l_flags & XLOG_ACTIVE_RECOVERY) panic("Recovery problem"); #endif - cycle = log->l_grant_reserve_cycle; - bytes = log->l_grant_reserve_bytes; - 
free_bytes = xlog_space_left(log, cycle, bytes); + free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); list_for_each_entry(tic, &log->l_reserveq, t_queue) { if (tic->t_flags & XLOG_TIC_PERM_RESERV) need_bytes = tic->t_unit_res*tic->t_cnt; @@ -814,21 +822,26 @@ xlog_assign_tail_lsn(xfs_mount_t *mp) * result is that we return the size of the log as the amount of space left. */ STATIC int -xlog_space_left(xlog_t *log, int cycle, int bytes) +xlog_space_left( + struct log *log, + int64_t *head) { - int free_bytes; - int tail_bytes; - int tail_cycle; + int free_bytes; + int tail_bytes; + int tail_cycle; + int head_cycle; + int head_bytes; + xlog_crack_grant_head(head, &head_cycle, &head_bytes); tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn)); tail_cycle = CYCLE_LSN(log->l_tail_lsn); - if ((tail_cycle == cycle) && (bytes >= tail_bytes)) { - free_bytes = log->l_logsize - (bytes - tail_bytes); - } else if ((tail_cycle + 1) < cycle) { + if (tail_cycle == head_cycle && head_bytes >= tail_bytes) + free_bytes = log->l_logsize - (head_bytes - tail_bytes); + else if (tail_cycle + 1 < head_cycle) return 0; - } else if (tail_cycle < cycle) { - ASSERT(tail_cycle == (cycle - 1)); - free_bytes = tail_bytes - bytes; + else if (tail_cycle < head_cycle) { + ASSERT(tail_cycle == (head_cycle - 1)); + free_bytes = tail_bytes - head_bytes; } else { /* * The reservation head is behind the tail. @@ -839,12 +852,12 @@ xlog_space_left(xlog_t *log, int cycle, int bytes) "xlog_space_left: head behind tail\n" " tail_cycle = %d, tail_bytes = %d\n" " GH cycle = %d, GH bytes = %d", - tail_cycle, tail_bytes, cycle, bytes); + tail_cycle, tail_bytes, head_cycle, head_bytes); ASSERT(0); free_bytes = log->l_logsize; } return free_bytes; -} /* xlog_space_left */ +} /* @@ -1001,8 +1014,8 @@ xlog_alloc_log(xfs_mount_t *mp, /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ log->l_last_sync_lsn = log->l_tail_lsn; log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ - log->l_grant_reserve_cycle = 1; - log->l_grant_write_cycle = 1; + xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0); + xlog_assign_grant_head(&log->l_grant_write_head, 1, 0); INIT_LIST_HEAD(&log->l_reserveq); INIT_LIST_HEAD(&log->l_writeq); @@ -1190,9 +1203,7 @@ xlog_grant_push_ail(xfs_mount_t *mp, ASSERT(BTOBB(need_bytes) < log->l_logBBsize); spin_lock(&log->l_grant_lock); - free_bytes = xlog_space_left(log, - log->l_grant_reserve_cycle, - log->l_grant_reserve_bytes); + free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); tail_lsn = log->l_tail_lsn; free_blocks = BTOBBT(free_bytes); @@ -1325,10 +1336,8 @@ xlog_sync(xlog_t *log, /* move grant heads by roundoff in sync */ spin_lock(&log->l_grant_lock); - xlog_grant_add_space(log, &log->l_grant_reserve_cycle, - &log->l_grant_reserve_bytes, roundoff); - xlog_grant_add_space(log, &log->l_grant_write_cycle, - &log->l_grant_write_bytes, roundoff); + xlog_grant_add_space(log, &log->l_grant_reserve_head, roundoff); + xlog_grant_add_space(log, &log->l_grant_write_head, roundoff); spin_unlock(&log->l_grant_lock); /* put cycle number in every block */ @@ -2531,8 +2540,7 @@ redo: if (XLOG_FORCED_SHUTDOWN(log)) goto error_return; - free_bytes = xlog_space_left(log, log->l_grant_reserve_cycle, - log->l_grant_reserve_bytes); + free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); if (free_bytes < need_bytes) { if (list_empty(&tic->t_queue)) list_add_tail(&tic->t_queue, &log->l_reserveq); @@ -2558,10 +2566,8 @@ redo: list_del_init(&tic->t_queue); /* we've got enough space 
*/ - xlog_grant_add_space(log, &log->l_grant_reserve_cycle, - &log->l_grant_reserve_bytes, need_bytes); - xlog_grant_add_space(log, &log->l_grant_write_cycle, - &log->l_grant_write_bytes, need_bytes); + xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes); + xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); trace_xfs_log_grant_exit(log, tic); xlog_verify_grant_head(log, 1); xlog_verify_grant_tail(log); @@ -2622,8 +2628,7 @@ xlog_regrant_write_log_space(xlog_t *log, need_bytes = tic->t_unit_res; if (!list_empty(&log->l_writeq)) { struct xlog_ticket *ntic; - free_bytes = xlog_space_left(log, log->l_grant_write_cycle, - log->l_grant_write_bytes); + free_bytes = xlog_space_left(log, &log->l_grant_write_head); list_for_each_entry(ntic, &log->l_writeq, t_queue) { ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV); @@ -2662,8 +2667,7 @@ redo: if (XLOG_FORCED_SHUTDOWN(log)) goto error_return; - free_bytes = xlog_space_left(log, log->l_grant_write_cycle, - log->l_grant_write_bytes); + free_bytes = xlog_space_left(log, &log->l_grant_write_head); if (free_bytes < need_bytes) { if (list_empty(&tic->t_queue)) list_add_tail(&tic->t_queue, &log->l_writeq); @@ -2688,8 +2692,7 @@ redo: list_del_init(&tic->t_queue); /* we've got enough space */ - xlog_grant_add_space(log, &log->l_grant_write_cycle, - &log->l_grant_write_bytes, need_bytes); + xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); trace_xfs_log_regrant_write_exit(log, tic); xlog_verify_grant_head(log, 1); xlog_verify_grant_tail(log); @@ -2730,12 +2733,10 @@ xlog_regrant_reserve_log_space(xlog_t *log, ticket->t_cnt--; spin_lock(&log->l_grant_lock); - xlog_grant_sub_space(log, &log->l_grant_reserve_cycle, - &log->l_grant_reserve_bytes, - ticket->t_curr_res); - xlog_grant_sub_space(log, &log->l_grant_write_cycle, - &log->l_grant_write_bytes, - ticket->t_curr_res); + xlog_grant_sub_space(log, &log->l_grant_reserve_head, + ticket->t_curr_res); + xlog_grant_sub_space(log, &log->l_grant_write_head, + ticket->t_curr_res); ticket->t_curr_res = ticket->t_unit_res; xlog_tic_reset_res(ticket); @@ -2749,9 +2750,8 @@ xlog_regrant_reserve_log_space(xlog_t *log, return; } - xlog_grant_add_space(log, &log->l_grant_reserve_cycle, - &log->l_grant_reserve_bytes, - ticket->t_unit_res); + xlog_grant_add_space(log, &log->l_grant_reserve_head, + ticket->t_unit_res); trace_xfs_log_regrant_reserve_exit(log, ticket); @@ -2799,10 +2799,8 @@ xlog_ungrant_log_space(xlog_t *log, bytes += ticket->t_unit_res*ticket->t_cnt; } - xlog_grant_sub_space(log, &log->l_grant_reserve_cycle, - &log->l_grant_reserve_bytes, bytes); - xlog_grant_sub_space(log, &log->l_grant_write_cycle, - &log->l_grant_write_bytes, bytes); + xlog_grant_sub_space(log, &log->l_grant_reserve_head, bytes); + xlog_grant_sub_space(log, &log->l_grant_write_head, bytes); trace_xfs_log_ungrant_exit(log, ticket); @@ -3430,22 +3428,31 @@ xlog_verify_dest_ptr( STATIC void xlog_verify_grant_head(xlog_t *log, int equals) { - if (log->l_grant_reserve_cycle == log->l_grant_write_cycle) { - if (equals) - ASSERT(log->l_grant_reserve_bytes >= log->l_grant_write_bytes); - else - ASSERT(log->l_grant_reserve_bytes > log->l_grant_write_bytes); - } else { - ASSERT(log->l_grant_reserve_cycle-1 == log->l_grant_write_cycle); - ASSERT(log->l_grant_write_bytes >= log->l_grant_reserve_bytes); - } -} /* xlog_verify_grant_head */ + int reserve_cycle, reserve_space; + int write_cycle, write_space; + + xlog_crack_grant_head(&log->l_grant_reserve_head, + &reserve_cycle, &reserve_space); + 
xlog_crack_grant_head(&log->l_grant_write_head, + &write_cycle, &write_space); + + if (reserve_cycle == write_cycle) { + if (equals) + ASSERT(reserve_space >= write_space); + else + ASSERT(reserve_space > write_space); + } else { + ASSERT(reserve_cycle - 1 == write_cycle); + ASSERT(write_space >= reserve_space); + } +} STATIC void xlog_verify_grant_tail( struct log *log) { xfs_lsn_t tail_lsn = log->l_tail_lsn; + int cycle, space; /* * Check to make sure the grant write head didn't just over lap the @@ -3453,9 +3460,10 @@ xlog_verify_grant_tail( * Otherwise, make sure that the cycles differ by exactly one and * check the byte count. */ - if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) { - ASSERT(log->l_grant_write_cycle - 1 == CYCLE_LSN(tail_lsn)); - ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn))); + xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space); + if (CYCLE_LSN(tail_lsn) != cycle) { + ASSERT(cycle - 1 == CYCLE_LSN(tail_lsn)); + ASSERT(space <= BBTOB(BLOCK_LSN(tail_lsn))); } } diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index a5b3c021a406..2f74c80a7a40 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -518,10 +518,8 @@ typedef struct log { spinlock_t l_grant_lock ____cacheline_aligned_in_smp; struct list_head l_reserveq; struct list_head l_writeq; - int l_grant_reserve_cycle; - int l_grant_reserve_bytes; - int l_grant_write_cycle; - int l_grant_write_bytes; + int64_t l_grant_reserve_head; + int64_t l_grant_write_head; /* The following field are used for debugging; need to hold icloglock */ #ifdef DEBUG @@ -560,6 +558,26 @@ int xlog_write(struct log *log, struct xfs_log_vec *log_vector, struct xlog_ticket *tic, xfs_lsn_t *start_lsn, xlog_in_core_t **commit_iclog, uint flags); +/* + * When we crack the grrant head, we sample it first so that the value will not + * change while we are cracking it into the component values. This means we + * will always get consistent component values to work from. + */ +static inline void +xlog_crack_grant_head(int64_t *head, int *cycle, int *space) +{ + int64_t val = *head; + + *cycle = val >> 32; + *space = val & 0xffffffff; +} + +static inline void +xlog_assign_grant_head(int64_t *head, int cycle, int space) +{ + *head = ((int64_t)cycle << 32) | space; +} + /* * Committed Item List interfaces */ diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 4abe7a9b380e..1550404a8aeb 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -938,10 +938,10 @@ xlog_find_tail( log->l_curr_cycle++; log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn); log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn); - log->l_grant_reserve_cycle = log->l_curr_cycle; - log->l_grant_reserve_bytes = BBTOB(log->l_curr_block); - log->l_grant_write_cycle = log->l_curr_cycle; - log->l_grant_write_bytes = BBTOB(log->l_curr_block); + xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle, + BBTOB(log->l_curr_block)); + xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle, + BBTOB(log->l_curr_block)); /* * Look for unmount record. If we find it, then we know there -- cgit v1.2.2 From eb40a87500ac2f6be7eaf8ebb35610e6d0e60e9a Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 21 Dec 2010 12:09:01 +1100 Subject: xfs: use wait queues directly for the log wait queues The log grant queues are one of the few places left using sv_t constructs for waiting. Given we are touching this code, we should convert them to plain wait queues. 
While there, convert all the other sv_t users in the log code as well. Seeing as this removes the last users of the sv_t type, remove the header file defining the wrapper and the fragments that still reference it. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/sv.h | 59 ---------------------------------------- fs/xfs/linux-2.6/xfs_linux.h | 1 - fs/xfs/quota/xfs_dquot.c | 1 - fs/xfs/xfs_log.c | 64 +++++++++++++++++++------------------------- fs/xfs/xfs_log_cil.c | 8 +++--- fs/xfs/xfs_log_priv.h | 25 +++++++++++++---- 6 files changed, 52 insertions(+), 106 deletions(-) delete mode 100644 fs/xfs/linux-2.6/sv.h (limited to 'fs') diff --git a/fs/xfs/linux-2.6/sv.h b/fs/xfs/linux-2.6/sv.h deleted file mode 100644 index 4dfc7c370819..000000000000 --- a/fs/xfs/linux-2.6/sv.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. - * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef __XFS_SUPPORT_SV_H__ -#define __XFS_SUPPORT_SV_H__ - -#include -#include -#include - -/* - * Synchronisation variables. - * - * (Parameters "pri", "svf" and "rts" are not implemented) - */ - -typedef struct sv_s { - wait_queue_head_t waiters; -} sv_t; - -static inline void _sv_wait(sv_t *sv, spinlock_t *lock) -{ - DECLARE_WAITQUEUE(wait, current); - - add_wait_queue_exclusive(&sv->waiters, &wait); - __set_current_state(TASK_UNINTERRUPTIBLE); - spin_unlock(lock); - - schedule(); - - remove_wait_queue(&sv->waiters, &wait); -} - -#define sv_init(sv,flag,name) \ - init_waitqueue_head(&(sv)->waiters) -#define sv_destroy(sv) \ - /*NOTHING*/ -#define sv_wait(sv, pri, lock, s) \ - _sv_wait(sv, lock) -#define sv_signal(sv) \ - wake_up(&(sv)->waiters) -#define sv_broadcast(sv) \ - wake_up_all(&(sv)->waiters) - -#endif /* __XFS_SUPPORT_SV_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index 214ddd71ff79..096494997747 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h @@ -37,7 +37,6 @@ #include #include -#include #include #include diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index faf8e1a83a12..d22aa3103106 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -149,7 +149,6 @@ xfs_qm_dqdestroy( ASSERT(list_empty(&dqp->q_freelist)); mutex_destroy(&dqp->q_qlock); - sv_destroy(&dqp->q_pinwait); kmem_zone_free(xfs_Gqm->qm_dqzone, dqp); atomic_dec(&xfs_Gqm->qm_totaldquots); diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 6bba8b4b8596..cc0504e0bb3b 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -547,8 +547,8 @@ xfs_log_unmount_write(xfs_mount_t *mp) if (!(iclog->ic_state == XLOG_STATE_ACTIVE || iclog->ic_state == XLOG_STATE_DIRTY)) { if (!XLOG_FORCED_SHUTDOWN(log)) { - sv_wait(&iclog->ic_force_wait, PMEM, - &log->l_icloglock, s); + xlog_wait(&iclog->ic_force_wait, + &log->l_icloglock); } else { spin_unlock(&log->l_icloglock); } @@ 
-588,8 +588,8 @@ xfs_log_unmount_write(xfs_mount_t *mp) || iclog->ic_state == XLOG_STATE_DIRTY || iclog->ic_state == XLOG_STATE_IOERROR) ) { - sv_wait(&iclog->ic_force_wait, PMEM, - &log->l_icloglock, s); + xlog_wait(&iclog->ic_force_wait, + &log->l_icloglock); } else { spin_unlock(&log->l_icloglock); } @@ -700,7 +700,7 @@ xfs_log_move_tail(xfs_mount_t *mp, break; tail_lsn = 0; free_bytes -= tic->t_unit_res; - sv_signal(&tic->t_wait); + wake_up(&tic->t_wait); } } @@ -719,7 +719,7 @@ xfs_log_move_tail(xfs_mount_t *mp, break; tail_lsn = 0; free_bytes -= need_bytes; - sv_signal(&tic->t_wait); + wake_up(&tic->t_wait); } } spin_unlock(&log->l_grant_lock); @@ -1060,7 +1060,7 @@ xlog_alloc_log(xfs_mount_t *mp, spin_lock_init(&log->l_icloglock); spin_lock_init(&log->l_grant_lock); - sv_init(&log->l_flush_wait, 0, "flush_wait"); + init_waitqueue_head(&log->l_flush_wait); /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */ ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0); @@ -1116,8 +1116,8 @@ xlog_alloc_log(xfs_mount_t *mp, ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp)); ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0); - sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force"); - sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write"); + init_waitqueue_head(&iclog->ic_force_wait); + init_waitqueue_head(&iclog->ic_write_wait); iclogp = &iclog->ic_next; } @@ -1132,11 +1132,8 @@ xlog_alloc_log(xfs_mount_t *mp, out_free_iclog: for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { prev_iclog = iclog->ic_next; - if (iclog->ic_bp) { - sv_destroy(&iclog->ic_force_wait); - sv_destroy(&iclog->ic_write_wait); + if (iclog->ic_bp) xfs_buf_free(iclog->ic_bp); - } kmem_free(iclog); } spinlock_destroy(&log->l_icloglock); @@ -1453,8 +1450,6 @@ xlog_dealloc_log(xlog_t *log) iclog = log->l_iclog; for (i=0; il_iclog_bufs; i++) { - sv_destroy(&iclog->ic_force_wait); - sv_destroy(&iclog->ic_write_wait); xfs_buf_free(iclog->ic_bp); next_iclog = iclog->ic_next; kmem_free(iclog); @@ -2261,7 +2256,7 @@ xlog_state_do_callback( xlog_state_clean_log(log); /* wake up threads waiting in xfs_log_force() */ - sv_broadcast(&iclog->ic_force_wait); + wake_up_all(&iclog->ic_force_wait); iclog = iclog->ic_next; } while (first_iclog != iclog); @@ -2308,7 +2303,7 @@ xlog_state_do_callback( spin_unlock(&log->l_icloglock); if (wake) - sv_broadcast(&log->l_flush_wait); + wake_up_all(&log->l_flush_wait); } @@ -2359,7 +2354,7 @@ xlog_state_done_syncing( * iclog buffer, we wake them all, one will get to do the * I/O, the others get to wait for the result. */ - sv_broadcast(&iclog->ic_write_wait); + wake_up_all(&iclog->ic_write_wait); spin_unlock(&log->l_icloglock); xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ } /* xlog_state_done_syncing */ @@ -2408,7 +2403,7 @@ restart: XFS_STATS_INC(xs_log_noiclogs); /* Wait for log writes to have flushed */ - sv_wait(&log->l_flush_wait, 0, &log->l_icloglock, 0); + xlog_wait(&log->l_flush_wait, &log->l_icloglock); goto restart; } @@ -2523,7 +2518,8 @@ xlog_grant_log_space(xlog_t *log, goto error_return; XFS_STATS_INC(xs_sleep_logspace); - sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); + xlog_wait(&tic->t_wait, &log->l_grant_lock); + /* * If we got an error, and the filesystem is shutting down, * we'll catch it down below. So just continue... 
@@ -2552,7 +2548,7 @@ redo: spin_lock(&log->l_grant_lock); XFS_STATS_INC(xs_sleep_logspace); - sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); + xlog_wait(&tic->t_wait, &log->l_grant_lock); spin_lock(&log->l_grant_lock); if (XLOG_FORCED_SHUTDOWN(log)) @@ -2635,7 +2631,7 @@ xlog_regrant_write_log_space(xlog_t *log, if (free_bytes < ntic->t_unit_res) break; free_bytes -= ntic->t_unit_res; - sv_signal(&ntic->t_wait); + wake_up(&ntic->t_wait); } if (ntic != list_first_entry(&log->l_writeq, @@ -2650,8 +2646,7 @@ xlog_regrant_write_log_space(xlog_t *log, spin_lock(&log->l_grant_lock); XFS_STATS_INC(xs_sleep_logspace); - sv_wait(&tic->t_wait, PINOD|PLTWAIT, - &log->l_grant_lock, s); + xlog_wait(&tic->t_wait, &log->l_grant_lock); /* If we're shutting down, this tic is already * off the queue */ @@ -2677,8 +2672,7 @@ redo: XFS_STATS_INC(xs_sleep_logspace); trace_xfs_log_regrant_write_sleep2(log, tic); - - sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s); + xlog_wait(&tic->t_wait, &log->l_grant_lock); /* If we're shutting down, this tic is already off the queue */ spin_lock(&log->l_grant_lock); @@ -3029,7 +3023,7 @@ maybe_sleep: return XFS_ERROR(EIO); } XFS_STATS_INC(xs_log_force_sleep); - sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s); + xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); /* * No need to grab the log lock here since we're * only deciding whether or not to return EIO @@ -3147,8 +3141,8 @@ try_again: XFS_STATS_INC(xs_log_force_sleep); - sv_wait(&iclog->ic_prev->ic_write_wait, - PSWP, &log->l_icloglock, s); + xlog_wait(&iclog->ic_prev->ic_write_wait, + &log->l_icloglock); if (log_flushed) *log_flushed = 1; already_slept = 1; @@ -3176,7 +3170,7 @@ try_again: return XFS_ERROR(EIO); } XFS_STATS_INC(xs_log_force_sleep); - sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s); + xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); /* * No need to grab the log lock here since we're * only deciding whether or not to return EIO @@ -3251,10 +3245,8 @@ xfs_log_ticket_put( xlog_ticket_t *ticket) { ASSERT(atomic_read(&ticket->t_ref) > 0); - if (atomic_dec_and_test(&ticket->t_ref)) { - sv_destroy(&ticket->t_wait); + if (atomic_dec_and_test(&ticket->t_ref)) kmem_zone_free(xfs_log_ticket_zone, ticket); - } } xlog_ticket_t * @@ -3387,7 +3379,7 @@ xlog_ticket_alloc( tic->t_trans_type = 0; if (xflags & XFS_LOG_PERM_RESERV) tic->t_flags |= XLOG_TIC_PERM_RESERV; - sv_init(&tic->t_wait, SV_DEFAULT, "logtick"); + init_waitqueue_head(&tic->t_wait); xlog_tic_reset_res(tic); @@ -3719,10 +3711,10 @@ xfs_log_force_umount( * action is protected by the GRANTLOCK. */ list_for_each_entry(tic, &log->l_reserveq, t_queue) - sv_signal(&tic->t_wait); + wake_up(&tic->t_wait); list_for_each_entry(tic, &log->l_writeq, t_queue) - sv_signal(&tic->t_wait); + wake_up(&tic->t_wait); spin_unlock(&log->l_grant_lock); if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index f36f1a2f4dc1..9dc8125d04e5 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c @@ -61,7 +61,7 @@ xlog_cil_init( INIT_LIST_HEAD(&cil->xc_committing); spin_lock_init(&cil->xc_cil_lock); init_rwsem(&cil->xc_ctx_lock); - sv_init(&cil->xc_commit_wait, SV_DEFAULT, "cilwait"); + init_waitqueue_head(&cil->xc_commit_wait); INIT_LIST_HEAD(&ctx->committing); INIT_LIST_HEAD(&ctx->busy_extents); @@ -563,7 +563,7 @@ restart: * It is still being pushed! Wait for the push to * complete, then start again from the beginning. 
*/ - sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0); + xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock); goto restart; } } @@ -587,7 +587,7 @@ restart: */ spin_lock(&cil->xc_cil_lock); ctx->commit_lsn = commit_lsn; - sv_broadcast(&cil->xc_commit_wait); + wake_up_all(&cil->xc_commit_wait); spin_unlock(&cil->xc_cil_lock); /* release the hounds! */ @@ -752,7 +752,7 @@ restart: * It is still being pushed! Wait for the push to * complete, then start again from the beginning. */ - sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0); + xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock); goto restart; } if (ctx->sequence != sequence) diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 2f74c80a7a40..e2bb276eb2a7 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -241,7 +241,7 @@ typedef struct xlog_res { } xlog_res_t; typedef struct xlog_ticket { - sv_t t_wait; /* ticket wait queue : 20 */ + wait_queue_head_t t_wait; /* ticket wait queue */ struct list_head t_queue; /* reserve/write queue */ xlog_tid_t t_tid; /* transaction identifier : 4 */ atomic_t t_ref; /* ticket reference count : 4 */ @@ -349,8 +349,8 @@ typedef union xlog_in_core2 { * and move everything else out to subsequent cachelines. */ typedef struct xlog_in_core { - sv_t ic_force_wait; - sv_t ic_write_wait; + wait_queue_head_t ic_force_wait; + wait_queue_head_t ic_write_wait; struct xlog_in_core *ic_next; struct xlog_in_core *ic_prev; struct xfs_buf *ic_bp; @@ -417,7 +417,7 @@ struct xfs_cil { struct xfs_cil_ctx *xc_ctx; struct rw_semaphore xc_ctx_lock; struct list_head xc_committing; - sv_t xc_commit_wait; + wait_queue_head_t xc_commit_wait; xfs_lsn_t xc_current_sequence; }; @@ -499,7 +499,7 @@ typedef struct log { int l_logBBsize; /* size of log in BB chunks */ /* The following block of fields are changed while holding icloglock */ - sv_t l_flush_wait ____cacheline_aligned_in_smp; + wait_queue_head_t l_flush_wait ____cacheline_aligned_in_smp; /* waiting for iclog flush */ int l_covered_state;/* state of "covering disk * log entries" */ @@ -602,6 +602,21 @@ xlog_cil_force(struct log *log) */ #define XLOG_UNMOUNT_REC_TYPE (-1U) +/* + * Wrapper function for waiting on a wait queue serialised against wakeups + * by a spinlock. This matches the semantics of all the wait queues used in the + * log code. + */ +static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock) +{ + DECLARE_WAITQUEUE(wait, current); + + add_wait_queue_exclusive(wq, &wait); + __set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock(lock); + schedule(); + remove_wait_queue(wq, &wait); +} #endif /* __KERNEL__ */ #endif /* __XFS_LOG_PRIV_H__ */ -- cgit v1.2.2 From 2ced19cbae5448b720919a494606c62095d4f4db Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 21 Dec 2010 12:09:20 +1100 Subject: xfs: make AIL tail pushing independent of the grant lock The xlog_grant_push_ail() currently takes the grant lock internally to sample the tail lsn, last sync lsn and the reserve grant head. Most of the callers already hold the grant lock but have to drop it before calling xlog_grant_push_ail(). This is a left over from when the AIL tail pushing was done in line and hence xlog_grant_push_ail had to drop the grant lock. AIL push is now done in another thread and hence we can safely hold the grant lock over the entire xlog_grant_push_ail call. Push the grant lock outside of xlog_grant_push_ail() to simplify the locking and synchronisation needed for tail pushing. 
This will reduce traffic on the grant lock by itself, but this is only one step in preparing for the complete removal of the grant lock. While there, clean up the formatting of xlog_grant_push_ail() to match the rest of the XFS code. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log.c | 111 +++++++++++++++++++++++++++---------------------------- 1 file changed, 54 insertions(+), 57 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index cc0504e0bb3b..1e2020d5a8b6 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -70,7 +70,7 @@ STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog); /* local functions to manipulate grant head */ STATIC int xlog_grant_log_space(xlog_t *log, xlog_ticket_t *xtic); -STATIC void xlog_grant_push_ail(xfs_mount_t *mp, +STATIC void xlog_grant_push_ail(struct log *log, int need_bytes); STATIC void xlog_regrant_reserve_log_space(xlog_t *log, xlog_ticket_t *ticket); @@ -318,7 +318,9 @@ xfs_log_reserve( trace_xfs_log_reserve(log, internal_ticket); - xlog_grant_push_ail(mp, internal_ticket->t_unit_res); + spin_lock(&log->l_grant_lock); + xlog_grant_push_ail(log, internal_ticket->t_unit_res); + spin_unlock(&log->l_grant_lock); retval = xlog_regrant_write_log_space(log, internal_ticket); } else { /* may sleep if need to allocate more tickets */ @@ -332,9 +334,11 @@ xfs_log_reserve( trace_xfs_log_reserve(log, internal_ticket); - xlog_grant_push_ail(mp, + spin_lock(&log->l_grant_lock); + xlog_grant_push_ail(log, (internal_ticket->t_unit_res * internal_ticket->t_cnt)); + spin_unlock(&log->l_grant_lock); retval = xlog_grant_log_space(log, internal_ticket); } @@ -1185,59 +1189,58 @@ xlog_commit_record( * water mark. In this manner, we would be creating a low water mark. */ STATIC void -xlog_grant_push_ail(xfs_mount_t *mp, - int need_bytes) +xlog_grant_push_ail( + struct log *log, + int need_bytes) { - xlog_t *log = mp->m_log; /* pointer to the log */ - xfs_lsn_t tail_lsn; /* lsn of the log tail */ - xfs_lsn_t threshold_lsn = 0; /* lsn we'd like to be at */ - int free_blocks; /* free blocks left to write to */ - int free_bytes; /* free bytes left to write to */ - int threshold_block; /* block in lsn we'd like to be at */ - int threshold_cycle; /* lsn cycle we'd like to be at */ - int free_threshold; - - ASSERT(BTOBB(need_bytes) < log->l_logBBsize); - - spin_lock(&log->l_grant_lock); - free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); - tail_lsn = log->l_tail_lsn; - free_blocks = BTOBBT(free_bytes); - - /* - * Set the threshold for the minimum number of free blocks in the - * log to the maximum of what the caller needs, one quarter of the - * log, and 256 blocks. - */ - free_threshold = BTOBB(need_bytes); - free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2)); - free_threshold = MAX(free_threshold, 256); - if (free_blocks < free_threshold) { + xfs_lsn_t threshold_lsn = 0; + xfs_lsn_t tail_lsn; + int free_blocks; + int free_bytes; + int threshold_block; + int threshold_cycle; + int free_threshold; + + ASSERT(BTOBB(need_bytes) < log->l_logBBsize); + + tail_lsn = log->l_tail_lsn; + free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); + free_blocks = BTOBBT(free_bytes); + + /* + * Set the threshold for the minimum number of free blocks in the + * log to the maximum of what the caller needs, one quarter of the + * log, and 256 blocks. 
+ */ + free_threshold = BTOBB(need_bytes); + free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2)); + free_threshold = MAX(free_threshold, 256); + if (free_blocks >= free_threshold) + return; + threshold_block = BLOCK_LSN(tail_lsn) + free_threshold; threshold_cycle = CYCLE_LSN(tail_lsn); if (threshold_block >= log->l_logBBsize) { - threshold_block -= log->l_logBBsize; - threshold_cycle += 1; + threshold_block -= log->l_logBBsize; + threshold_cycle += 1; } - threshold_lsn = xlog_assign_lsn(threshold_cycle, threshold_block); - - /* Don't pass in an lsn greater than the lsn of the last + threshold_lsn = xlog_assign_lsn(threshold_cycle, + threshold_block); + /* + * Don't pass in an lsn greater than the lsn of the last * log record known to be on disk. */ if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0) - threshold_lsn = log->l_last_sync_lsn; - } - spin_unlock(&log->l_grant_lock); - - /* - * Get the transaction layer to kick the dirty buffers out to - * disk asynchronously. No point in trying to do this if - * the filesystem is shutting down. - */ - if (threshold_lsn && - !XLOG_FORCED_SHUTDOWN(log)) - xfs_trans_ail_push(log->l_ailp, threshold_lsn); -} /* xlog_grant_push_ail */ + threshold_lsn = log->l_last_sync_lsn; + + /* + * Get the transaction layer to kick the dirty buffers out to + * disk asynchronously. No point in trying to do this if + * the filesystem is shutting down. + */ + if (!XLOG_FORCED_SHUTDOWN(log)) + xfs_trans_ail_push(log->l_ailp, threshold_lsn); +} /* * The bdstrat callback function for log bufs. This gives us a central @@ -2543,9 +2546,7 @@ redo: trace_xfs_log_grant_sleep2(log, tic); - spin_unlock(&log->l_grant_lock); - xlog_grant_push_ail(log->l_mp, need_bytes); - spin_lock(&log->l_grant_lock); + xlog_grant_push_ail(log, need_bytes); XFS_STATS_INC(xs_sleep_logspace); xlog_wait(&tic->t_wait, &log->l_grant_lock); @@ -2641,9 +2642,7 @@ xlog_regrant_write_log_space(xlog_t *log, trace_xfs_log_regrant_write_sleep1(log, tic); - spin_unlock(&log->l_grant_lock); - xlog_grant_push_ail(log->l_mp, need_bytes); - spin_lock(&log->l_grant_lock); + xlog_grant_push_ail(log, need_bytes); XFS_STATS_INC(xs_sleep_logspace); xlog_wait(&tic->t_wait, &log->l_grant_lock); @@ -2666,9 +2665,7 @@ redo: if (free_bytes < need_bytes) { if (list_empty(&tic->t_queue)) list_add_tail(&tic->t_queue, &log->l_writeq); - spin_unlock(&log->l_grant_lock); - xlog_grant_push_ail(log->l_mp, need_bytes); - spin_lock(&log->l_grant_lock); + xlog_grant_push_ail(log, need_bytes); XFS_STATS_INC(xs_sleep_logspace); trace_xfs_log_regrant_write_sleep2(log, tic); -- cgit v1.2.2 From 84f3c683c4d3f36d3c3ed320babd960a332ac458 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 3 Dec 2010 22:11:29 +1100 Subject: xfs: convert l_last_sync_lsn to an atomic variable log->l_last_sync_lsn is updated in only one critical spot - log buffer Io completion - and is protected by the grant lock here. This requires the grant lock to be taken for every log buffer IO completion. Converting the l_last_sync_lsn variable to an atomic64_t means that we do not need to take the grant lock in log buffer IO completion to update it. This also removes the need for explicitly holding a spinlock to read the l_last_sync_lsn on 32 bit platforms. 
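[Illustrative aside, not kernel code: the gain from this conversion is that a 64-bit LSN held in an atomic variable can be read and written without a lock, even on 32-bit platforms where a plain 64-bit load could otherwise tear. A minimal C11 userspace analogue of the new read/write pattern, with hypothetical names:]

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t last_sync_lsn;

/* writer side: log buffer I/O completion publishes the new LSN */
static void publish_last_sync_lsn(int64_t lsn)
{
	atomic_store(&last_sync_lsn, lsn);	/* no lock needed, the store is atomic */
}

/* reader side: a torn read is impossible, so no lock here either */
static int64_t read_last_sync_lsn(void)
{
	return atomic_load(&last_sync_lsn);
}

int main(void)
{
	publish_last_sync_lsn(((int64_t)2 << 32) | 1024);
	printf("last_sync_lsn = 0x%llx\n",
	       (unsigned long long)read_last_sync_lsn());
	return 0;
}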
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log.c | 55 ++++++++++++++++++++++-------------------------- fs/xfs/xfs_log_priv.h | 9 +++++++- fs/xfs/xfs_log_recover.c | 6 +++--- 3 files changed, 36 insertions(+), 34 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 1e2020d5a8b6..70790eb48336 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -675,12 +675,8 @@ xfs_log_move_tail(xfs_mount_t *mp, if (XLOG_FORCED_SHUTDOWN(log)) return; - if (tail_lsn == 0) { - /* needed since sync_lsn is 64 bits */ - spin_lock(&log->l_icloglock); - tail_lsn = log->l_last_sync_lsn; - spin_unlock(&log->l_icloglock); - } + if (tail_lsn == 0) + tail_lsn = atomic64_read(&log->l_last_sync_lsn); spin_lock(&log->l_grant_lock); @@ -800,11 +796,9 @@ xlog_assign_tail_lsn(xfs_mount_t *mp) tail_lsn = xfs_trans_ail_tail(mp->m_ail); spin_lock(&log->l_grant_lock); - if (tail_lsn != 0) { - log->l_tail_lsn = tail_lsn; - } else { - tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn; - } + if (!tail_lsn) + tail_lsn = atomic64_read(&log->l_last_sync_lsn); + log->l_tail_lsn = tail_lsn; spin_unlock(&log->l_grant_lock); return tail_lsn; @@ -1014,9 +1008,9 @@ xlog_alloc_log(xfs_mount_t *mp, log->l_flags |= XLOG_ACTIVE_RECOVERY; log->l_prev_block = -1; - log->l_tail_lsn = xlog_assign_lsn(1, 0); /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ - log->l_last_sync_lsn = log->l_tail_lsn; + log->l_tail_lsn = xlog_assign_lsn(1, 0); + atomic64_set(&log->l_last_sync_lsn, xlog_assign_lsn(1, 0)); log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0); xlog_assign_grant_head(&log->l_grant_write_head, 1, 0); @@ -1194,6 +1188,7 @@ xlog_grant_push_ail( int need_bytes) { xfs_lsn_t threshold_lsn = 0; + xfs_lsn_t last_sync_lsn; xfs_lsn_t tail_lsn; int free_blocks; int free_bytes; @@ -1228,10 +1223,12 @@ xlog_grant_push_ail( threshold_block); /* * Don't pass in an lsn greater than the lsn of the last - * log record known to be on disk. + * log record known to be on disk. Use a snapshot of the last sync lsn + * so that it doesn't change between the compare and the set. */ - if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0) - threshold_lsn = log->l_last_sync_lsn; + last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); + if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0) + threshold_lsn = last_sync_lsn; /* * Get the transaction layer to kick the dirty buffers out to @@ -2194,7 +2191,7 @@ xlog_state_do_callback( lowest_lsn = xlog_get_lowest_lsn(log); if (lowest_lsn && XFS_LSN_CMP(lowest_lsn, - be64_to_cpu(iclog->ic_header.h_lsn)) < 0) { + be64_to_cpu(iclog->ic_header.h_lsn)) < 0) { iclog = iclog->ic_next; continue; /* Leave this iclog for * another thread */ @@ -2202,23 +2199,21 @@ xlog_state_do_callback( iclog->ic_state = XLOG_STATE_CALLBACK; - spin_unlock(&log->l_icloglock); - /* l_last_sync_lsn field protected by - * l_grant_lock. Don't worry about iclog's lsn. - * No one else can be here except us. + /* + * update the last_sync_lsn before we drop the + * icloglock to ensure we are the only one that + * can update it. 
*/ - spin_lock(&log->l_grant_lock); - ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn, - be64_to_cpu(iclog->ic_header.h_lsn)) <= 0); - log->l_last_sync_lsn = - be64_to_cpu(iclog->ic_header.h_lsn); - spin_unlock(&log->l_grant_lock); + ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), + be64_to_cpu(iclog->ic_header.h_lsn)) <= 0); + atomic64_set(&log->l_last_sync_lsn, + be64_to_cpu(iclog->ic_header.h_lsn)); - } else { - spin_unlock(&log->l_icloglock); + } else ioerrors++; - } + + spin_unlock(&log->l_icloglock); /* * Keep processing entries in the callback list until diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index e2bb276eb2a7..958f356df10e 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -507,7 +507,6 @@ typedef struct log { spinlock_t l_icloglock; /* grab to change iclog state */ xfs_lsn_t l_tail_lsn; /* lsn of 1st LR with unflushed * buffers */ - xfs_lsn_t l_last_sync_lsn;/* lsn of last LR on disk */ int l_curr_cycle; /* Cycle number of log writes */ int l_prev_cycle; /* Cycle number before last * block increment */ @@ -521,6 +520,14 @@ typedef struct log { int64_t l_grant_reserve_head; int64_t l_grant_write_head; + /* + * l_last_sync_lsn is an atomic so it can be set and read without + * needing to hold specific locks. To avoid operations contending with + * other hot objects, place it on a separate cacheline. + */ + /* lsn of last LR on disk */ + atomic64_t l_last_sync_lsn ____cacheline_aligned_in_smp; + /* The following field are used for debugging; need to hold icloglock */ #ifdef DEBUG char *l_iclog_bak[XLOG_MAX_ICLOGS]; diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 1550404a8aeb..18e1e18d7147 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -937,7 +937,7 @@ xlog_find_tail( if (found == 2) log->l_curr_cycle++; log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn); - log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn); + atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn)); xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle, BBTOB(log->l_curr_block)); xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle, @@ -989,9 +989,9 @@ xlog_find_tail( log->l_tail_lsn = xlog_assign_lsn(log->l_curr_cycle, after_umount_blk); - log->l_last_sync_lsn = + atomic64_set(&log->l_last_sync_lsn, xlog_assign_lsn(log->l_curr_cycle, - after_umount_blk); + after_umount_blk)); *tail_blk = after_umount_blk; /* -- cgit v1.2.2 From 1c3cb9ec07fabf0c0970adc46fd2a1f09c1186dd Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 21 Dec 2010 12:28:39 +1100 Subject: xfs: convert l_tail_lsn to an atomic variable. log->l_tail_lsn is currently protected by the log grant lock. The lock is only needed for serialising readers against writers, so we don't really need the lock if we make the l_tail_lsn variable an atomic. Converting the l_tail_lsn variable to an atomic64_t means we can start to peel back the grant lock from various operations. Also, provide functions to safely crack an atomic LSN variable into it's component pieces and to recombined the components into an atomic variable. Use them where appropriate. This also removes the need for explicitly holding a spinlock to read the l_tail_lsn on 32 bit platforms. 
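[Illustrative aside, not part of the patch: the crack/assign helpers introduced below sample the atomic LSN once into a local value before splitting it, so the cycle and block components always come from the same snapshot even while other CPUs update the variable. A userspace model of that sample-then-crack rule, with hypothetical names:]

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t tail_lsn;

static void assign_atomic_lsn(_Atomic int64_t *lsn, uint32_t cycle, uint32_t block)
{
	atomic_store(lsn, ((int64_t)cycle << 32) | block);
}

static void crack_atomic_lsn(_Atomic int64_t *lsn, uint32_t *cycle, uint32_t *block)
{
	/* one atomic read, then split the local copy: the two halves always match */
	int64_t val = atomic_load(lsn);

	*cycle = (uint32_t)(val >> 32);
	*block = (uint32_t)val;
}

int main(void)
{
	uint32_t cycle, block;

	assign_atomic_lsn(&tail_lsn, 1, 0);
	crack_atomic_lsn(&tail_lsn, &cycle, &block);
	printf("tail: cycle=%u block=%u\n", cycle, block);
	return 0;
}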
Signed-off-by: Dave Chinner --- fs/xfs/linux-2.6/xfs_trace.h | 2 +- fs/xfs/xfs_log.c | 56 ++++++++++++++++++++------------------------ fs/xfs/xfs_log_priv.h | 37 ++++++++++++++++++++++++----- fs/xfs/xfs_log_recover.c | 14 +++++------ 4 files changed, 63 insertions(+), 46 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index 3ff6b35f9207..b180e1bf8257 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -794,7 +794,7 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class, &__entry->grant_write_bytes); __entry->curr_cycle = log->l_curr_cycle; __entry->curr_block = log->l_curr_block; - __entry->tail_lsn = log->l_tail_lsn; + __entry->tail_lsn = atomic64_read(&log->l_tail_lsn); ), TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " "t_unit_res %u t_flags %s reserveq %s " diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 70790eb48336..d118bf804480 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -678,15 +678,11 @@ xfs_log_move_tail(xfs_mount_t *mp, if (tail_lsn == 0) tail_lsn = atomic64_read(&log->l_last_sync_lsn); - spin_lock(&log->l_grant_lock); - - /* Also an invalid lsn. 1 implies that we aren't passing in a valid - * tail_lsn. - */ - if (tail_lsn != 1) { - log->l_tail_lsn = tail_lsn; - } + /* tail_lsn == 1 implies that we weren't passed a valid value. */ + if (tail_lsn != 1) + atomic64_set(&log->l_tail_lsn, tail_lsn); + spin_lock(&log->l_grant_lock); if (!list_empty(&log->l_writeq)) { #ifdef DEBUG if (log->l_flags & XLOG_ACTIVE_RECOVERY) @@ -789,21 +785,19 @@ xfs_log_need_covered(xfs_mount_t *mp) * We may be holding the log iclog lock upon entering this routine. */ xfs_lsn_t -xlog_assign_tail_lsn(xfs_mount_t *mp) +xlog_assign_tail_lsn( + struct xfs_mount *mp) { - xfs_lsn_t tail_lsn; - xlog_t *log = mp->m_log; + xfs_lsn_t tail_lsn; + struct log *log = mp->m_log; tail_lsn = xfs_trans_ail_tail(mp->m_ail); - spin_lock(&log->l_grant_lock); if (!tail_lsn) tail_lsn = atomic64_read(&log->l_last_sync_lsn); - log->l_tail_lsn = tail_lsn; - spin_unlock(&log->l_grant_lock); + atomic64_set(&log->l_tail_lsn, tail_lsn); return tail_lsn; -} /* xlog_assign_tail_lsn */ - +} /* * Return the space in the log between the tail and the head. 
The head @@ -831,8 +825,8 @@ xlog_space_left( int head_bytes; xlog_crack_grant_head(head, &head_cycle, &head_bytes); - tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn)); - tail_cycle = CYCLE_LSN(log->l_tail_lsn); + xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); + tail_bytes = BBTOB(tail_bytes); if (tail_cycle == head_cycle && head_bytes >= tail_bytes) free_bytes = log->l_logsize - (head_bytes - tail_bytes); else if (tail_cycle + 1 < head_cycle) @@ -1009,8 +1003,8 @@ xlog_alloc_log(xfs_mount_t *mp, log->l_prev_block = -1; /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ - log->l_tail_lsn = xlog_assign_lsn(1, 0); - atomic64_set(&log->l_last_sync_lsn, xlog_assign_lsn(1, 0)); + xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); + xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0); xlog_assign_grant_head(&log->l_grant_write_head, 1, 0); @@ -1189,7 +1183,6 @@ xlog_grant_push_ail( { xfs_lsn_t threshold_lsn = 0; xfs_lsn_t last_sync_lsn; - xfs_lsn_t tail_lsn; int free_blocks; int free_bytes; int threshold_block; @@ -1198,7 +1191,6 @@ xlog_grant_push_ail( ASSERT(BTOBB(need_bytes) < log->l_logBBsize); - tail_lsn = log->l_tail_lsn; free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); free_blocks = BTOBBT(free_bytes); @@ -1213,8 +1205,9 @@ xlog_grant_push_ail( if (free_blocks >= free_threshold) return; - threshold_block = BLOCK_LSN(tail_lsn) + free_threshold; - threshold_cycle = CYCLE_LSN(tail_lsn); + xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, + &threshold_block); + threshold_block += free_threshold; if (threshold_block >= log->l_logBBsize) { threshold_block -= log->l_logBBsize; threshold_cycle += 1; @@ -2828,11 +2821,11 @@ xlog_state_release_iclog( if (iclog->ic_state == XLOG_STATE_WANT_SYNC) { /* update tail before writing to iclog */ - xlog_assign_tail_lsn(log->l_mp); + xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); sync++; iclog->ic_state = XLOG_STATE_SYNCING; - iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn); - xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn); + iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); + xlog_verify_tail_lsn(log, iclog, tail_lsn); /* cycle incremented when incrementing curr_block */ } spin_unlock(&log->l_icloglock); @@ -3435,7 +3428,7 @@ STATIC void xlog_verify_grant_tail( struct log *log) { - xfs_lsn_t tail_lsn = log->l_tail_lsn; + int tail_cycle, tail_blocks; int cycle, space; /* @@ -3445,9 +3438,10 @@ xlog_verify_grant_tail( * check the byte count. */ xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space); - if (CYCLE_LSN(tail_lsn) != cycle) { - ASSERT(cycle - 1 == CYCLE_LSN(tail_lsn)); - ASSERT(space <= BBTOB(BLOCK_LSN(tail_lsn))); + xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); + if (tail_cycle != cycle) { + ASSERT(cycle - 1 == tail_cycle); + ASSERT(space <= BBTOB(tail_blocks)); } } diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 958f356df10e..d34af1c21ed2 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -53,7 +53,6 @@ struct xfs_mount; BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 
\ XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT)) - static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block) { return ((xfs_lsn_t)cycle << 32) | block; @@ -505,8 +504,6 @@ typedef struct log { * log entries" */ xlog_in_core_t *l_iclog; /* head log queue */ spinlock_t l_icloglock; /* grab to change iclog state */ - xfs_lsn_t l_tail_lsn; /* lsn of 1st LR with unflushed - * buffers */ int l_curr_cycle; /* Cycle number of log writes */ int l_prev_cycle; /* Cycle number before last * block increment */ @@ -521,12 +518,15 @@ typedef struct log { int64_t l_grant_write_head; /* - * l_last_sync_lsn is an atomic so it can be set and read without - * needing to hold specific locks. To avoid operations contending with - * other hot objects, place it on a separate cacheline. + * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and + * read without needing to hold specific locks. To avoid operations + * contending with other hot objects, place each of them on a separate + * cacheline. */ /* lsn of last LR on disk */ atomic64_t l_last_sync_lsn ____cacheline_aligned_in_smp; + /* lsn of 1st LR with unflushed * buffers */ + atomic64_t l_tail_lsn ____cacheline_aligned_in_smp; /* The following field are used for debugging; need to hold icloglock */ #ifdef DEBUG @@ -565,6 +565,31 @@ int xlog_write(struct log *log, struct xfs_log_vec *log_vector, struct xlog_ticket *tic, xfs_lsn_t *start_lsn, xlog_in_core_t **commit_iclog, uint flags); +/* + * When we crack an atomic LSN, we sample it first so that the value will not + * change while we are cracking it into the component values. This means we + * will always get consistent component values to work from. This should always + * be used to smaple and crack LSNs taht are stored and updated in atomic + * variables. + */ +static inline void +xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block) +{ + xfs_lsn_t val = atomic64_read(lsn); + + *cycle = CYCLE_LSN(val); + *block = BLOCK_LSN(val); +} + +/* + * Calculate and assign a value to an atomic LSN variable from component pieces. + */ +static inline void +xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block) +{ + atomic64_set(lsn, xlog_assign_lsn(cycle, block)); +} + /* * When we crack the grrant head, we sample it first so that the value will not * change while we are cracking it into the component values. This means we diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 18e1e18d7147..204d8e5fa7fa 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -936,7 +936,7 @@ xlog_find_tail( log->l_curr_cycle = be32_to_cpu(rhead->h_cycle); if (found == 2) log->l_curr_cycle++; - log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn); + atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn)); atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn)); xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle, BBTOB(log->l_curr_block)); @@ -971,7 +971,7 @@ xlog_find_tail( } after_umount_blk = (i + hblks + (int) BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize; - tail_lsn = log->l_tail_lsn; + tail_lsn = atomic64_read(&log->l_tail_lsn); if (*head_blk == after_umount_blk && be32_to_cpu(rhead->h_num_logops) == 1) { umount_data_blk = (i + hblks) % log->l_logBBsize; @@ -986,12 +986,10 @@ xlog_find_tail( * log records will point recovery to after the * current unmount record. 
*/ - log->l_tail_lsn = - xlog_assign_lsn(log->l_curr_cycle, - after_umount_blk); - atomic64_set(&log->l_last_sync_lsn, - xlog_assign_lsn(log->l_curr_cycle, - after_umount_blk)); + xlog_assign_atomic_lsn(&log->l_tail_lsn, + log->l_curr_cycle, after_umount_blk); + xlog_assign_atomic_lsn(&log->l_last_sync_lsn, + log->l_curr_cycle, after_umount_blk); *tail_blk = after_umount_blk; /* -- cgit v1.2.2 From c8a09ff8ca2235bccdaea8a52fbd5349646a8ba4 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Sat, 4 Dec 2010 00:02:40 +1100 Subject: xfs: convert log grant heads to atomic variables Convert the log grant heads to atomic64_t types in preparation for converting the accounting algorithms to atomic operations. his patch just converts the variables; the algorithmic changes are in a separate patch for clarity. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log.c | 8 ++++---- fs/xfs/xfs_log_priv.h | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index d118bf804480..a1d7d12fc51f 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -47,7 +47,7 @@ STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, xfs_buftarg_t *log_target, xfs_daddr_t blk_offset, int num_bblks); -STATIC int xlog_space_left(struct log *log, int64_t *head); +STATIC int xlog_space_left(struct log *log, atomic64_t *head); STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); STATIC void xlog_dealloc_log(xlog_t *log); @@ -100,7 +100,7 @@ STATIC int xlog_iclogs_empty(xlog_t *log); static void xlog_grant_sub_space( struct log *log, - int64_t *head, + atomic64_t *head, int bytes) { int cycle, space; @@ -119,7 +119,7 @@ xlog_grant_sub_space( static void xlog_grant_add_space( struct log *log, - int64_t *head, + atomic64_t *head, int bytes) { int tmp; @@ -816,7 +816,7 @@ xlog_assign_tail_lsn( STATIC int xlog_space_left( struct log *log, - int64_t *head) + atomic64_t *head) { int free_bytes; int tail_bytes; diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index d34af1c21ed2..7619d6a02388 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -514,8 +514,8 @@ typedef struct log { spinlock_t l_grant_lock ____cacheline_aligned_in_smp; struct list_head l_reserveq; struct list_head l_writeq; - int64_t l_grant_reserve_head; - int64_t l_grant_write_head; + atomic64_t l_grant_reserve_head; + atomic64_t l_grant_write_head; /* * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and @@ -596,18 +596,18 @@ xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block) * will always get consistent component values to work from. 
*/ static inline void -xlog_crack_grant_head(int64_t *head, int *cycle, int *space) +xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space) { - int64_t val = *head; + int64_t val = atomic64_read(head); *cycle = val >> 32; *space = val & 0xffffffff; } static inline void -xlog_assign_grant_head(int64_t *head, int cycle, int space) +xlog_assign_grant_head(atomic64_t *head, int cycle, int space) { - *head = ((int64_t)cycle << 32) | space; + atomic64_set(head, ((int64_t)cycle << 32) | space); } /* -- cgit v1.2.2 From 7ddbead6e6d3c730570a215ab9a6b1d126c54d34 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 4 Nov 2010 20:08:02 -0700 Subject: jffs2: use vzalloc Signed-off-by: Joe Perches Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- fs/jffs2/build.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c index 85c6be2db02f..3005ec4520ad 100644 --- a/fs/jffs2/build.c +++ b/fs/jffs2/build.c @@ -336,14 +336,13 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c) size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; #ifndef __ECOS if (jffs2_blocks_use_vmalloc(c)) - c->blocks = vmalloc(size); + c->blocks = vzalloc(size); else #endif - c->blocks = kmalloc(size, GFP_KERNEL); + c->blocks = kzalloc(size, GFP_KERNEL); if (!c->blocks) return -ENOMEM; - memset(c->blocks, 0, size); for (i=0; i<c->nr_blocks; i++) { INIT_LIST_HEAD(&c->blocks[i].list); c->blocks[i].offset = i * c->sector_size; -- cgit v1.2.2 From f326966b3df47f4fa7e90425f60efdd30c31fe19 Mon Sep 17 00:00:00 2001 From: Vasiliy Kulikov Date: Sun, 14 Nov 2010 23:08:39 +0300 Subject: jffs2: fix error value sign do_verify_xattr_datum(), do_load_xattr_datum(), load_xattr_datum() and verify_xattr_ref() should return negative value on error. Sometimes they return EIO that is positive. Change this to -EIO.
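The sign matters because callers across the kernel follow the negative-errno convention and treat only values below zero as failures, so a bare positive EIO is silently taken for success. A minimal sketch of that convention; the helper and caller names here are hypothetical and not taken from the jffs2 code:

#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical helper: 0 on success, negative errno on failure. Returning a
 * bare (positive) EIO instead would slip past the caller's "ret < 0" test. */
static int load_record(bool corrupt)
{
	if (corrupt)
		return -EIO;
	return 0;
}

static int caller(void)
{
	int ret = load_record(true);

	if (ret < 0)		/* only negative values are treated as errors */
		return ret;
	return 0;
}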
Signed-off-by: Vasiliy Kulikov Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- fs/jffs2/xattr.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c index 9b572ca40a49..4f9cc0482949 100644 --- a/fs/jffs2/xattr.c +++ b/fs/jffs2/xattr.c @@ -151,7 +151,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", offset, je32_to_cpu(rx.hdr_crc), crc); xd->flags |= JFFS2_XFLAGS_INVALID; - return EIO; + return -EIO; } totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len)); if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK @@ -167,7 +167,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat je32_to_cpu(rx.xid), xd->xid, je32_to_cpu(rx.version), xd->version); xd->flags |= JFFS2_XFLAGS_INVALID; - return EIO; + return -EIO; } xd->xprefix = rx.xprefix; xd->name_len = rx.name_len; @@ -230,7 +230,7 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum ref_offset(xd->node), xd->data_crc, crc); kfree(data); xd->flags |= JFFS2_XFLAGS_INVALID; - return EIO; + return -EIO; } xd->flags |= JFFS2_XFLAGS_HOT; @@ -268,7 +268,7 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x if (xd->xname) return 0; if (xd->flags & JFFS2_XFLAGS_INVALID) - return EIO; + return -EIO; if (unlikely(is_xattr_datum_unchecked(c, xd))) rc = do_verify_xattr_datum(c, xd); if (!rc) @@ -460,7 +460,7 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref if (crc != je32_to_cpu(rr.node_crc)) { JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", offset, je32_to_cpu(rr.node_crc), crc); - return EIO; + return -EIO; } if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF @@ -470,7 +470,7 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK, je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF, je32_to_cpu(rr.totlen), PAD(sizeof(rr))); - return EIO; + return -EIO; } ref->ino = je32_to_cpu(rr.ino); ref->xid = je32_to_cpu(rr.xid); -- cgit v1.2.2 From 027d9ac2c8de9f70b7319e08dee121b8b85c8d88 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 15 Nov 2010 21:20:05 +0300 Subject: jffs2: typo in comment It says FB instead of FS (file system). Signed-off-by: Dan Carpenter Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- fs/jffs2/jffs2_fs_sb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h index f864005de64c..0bc6a6c80a56 100644 --- a/fs/jffs2/jffs2_fs_sb.h +++ b/fs/jffs2/jffs2_fs_sb.h @@ -144,4 +144,4 @@ struct jffs2_sb_info { void *os_priv; }; -#endif /* _JFFS2_FB_SB */ +#endif /* _JFFS2_FS_SB */ -- cgit v1.2.2 From 8ac835056ca39b242d98332f46e4d65428a8b7db Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 7 Dec 2010 20:16:56 +0100 Subject: fuse: ioctl cleanup Get rid of unnecessary page_address()-es. 
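The cleanup leans on the fact that __get_free_page() already returns a kernel virtual address, so the intermediate struct page and the page_address() translation at each use site become unnecessary. A condensed sketch of the two allocation patterns, simplified from the hunks below; the helper names are made up for illustration, and the matching release changes from __free_page(page) to free_page((unsigned long) ptr):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/uio.h>

/* Old pattern: allocate a struct page, translate it whenever it is used. */
static struct iovec *iov_buffer_old(struct page **pagep)
{
	*pagep = alloc_page(GFP_KERNEL);
	return *pagep ? page_address(*pagep) : NULL;
}

/* New pattern: the returned address is directly usable as the iovec array. */
static struct iovec *iov_buffer_new(void)
{
	return (struct iovec *) __get_free_page(GFP_KERNEL);
}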
Signed-off-by: Miklos Szeredi CC: Tejun Heo --- fs/fuse/file.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 8b984a2cebbd..ca3b6bbb3790 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1740,7 +1740,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, struct fuse_ioctl_out outarg; struct fuse_req *req = NULL; struct page **pages = NULL; - struct page *iov_page = NULL; + struct iovec *iov_page = NULL; struct iovec *in_iov = NULL, *out_iov = NULL; unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages; size_t in_size, out_size, transferred; @@ -1751,7 +1751,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, err = -ENOMEM; pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL); - iov_page = alloc_page(GFP_KERNEL); + iov_page = (struct iovec *) __get_free_page(GFP_KERNEL); if (!pages || !iov_page) goto out; @@ -1760,7 +1760,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, * RETRY from server is not allowed. */ if (!(flags & FUSE_IOCTL_UNRESTRICTED)) { - struct iovec *iov = page_address(iov_page); + struct iovec *iov = iov_page; iov->iov_base = (void __user *)arg; iov->iov_len = _IOC_SIZE(cmd); @@ -1841,7 +1841,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, /* did it ask for retry? */ if (outarg.flags & FUSE_IOCTL_RETRY) { - char *vaddr; + void *vaddr; /* no retry if in restricted mode */ err = -EIO; @@ -1862,14 +1862,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, goto out; vaddr = kmap_atomic(pages[0], KM_USER0); - err = fuse_copy_ioctl_iovec(page_address(iov_page), vaddr, + err = fuse_copy_ioctl_iovec(iov_page, vaddr, transferred, in_iovs + out_iovs, (flags & FUSE_IOCTL_COMPAT) != 0); kunmap_atomic(vaddr, KM_USER0); if (err) goto out; - in_iov = page_address(iov_page); + in_iov = iov_page; out_iov = in_iov + in_iovs; err = fuse_verify_ioctl_iov(in_iov, in_iovs); @@ -1891,8 +1891,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, out: if (req) fuse_put_request(fc, req); - if (iov_page) - __free_page(iov_page); + free_page((unsigned long) iov_page); while (num_pages) __free_page(pages[--num_pages]); kfree(pages); -- cgit v1.2.2 From 07e77dca8a1f17a724a9b7449f0ca02e70e9d057 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 7 Dec 2010 20:16:56 +0100 Subject: fuse: separate queue for FORGET requests Terje Malmedal reports that a fuse filesystem with 32 million inodes on a machine with lots of memory can go unresponsive for up to 30 minutes when all those inodes are evicted from the icache. The reason is that FORGET messages, sent when the inode is evicted, are queued up together with regular filesystem requests, and while the huge queue of FORGET messages are processed no other filesystem operation can proceed. Since a full fuse request structure is allocated for each inode, these take up quite a bit of memory as well. To solve these issues, create a slim 'fuse_forget_link' structure containing just the minimum of information required to send the FORGET request and chain these on a separate queue. When userspace is asking for a request make sure that FORGET and non-FORGET requests are selected fairly: for each 8 non-FORGET allow 16 FORGET requests. This will make sure FORGETs do not pile up, yet other requests are also allowed to proceed while the queued FORGETs are processed. 
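The queueing scheme is easier to see in isolation. Below is a condensed model of it: the structure and field names follow the patch, but the locking, the reader wakeup, and the copy to the userspace buffer are omitted, so this is an illustrative sketch rather than the driver code itself.

#include <linux/types.h>

struct forget_link {
	u64 nodeid;
	u64 nlookup;
	struct forget_link *next;
};

struct conn {
	struct forget_link forget_list_head;	/* dummy node, never dequeued */
	struct forget_link *forget_list_tail;	/* &forget_list_head when empty */
	int forget_batch;
};

/* Writer side: append in O(1) through the tail pointer. */
static void queue_forget(struct conn *fc, struct forget_link *forget)
{
	forget->next = NULL;
	fc->forget_list_tail->next = forget;
	fc->forget_list_tail = forget;
}

/* Reader side: serve up to 16 queued FORGETs for every 8 regular requests. */
static bool take_forget_next(struct conn *fc, bool other_requests_pending)
{
	if (!fc->forget_list_head.next)
		return false;				/* nothing queued */
	if (!other_requests_pending || fc->forget_batch-- > 0)
		return true;				/* hand out a FORGET */
	if (fc->forget_batch <= -8)
		fc->forget_batch = 16;			/* replenish the FORGET budget */
	return false;					/* let a regular request go first */
}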
Reported-by: Terje Malmedal Signed-off-by: Miklos Szeredi --- fs/fuse/dev.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++------ fs/fuse/dir.c | 53 +++++++++++++++++----------------- fs/fuse/fuse_i.h | 28 ++++++++++++------ fs/fuse/inode.c | 30 ++++++++------------ 4 files changed, 133 insertions(+), 64 deletions(-) (limited to 'fs') diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 6e07696308dc..fed65303eeeb 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -251,6 +251,20 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req) kill_fasync(&fc->fasync, SIGIO, POLL_IN); } +void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, + u64 nodeid, u64 nlookup) +{ + forget->nodeid = nodeid; + forget->nlookup = nlookup; + + spin_lock(&fc->lock); + fc->forget_list_tail->next = forget; + fc->forget_list_tail = forget; + wake_up(&fc->waitq); + kill_fasync(&fc->fasync, SIGIO, POLL_IN); + spin_unlock(&fc->lock); +} + static void flush_bg_queue(struct fuse_conn *fc) { while (fc->active_background < fc->max_background && @@ -438,12 +452,6 @@ static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req) } } -void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req) -{ - req->isreply = 0; - fuse_request_send_nowait(fc, req); -} - void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req) { req->isreply = 1; @@ -896,9 +904,15 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, return err; } +static int forget_pending(struct fuse_conn *fc) +{ + return fc->forget_list_head.next != NULL; +} + static int request_pending(struct fuse_conn *fc) { - return !list_empty(&fc->pending) || !list_empty(&fc->interrupts); + return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) || + forget_pending(fc); } /* Wait until a request is available on the pending list */ @@ -960,6 +974,50 @@ __releases(fc->lock) return err ? err : reqsize; } +static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc) +{ + struct fuse_forget_link *forget = fc->forget_list_head.next; + + fc->forget_list_head.next = forget->next; + if (fc->forget_list_head.next == NULL) + fc->forget_list_tail = &fc->forget_list_head; + + return forget; +} + +static int fuse_read_single_forget(struct fuse_conn *fc, + struct fuse_copy_state *cs, + size_t nbytes) +__releases(fc->lock) +{ + int err; + struct fuse_forget_link *forget = dequeue_forget(fc); + struct fuse_forget_in arg = { + .nlookup = forget->nlookup, + }; + struct fuse_in_header ih = { + .opcode = FUSE_FORGET, + .nodeid = forget->nodeid, + .unique = fuse_get_unique(fc), + .len = sizeof(ih) + sizeof(arg), + }; + + spin_unlock(&fc->lock); + kfree(forget); + if (nbytes < ih.len) + return -EINVAL; + + err = fuse_copy_one(cs, &ih, sizeof(ih)); + if (!err) + err = fuse_copy_one(cs, &arg, sizeof(arg)); + fuse_copy_finish(cs); + + if (err) + return err; + + return ih.len; +} + /* * Read a single request into the userspace filesystem's buffer. 
This * function waits until a request is available, then removes it from @@ -998,6 +1056,14 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file, return fuse_read_interrupt(fc, cs, nbytes, req); } + if (forget_pending(fc)) { + if (list_empty(&fc->pending) || fc->forget_batch-- > 0) + return fuse_read_single_forget(fc, cs, nbytes); + + if (fc->forget_batch <= -8) + fc->forget_batch = 16; + } + req = list_entry(fc->pending.next, struct fuse_req, list); req->state = FUSE_REQ_READING; list_move(&req->list, &fc->io); @@ -1090,7 +1156,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, if (!fc) return -EPERM; - bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL); + bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL); if (!bufs) return -ENOMEM; @@ -1626,7 +1692,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, if (!fc) return -EPERM; - bufs = kmalloc(pipe->buffers * sizeof (struct pipe_buffer), GFP_KERNEL); + bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL); if (!bufs) return -ENOMEM; @@ -1770,6 +1836,8 @@ __acquires(fc->lock) flush_bg_queue(fc); end_requests(fc, &fc->pending); end_requests(fc, &fc->processing); + while (forget_pending(fc)) + kfree(dequeue_forget(fc)); } /* diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index c9627c95482d..6ea42e98cb17 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -10,9 +10,9 @@ #include #include -#include #include #include +#include #if BITS_PER_LONG >= 64 static inline void fuse_dentry_settime(struct dentry *entry, u64 time) @@ -165,7 +165,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) struct fuse_entry_out outarg; struct fuse_conn *fc; struct fuse_req *req; - struct fuse_req *forget_req; + struct fuse_forget_link *forget; struct dentry *parent; u64 attr_version; @@ -178,8 +178,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) if (IS_ERR(req)) return 0; - forget_req = fuse_get_req(fc); - if (IS_ERR(forget_req)) { + forget = fuse_alloc_forget(); + if (!forget) { fuse_put_request(fc, req); return 0; } @@ -199,15 +199,14 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) if (!err) { struct fuse_inode *fi = get_fuse_inode(inode); if (outarg.nodeid != get_node_id(inode)) { - fuse_send_forget(fc, forget_req, - outarg.nodeid, 1); + fuse_queue_forget(fc, forget, outarg.nodeid, 1); return 0; } spin_lock(&fc->lock); fi->nlookup++; spin_unlock(&fc->lock); } - fuse_put_request(fc, forget_req); + kfree(forget); if (err || (outarg.attr.mode ^ inode->i_mode) & S_IFMT) return 0; @@ -259,7 +258,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name, { struct fuse_conn *fc = get_fuse_conn_super(sb); struct fuse_req *req; - struct fuse_req *forget_req; + struct fuse_forget_link *forget; u64 attr_version; int err; @@ -273,9 +272,9 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name, if (IS_ERR(req)) goto out; - forget_req = fuse_get_req(fc); - err = PTR_ERR(forget_req); - if (IS_ERR(forget_req)) { + forget = fuse_alloc_forget(); + err = -ENOMEM; + if (!forget) { fuse_put_request(fc, req); goto out; } @@ -301,13 +300,13 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name, attr_version); err = -ENOMEM; if (!*inode) { - fuse_send_forget(fc, forget_req, outarg->nodeid, 1); + fuse_queue_forget(fc, forget, outarg->nodeid, 1); goto out; } err = 0; out_put_forget: - fuse_put_request(fc, 
forget_req); + kfree(forget); out: return err; } @@ -374,7 +373,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, struct inode *inode; struct fuse_conn *fc = get_fuse_conn(dir); struct fuse_req *req; - struct fuse_req *forget_req; + struct fuse_forget_link *forget; struct fuse_create_in inarg; struct fuse_open_out outopen; struct fuse_entry_out outentry; @@ -388,9 +387,9 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, if (flags & O_DIRECT) return -EINVAL; - forget_req = fuse_get_req(fc); - if (IS_ERR(forget_req)) - return PTR_ERR(forget_req); + forget = fuse_alloc_forget(); + if (!forget) + return -ENOMEM; req = fuse_get_req(fc); err = PTR_ERR(req); @@ -448,10 +447,10 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, if (!inode) { flags &= ~(O_CREAT | O_EXCL | O_TRUNC); fuse_sync_release(ff, flags); - fuse_send_forget(fc, forget_req, outentry.nodeid, 1); + fuse_queue_forget(fc, forget, outentry.nodeid, 1); return -ENOMEM; } - fuse_put_request(fc, forget_req); + kfree(forget); d_instantiate(entry, inode); fuse_change_entry_timeout(entry, &outentry); fuse_invalidate_attr(dir); @@ -469,7 +468,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, out_put_request: fuse_put_request(fc, req); out_put_forget_req: - fuse_put_request(fc, forget_req); + kfree(forget); return err; } @@ -483,12 +482,12 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, struct fuse_entry_out outarg; struct inode *inode; int err; - struct fuse_req *forget_req; + struct fuse_forget_link *forget; - forget_req = fuse_get_req(fc); - if (IS_ERR(forget_req)) { + forget = fuse_alloc_forget(); + if (!forget) { fuse_put_request(fc, req); - return PTR_ERR(forget_req); + return -ENOMEM; } memset(&outarg, 0, sizeof(outarg)); @@ -515,10 +514,10 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, &outarg.attr, entry_attr_timeout(&outarg), 0); if (!inode) { - fuse_send_forget(fc, forget_req, outarg.nodeid, 1); + fuse_queue_forget(fc, forget, outarg.nodeid, 1); return -ENOMEM; } - fuse_put_request(fc, forget_req); + kfree(forget); if (S_ISDIR(inode->i_mode)) { struct dentry *alias; @@ -541,7 +540,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, return 0; out_put_forget_req: - fuse_put_request(fc, forget_req); + kfree(forget); return err; } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 57d4a3a0f102..33369c63a522 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -53,6 +53,13 @@ extern struct mutex fuse_mutex; extern unsigned max_user_bgreq; extern unsigned max_user_congthresh; +/* One forget request */ +struct fuse_forget_link { + u64 nodeid; + u64 nlookup; + struct fuse_forget_link *next; +}; + /** FUSE inode */ struct fuse_inode { /** Inode data */ @@ -66,7 +73,7 @@ struct fuse_inode { u64 nlookup; /** The request used for sending the FORGET message */ - struct fuse_req *forget_req; + struct fuse_forget_link *forget; /** Time in jiffies until the file attributes are valid */ u64 i_time; @@ -255,7 +262,6 @@ struct fuse_req { /** Data for asynchronous requests */ union { - struct fuse_forget_in forget_in; struct { struct fuse_release_in in; struct path path; @@ -369,6 +375,13 @@ struct fuse_conn { /** Pending interrupts */ struct list_head interrupts; + /** Queue of pending forgets */ + struct fuse_forget_link forget_list_head; + struct fuse_forget_link 
*forget_list_tail; + + /** Batching of FORGET requests (positive indicates FORGET batch) */ + int forget_batch; + /** Flag indicating if connection is blocked. This will be the case before the INIT reply is received, and if there are too many outstading backgrounds requests */ @@ -543,8 +556,10 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name, /** * Send FORGET command */ -void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req, - u64 nodeid, u64 nlookup); +void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, + u64 nodeid, u64 nlookup); + +struct fuse_forget_link *fuse_alloc_forget(void); /** * Initialize READ or READDIR request @@ -655,11 +670,6 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req); */ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req); -/** - * Send a request with no reply - */ -void fuse_request_send_noreply(struct fuse_conn *fc, struct fuse_req *req); - /** * Send a request in the background */ diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index cfce3ad86a92..7ba4d351da65 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -71,6 +71,11 @@ struct fuse_mount_data { unsigned blksize; }; +struct fuse_forget_link *fuse_alloc_forget() +{ + return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL); +} + static struct inode *fuse_alloc_inode(struct super_block *sb) { struct inode *inode; @@ -90,8 +95,8 @@ static struct inode *fuse_alloc_inode(struct super_block *sb) INIT_LIST_HEAD(&fi->queued_writes); INIT_LIST_HEAD(&fi->writepages); init_waitqueue_head(&fi->page_waitq); - fi->forget_req = fuse_request_alloc(); - if (!fi->forget_req) { + fi->forget = fuse_alloc_forget(); + if (!fi->forget) { kmem_cache_free(fuse_inode_cachep, inode); return NULL; } @@ -104,24 +109,10 @@ static void fuse_destroy_inode(struct inode *inode) struct fuse_inode *fi = get_fuse_inode(inode); BUG_ON(!list_empty(&fi->write_files)); BUG_ON(!list_empty(&fi->queued_writes)); - if (fi->forget_req) - fuse_request_free(fi->forget_req); + kfree(fi->forget); kmem_cache_free(fuse_inode_cachep, inode); } -void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req, - u64 nodeid, u64 nlookup) -{ - struct fuse_forget_in *inarg = &req->misc.forget_in; - inarg->nlookup = nlookup; - req->in.h.opcode = FUSE_FORGET; - req->in.h.nodeid = nodeid; - req->in.numargs = 1; - req->in.args[0].size = sizeof(struct fuse_forget_in); - req->in.args[0].value = inarg; - fuse_request_send_noreply(fc, req); -} - static void fuse_evict_inode(struct inode *inode) { truncate_inode_pages(&inode->i_data, 0); @@ -129,8 +120,8 @@ static void fuse_evict_inode(struct inode *inode) if (inode->i_sb->s_flags & MS_ACTIVE) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); - fuse_send_forget(fc, fi->forget_req, fi->nodeid, fi->nlookup); - fi->forget_req = NULL; + fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup); + fi->forget = NULL; } } @@ -534,6 +525,7 @@ void fuse_conn_init(struct fuse_conn *fc) INIT_LIST_HEAD(&fc->interrupts); INIT_LIST_HEAD(&fc->bg_queue); INIT_LIST_HEAD(&fc->entry); + fc->forget_list_tail = &fc->forget_list_head; atomic_set(&fc->num_waiting, 0); fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND; fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD; -- cgit v1.2.2 From 02c048b919455aaa38628563cdcc2e691c8a9f53 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 7 Dec 2010 20:16:56 +0100 Subject: fuse: allow batching of FORGET requests Terje Malmedal reports that a 
fuse filesystem with 32 million inodes on a machine with lots of memory can take up to 30 minutes to process FORGET requests when all those inodes are evicted from the icache. To solve this, create a BATCH_FORGET request that allows up to about 8000 FORGET requests to be sent in a single message. This request is only sent if userspace supports interface version 7.16 or later, otherwise fall back to sending individual FORGET messages. Reported-by: Terje Malmedal Signed-off-by: Miklos Szeredi --- fs/fuse/dev.c | 92 +++++++++++++++++++++++++++++++++++++++++++++++++------- fs/fuse/fuse_i.h | 3 +- 2 files changed, 82 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index fed65303eeeb..cf8d28d1fbad 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -254,8 +254,8 @@ static void queue_request(struct fuse_conn *fc, struct fuse_req *req) void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, u64 nodeid, u64 nlookup) { - forget->nodeid = nodeid; - forget->nlookup = nlookup; + forget->forget_one.nodeid = nodeid; + forget->forget_one.nlookup = nlookup; spin_lock(&fc->lock); fc->forget_list_tail->next = forget; @@ -974,15 +974,26 @@ __releases(fc->lock) return err ? err : reqsize; } -static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc) +static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc, + unsigned max, + unsigned *countp) { - struct fuse_forget_link *forget = fc->forget_list_head.next; + struct fuse_forget_link *head = fc->forget_list_head.next; + struct fuse_forget_link **newhead = &head; + unsigned count; - fc->forget_list_head.next = forget->next; + for (count = 0; *newhead != NULL && count < max; count++) + newhead = &(*newhead)->next; + + fc->forget_list_head.next = *newhead; + *newhead = NULL; if (fc->forget_list_head.next == NULL) fc->forget_list_tail = &fc->forget_list_head; - return forget; + if (countp != NULL) + *countp = count; + + return head; } static int fuse_read_single_forget(struct fuse_conn *fc, @@ -991,13 +1002,13 @@ static int fuse_read_single_forget(struct fuse_conn *fc, __releases(fc->lock) { int err; - struct fuse_forget_link *forget = dequeue_forget(fc); + struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL); struct fuse_forget_in arg = { - .nlookup = forget->nlookup, + .nlookup = forget->forget_one.nlookup, }; struct fuse_in_header ih = { .opcode = FUSE_FORGET, - .nodeid = forget->nodeid, + .nodeid = forget->forget_one.nodeid, .unique = fuse_get_unique(fc), .len = sizeof(ih) + sizeof(arg), }; @@ -1018,6 +1029,65 @@ __releases(fc->lock) return ih.len; } +static int fuse_read_batch_forget(struct fuse_conn *fc, + struct fuse_copy_state *cs, size_t nbytes) +__releases(fc->lock) +{ + int err; + unsigned max_forgets; + unsigned count; + struct fuse_forget_link *head; + struct fuse_batch_forget_in arg = { .count = 0 }; + struct fuse_in_header ih = { + .opcode = FUSE_BATCH_FORGET, + .unique = fuse_get_unique(fc), + .len = sizeof(ih) + sizeof(arg), + }; + + if (nbytes < ih.len) { + spin_unlock(&fc->lock); + return -EINVAL; + } + + max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one); + head = dequeue_forget(fc, max_forgets, &count); + spin_unlock(&fc->lock); + + arg.count = count; + ih.len += count * sizeof(struct fuse_forget_one); + err = fuse_copy_one(cs, &ih, sizeof(ih)); + if (!err) + err = fuse_copy_one(cs, &arg, sizeof(arg)); + + while (head) { + struct fuse_forget_link *forget = head; + + if (!err) { + err = fuse_copy_one(cs, &forget->forget_one, + 
sizeof(forget->forget_one)); + } + head = forget->next; + kfree(forget); + } + + fuse_copy_finish(cs); + + if (err) + return err; + + return ih.len; +} + +static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs, + size_t nbytes) +__releases(fc->lock) +{ + if (fc->minor < 16 || fc->forget_list_head.next->next == NULL) + return fuse_read_single_forget(fc, cs, nbytes); + else + return fuse_read_batch_forget(fc, cs, nbytes); +} + /* * Read a single request into the userspace filesystem's buffer. This * function waits until a request is available, then removes it from @@ -1058,7 +1128,7 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file, if (forget_pending(fc)) { if (list_empty(&fc->pending) || fc->forget_batch-- > 0) - return fuse_read_single_forget(fc, cs, nbytes); + return fuse_read_forget(fc, cs, nbytes); if (fc->forget_batch <= -8) fc->forget_batch = 16; @@ -1837,7 +1907,7 @@ __acquires(fc->lock) end_requests(fc, &fc->pending); end_requests(fc, &fc->processing); while (forget_pending(fc)) - kfree(dequeue_forget(fc)); + kfree(dequeue_forget(fc, 1, NULL)); } /* diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 33369c63a522..ae5744a2f9e9 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -55,8 +55,7 @@ extern unsigned max_user_congthresh; /* One forget request */ struct fuse_forget_link { - u64 nodeid; - u64 nlookup; + struct fuse_forget_one forget_one; struct fuse_forget_link *next; }; -- cgit v1.2.2 From 1baa26b2be92fe9917e2f7ef46d423b5dfa4da71 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 7 Dec 2010 20:16:56 +0100 Subject: fuse: fix ioctl ABI In kernel ABI version 7.16 and later FUSE_IOCTL_RETRY reply from a unrestricted IOCTL request shall return with an array of 'struct fuse_ioctl_iovec' instead of 'struct iovec'. This fixes the ABI ambiguity of 32bit vs. 64bit. Reported-by: "ccmail111" Signed-off-by: Miklos Szeredi CC: Tejun Heo --- fs/fuse/file.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 48 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ca3b6bbb3790..95da1bc1c826 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1634,9 +1634,9 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov, * and 64bit. Fortunately we can determine which structure the server * used from the size of the reply. */ -static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, - size_t transferred, unsigned count, - bool is_compat) +static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src, + size_t transferred, unsigned count, + bool is_compat) { #ifdef CONFIG_COMPAT if (count * sizeof(struct compat_iovec) == transferred) { @@ -1680,6 +1680,42 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) return 0; } +static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst, + void *src, size_t transferred, unsigned count, + bool is_compat) +{ + unsigned i; + struct fuse_ioctl_iovec *fiov = src; + + if (fc->minor < 16) { + return fuse_copy_ioctl_iovec_old(dst, src, transferred, + count, is_compat); + } + + if (count * sizeof(struct fuse_ioctl_iovec) != transferred) + return -EIO; + + for (i = 0; i < count; i++) { + /* Did the server supply an inappropriate value? 
*/ + if (fiov[i].base != (unsigned long) fiov[i].base || + fiov[i].len != (unsigned long) fiov[i].len) + return -EIO; + + dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base; + dst[i].iov_len = (size_t) fiov[i].len; + +#ifdef CONFIG_COMPAT + if (is_compat && + (ptr_to_compat(dst[i].iov_base) != fiov[i].base || + (compat_size_t) dst[i].iov_len != fiov[i].len)) + return -EIO; +#endif + } + + return 0; +} + + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1746,8 +1782,15 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, size_t in_size, out_size, transferred; int err; +#if BITS_PER_LONG == 32 + inarg.flags |= FUSE_IOCTL_32BIT; +#else + if (flags & FUSE_IOCTL_COMPAT) + inarg.flags |= FUSE_IOCTL_32BIT; +#endif + /* assume all the iovs returned by client always fits in a page */ - BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); + BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); err = -ENOMEM; pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL); @@ -1862,7 +1905,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, goto out; vaddr = kmap_atomic(pages[0], KM_USER0); - err = fuse_copy_ioctl_iovec(iov_page, vaddr, + err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr, transferred, in_iovs + out_iovs, (flags & FUSE_IOCTL_COMPAT) != 0); kunmap_atomic(vaddr, KM_USER0); -- cgit v1.2.2 From 747fecab32e47180e2668c2b22b25752d371e636 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Thu, 9 Dec 2010 23:25:09 +0100 Subject: coda: kill redundant cast in coda_alloc_inode() kmem_cache_alloc() returns a void pointer which there is no need to cast. Signed-off-by: Jesper Juhl Signed-off-by: Jiri Kosina --- fs/coda/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 5ea57c8c7f97..1a49c1708a54 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -45,7 +45,7 @@ static struct kmem_cache * coda_inode_cachep; static struct inode *coda_alloc_inode(struct super_block *sb) { struct coda_inode_info *ei; - ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL); + ei = kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL); if (!ei) return NULL; memset(&ei->c_fid, 0, sizeof(struct CodaFid)); -- cgit v1.2.2 From a34f0b31398020e2d3be653eb695bd17a9cf3b55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Fri, 10 Dec 2010 14:55:42 +0100 Subject: fix comment typos concerning "consistent" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Uwe Kleine-König Signed-off-by: Jiri Kosina --- fs/jbd/transaction.c | 2 +- fs/jbd2/transaction.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index 846a3f314111..5b2e4c30a2a1 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -207,7 +207,7 @@ repeat_locked: * the committing transaction. Really, we only need to give it * committing_transaction->t_outstanding_credits plus "enough" for * the log control blocks. - * Also, this test is inconsitent with the matching one in + * Also, this test is inconsistent with the matching one in * journal_extend(). 
*/ if (__log_space_left(journal) < jbd_space_needed(journal)) { diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 6bf0a242613e..c7934900dcdd 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -251,7 +251,7 @@ repeat: * the committing transaction. Really, we only need to give it * committing_transaction->t_outstanding_credits plus "enough" for * the log control blocks. - * Also, this test is inconsitent with the matching one in + * Also, this test is inconsistent with the matching one in * jbd2_journal_extend(). */ if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) { -- cgit v1.2.2 From c0d8768af260e2cbb4bf659ae6094a262c86b085 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 10 Dec 2010 12:11:50 +0900 Subject: anon_inodes: fix wrong function name in comment Signed-off-by: Namhyung Kim Signed-off-by: Jiri Kosina --- fs/anon_inodes.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 57ce55b2564c..73097336ea2a 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -64,9 +64,9 @@ static const struct address_space_operations anon_aops = { }; /** - * anon_inode_getfd - creates a new file instance by hooking it up to an - * anonymous inode, and a dentry that describe the "class" - * of the file + * anon_inode_getfile - creates a new file instance by hooking it up to an + * anonymous inode, and a dentry that describe the "class" + * of the file * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file -- cgit v1.2.2 From b9d41052794385f9d47ebb7acf4a772f3ad02398 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 13 Dec 2010 13:42:24 -0600 Subject: dlm: sanitize work_start() in lowcomms.c The create_workqueue() returns NULL if failed rather than ERR_PTR(). Fix error checking and remove unnecessary variable 'error'. Signed-off-by: Namhyung Kim Cc: Tejun Heo Signed-off-by: David Teigland --- fs/dlm/lowcomms.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 0e75f152eac2..9c64ae9e4c1a 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -1468,22 +1468,19 @@ static void work_stop(void) static int work_start(void) { - int error; recv_workqueue = alloc_workqueue("dlm_recv", WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZEABLE, 0); - error = IS_ERR(recv_workqueue); - if (error) { - log_print("can't start dlm_recv %d", error); - return error; + if (!recv_workqueue) { + log_print("can't start dlm_recv"); + return -ENOMEM; } send_workqueue = alloc_workqueue("dlm_send", WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZEABLE, 0); - error = IS_ERR(send_workqueue); - if (error) { - log_print("can't start dlm_send %d", error); + if (!send_workqueue) { + log_print("can't start dlm_send"); destroy_workqueue(recv_workqueue); - return error; + return -ENOMEM; } return 0; -- cgit v1.2.2 From fd8c37eccdda21153298997417144b38b1623196 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 15 Dec 2010 20:26:48 -0500 Subject: ext4: Simplify the usage of clear_opt() and set_opt() macros Change clear_opt() and set_opt() to take a superblock pointer instead of a pointer to EXT4_SB(sb)->s_mount_opt. This makes it easier for us to support a second mount option field. 
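The change is mechanical: the macros now locate the flags word themselves, so call sites no longer spell out EXT4_SB(sb)->s_mount_opt. A condensed view of the new macro shape and one call site, matching the hunks below:

/* New form: the macro digs the flags word out of the superblock itself.
 * (Old form was "#define set_opt(o, opt) o |= EXT4_MOUNT_##opt", with every
 * caller passing sbi->s_mount_opt explicitly.) */
#define set_opt(sb, opt)	EXT4_SB(sb)->s_mount_opt |= EXT4_MOUNT_##opt
#define clear_opt(sb, opt)	EXT4_SB(sb)->s_mount_opt &= ~EXT4_MOUNT_##opt

/* A call site in parse_options() shrinks accordingly: */
	set_opt(sb, QUOTA);	/* was: set_opt(sbi->s_mount_opt, QUOTA); */

Hiding the flags word behind the macro is also what the later commit in this series builds on when it adds a second field, s_mount_opt2, with matching set_opt2()/clear_opt2() macros in the same style.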
Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 6 +- fs/ext4/mballoc.c | 2 +- fs/ext4/super.c | 162 +++++++++++++++++++++++++++--------------------------- 3 files changed, 86 insertions(+), 84 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 94ce3d7a1c4b..2d93620d092e 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -917,8 +917,10 @@ struct ext4_inode_info { #define EXT4_MOUNT_DISCARD 0x40000000 /* Issue DISCARD requests */ #define EXT4_MOUNT_INIT_INODE_TABLE 0x80000000 /* Initialize uninitialized itables */ -#define clear_opt(o, opt) o &= ~EXT4_MOUNT_##opt -#define set_opt(o, opt) o |= EXT4_MOUNT_##opt +#define clear_opt(sb, opt) EXT4_SB(sb)->s_mount_opt &= \ + ~EXT4_MOUNT_##opt +#define set_opt(sb, opt) EXT4_SB(sb)->s_mount_opt |= \ + EXT4_MOUNT_##opt #define test_opt(sb, opt) (EXT4_SB(sb)->s_mount_opt & \ EXT4_MOUNT_##opt) diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 5b4d4e3a4d58..731b6f738a03 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2617,7 +2617,7 @@ static inline int ext4_issue_discard(struct super_block *sb, ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); if (ret == -EOPNOTSUPP) { ext4_warning(sb, "discard not supported, disabling"); - clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD); + clear_opt(sb, DISCARD); } return ret; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index fb15c9c0be74..cf7d9131d785 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1386,7 +1386,7 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args) sbi->s_qf_names[qtype] = NULL; return 0; } - set_opt(sbi->s_mount_opt, QUOTA); + set_opt(sb, QUOTA); return 1; } @@ -1441,21 +1441,21 @@ static int parse_options(char *options, struct super_block *sb, switch (token) { case Opt_bsd_df: ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); - clear_opt(sbi->s_mount_opt, MINIX_DF); + clear_opt(sb, MINIX_DF); break; case Opt_minix_df: ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); - set_opt(sbi->s_mount_opt, MINIX_DF); + set_opt(sb, MINIX_DF); break; case Opt_grpid: ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); - set_opt(sbi->s_mount_opt, GRPID); + set_opt(sb, GRPID); break; case Opt_nogrpid: ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38"); - clear_opt(sbi->s_mount_opt, GRPID); + clear_opt(sb, GRPID); break; case Opt_resuid: @@ -1473,38 +1473,38 @@ static int parse_options(char *options, struct super_block *sb, /* *sb_block = match_int(&args[0]); */ break; case Opt_err_panic: - clear_opt(sbi->s_mount_opt, ERRORS_CONT); - clear_opt(sbi->s_mount_opt, ERRORS_RO); - set_opt(sbi->s_mount_opt, ERRORS_PANIC); + clear_opt(sb, ERRORS_CONT); + clear_opt(sb, ERRORS_RO); + set_opt(sb, ERRORS_PANIC); break; case Opt_err_ro: - clear_opt(sbi->s_mount_opt, ERRORS_CONT); - clear_opt(sbi->s_mount_opt, ERRORS_PANIC); - set_opt(sbi->s_mount_opt, ERRORS_RO); + clear_opt(sb, ERRORS_CONT); + clear_opt(sb, ERRORS_PANIC); + set_opt(sb, ERRORS_RO); break; case Opt_err_cont: - clear_opt(sbi->s_mount_opt, ERRORS_RO); - clear_opt(sbi->s_mount_opt, ERRORS_PANIC); - set_opt(sbi->s_mount_opt, ERRORS_CONT); + clear_opt(sb, ERRORS_RO); + clear_opt(sb, ERRORS_PANIC); + set_opt(sb, ERRORS_CONT); break; case Opt_nouid32: - set_opt(sbi->s_mount_opt, NO_UID32); + set_opt(sb, NO_UID32); break; case Opt_debug: - set_opt(sbi->s_mount_opt, DEBUG); + set_opt(sb, DEBUG); break; case Opt_oldalloc: - set_opt(sbi->s_mount_opt, OLDALLOC); + set_opt(sb, OLDALLOC); break; case Opt_orlov: - clear_opt(sbi->s_mount_opt, OLDALLOC); + 
clear_opt(sb, OLDALLOC); break; #ifdef CONFIG_EXT4_FS_XATTR case Opt_user_xattr: - set_opt(sbi->s_mount_opt, XATTR_USER); + set_opt(sb, XATTR_USER); break; case Opt_nouser_xattr: - clear_opt(sbi->s_mount_opt, XATTR_USER); + clear_opt(sb, XATTR_USER); break; #else case Opt_user_xattr: @@ -1514,10 +1514,10 @@ static int parse_options(char *options, struct super_block *sb, #endif #ifdef CONFIG_EXT4_FS_POSIX_ACL case Opt_acl: - set_opt(sbi->s_mount_opt, POSIX_ACL); + set_opt(sb, POSIX_ACL); break; case Opt_noacl: - clear_opt(sbi->s_mount_opt, POSIX_ACL); + clear_opt(sb, POSIX_ACL); break; #else case Opt_acl: @@ -1536,7 +1536,7 @@ static int parse_options(char *options, struct super_block *sb, "Cannot specify journal on remount"); return 0; } - set_opt(sbi->s_mount_opt, UPDATE_JOURNAL); + set_opt(sb, UPDATE_JOURNAL); break; case Opt_journal_dev: if (is_remount) { @@ -1549,14 +1549,14 @@ static int parse_options(char *options, struct super_block *sb, *journal_devnum = option; break; case Opt_journal_checksum: - set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM); + set_opt(sb, JOURNAL_CHECKSUM); break; case Opt_journal_async_commit: - set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT); - set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM); + set_opt(sb, JOURNAL_ASYNC_COMMIT); + set_opt(sb, JOURNAL_CHECKSUM); break; case Opt_noload: - set_opt(sbi->s_mount_opt, NOLOAD); + set_opt(sb, NOLOAD); break; case Opt_commit: if (match_int(&args[0], &option)) @@ -1599,15 +1599,15 @@ static int parse_options(char *options, struct super_block *sb, return 0; } } else { - clear_opt(sbi->s_mount_opt, DATA_FLAGS); + clear_opt(sb, DATA_FLAGS); sbi->s_mount_opt |= data_opt; } break; case Opt_data_err_abort: - set_opt(sbi->s_mount_opt, DATA_ERR_ABORT); + set_opt(sb, DATA_ERR_ABORT); break; case Opt_data_err_ignore: - clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT); + clear_opt(sb, DATA_ERR_ABORT); break; #ifdef CONFIG_QUOTA case Opt_usrjquota: @@ -1647,12 +1647,12 @@ set_qf_format: break; case Opt_quota: case Opt_usrquota: - set_opt(sbi->s_mount_opt, QUOTA); - set_opt(sbi->s_mount_opt, USRQUOTA); + set_opt(sb, QUOTA); + set_opt(sb, USRQUOTA); break; case Opt_grpquota: - set_opt(sbi->s_mount_opt, QUOTA); - set_opt(sbi->s_mount_opt, GRPQUOTA); + set_opt(sb, QUOTA); + set_opt(sb, GRPQUOTA); break; case Opt_noquota: if (sb_any_quota_loaded(sb)) { @@ -1660,9 +1660,9 @@ set_qf_format: "options when quota turned on"); return 0; } - clear_opt(sbi->s_mount_opt, QUOTA); - clear_opt(sbi->s_mount_opt, USRQUOTA); - clear_opt(sbi->s_mount_opt, GRPQUOTA); + clear_opt(sb, QUOTA); + clear_opt(sb, USRQUOTA); + clear_opt(sb, GRPQUOTA); break; #else case Opt_quota: @@ -1688,7 +1688,7 @@ set_qf_format: sbi->s_mount_flags |= EXT4_MF_FS_ABORTED; break; case Opt_nobarrier: - clear_opt(sbi->s_mount_opt, BARRIER); + clear_opt(sb, BARRIER); break; case Opt_barrier: if (args[0].from) { @@ -1697,9 +1697,9 @@ set_qf_format: } else option = 1; /* No argument, default to 1 */ if (option) - set_opt(sbi->s_mount_opt, BARRIER); + set_opt(sb, BARRIER); else - clear_opt(sbi->s_mount_opt, BARRIER); + clear_opt(sb, BARRIER); break; case Opt_ignore: break; @@ -1723,17 +1723,17 @@ set_qf_format: "Ignoring deprecated bh option"); break; case Opt_i_version: - set_opt(sbi->s_mount_opt, I_VERSION); + set_opt(sb, I_VERSION); sb->s_flags |= MS_I_VERSION; break; case Opt_nodelalloc: - clear_opt(sbi->s_mount_opt, DELALLOC); + clear_opt(sb, DELALLOC); break; case Opt_mblk_io_submit: - set_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT); + set_opt(sb, MBLK_IO_SUBMIT); break; case 
Opt_nomblk_io_submit: - clear_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT); + clear_opt(sb, MBLK_IO_SUBMIT); break; case Opt_stripe: if (match_int(&args[0], &option)) @@ -1743,13 +1743,13 @@ set_qf_format: sbi->s_stripe = option; break; case Opt_delalloc: - set_opt(sbi->s_mount_opt, DELALLOC); + set_opt(sb, DELALLOC); break; case Opt_block_validity: - set_opt(sbi->s_mount_opt, BLOCK_VALIDITY); + set_opt(sb, BLOCK_VALIDITY); break; case Opt_noblock_validity: - clear_opt(sbi->s_mount_opt, BLOCK_VALIDITY); + clear_opt(sb, BLOCK_VALIDITY); break; case Opt_inode_readahead_blks: if (match_int(&args[0], &option)) @@ -1773,7 +1773,7 @@ set_qf_format: option); break; case Opt_noauto_da_alloc: - set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC); + set_opt(sb, NO_AUTO_DA_ALLOC); break; case Opt_auto_da_alloc: if (args[0].from) { @@ -1782,24 +1782,24 @@ set_qf_format: } else option = 1; /* No argument, default to 1 */ if (option) - clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC); + clear_opt(sb, NO_AUTO_DA_ALLOC); else - set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC); + set_opt(sb,NO_AUTO_DA_ALLOC); break; case Opt_discard: - set_opt(sbi->s_mount_opt, DISCARD); + set_opt(sb, DISCARD); break; case Opt_nodiscard: - clear_opt(sbi->s_mount_opt, DISCARD); + clear_opt(sb, DISCARD); break; case Opt_dioread_nolock: - set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); + set_opt(sb, DIOREAD_NOLOCK); break; case Opt_dioread_lock: - clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); + clear_opt(sb, DIOREAD_NOLOCK); break; case Opt_init_inode_table: - set_opt(sbi->s_mount_opt, INIT_INODE_TABLE); + set_opt(sb, INIT_INODE_TABLE); if (args[0].from) { if (match_int(&args[0], &option)) return 0; @@ -1810,7 +1810,7 @@ set_qf_format: sbi->s_li_wait_mult = option; break; case Opt_noinit_inode_table: - clear_opt(sbi->s_mount_opt, INIT_INODE_TABLE); + clear_opt(sb, INIT_INODE_TABLE); break; default: ext4_msg(sb, KERN_ERR, @@ -1822,10 +1822,10 @@ set_qf_format: #ifdef CONFIG_QUOTA if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) - clear_opt(sbi->s_mount_opt, USRQUOTA); + clear_opt(sb, USRQUOTA); if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA]) - clear_opt(sbi->s_mount_opt, GRPQUOTA); + clear_opt(sb, GRPQUOTA); if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) { ext4_msg(sb, KERN_ERR, "old and new quota " @@ -3071,41 +3071,41 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) /* Set defaults before we parse the mount options */ def_mount_opts = le32_to_cpu(es->s_default_mount_opts); - set_opt(sbi->s_mount_opt, INIT_INODE_TABLE); + set_opt(sb, INIT_INODE_TABLE); if (def_mount_opts & EXT4_DEFM_DEBUG) - set_opt(sbi->s_mount_opt, DEBUG); + set_opt(sb, DEBUG); if (def_mount_opts & EXT4_DEFM_BSDGROUPS) { ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups", "2.6.38"); - set_opt(sbi->s_mount_opt, GRPID); + set_opt(sb, GRPID); } if (def_mount_opts & EXT4_DEFM_UID16) - set_opt(sbi->s_mount_opt, NO_UID32); + set_opt(sb, NO_UID32); #ifdef CONFIG_EXT4_FS_XATTR if (def_mount_opts & EXT4_DEFM_XATTR_USER) - set_opt(sbi->s_mount_opt, XATTR_USER); + set_opt(sb, XATTR_USER); #endif #ifdef CONFIG_EXT4_FS_POSIX_ACL if (def_mount_opts & EXT4_DEFM_ACL) - set_opt(sbi->s_mount_opt, POSIX_ACL); + set_opt(sb, POSIX_ACL); #endif if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) - set_opt(sbi->s_mount_opt, JOURNAL_DATA); + set_opt(sb, JOURNAL_DATA); else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) - set_opt(sbi->s_mount_opt, ORDERED_DATA); + 
set_opt(sb, ORDERED_DATA); else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK) - set_opt(sbi->s_mount_opt, WRITEBACK_DATA); + set_opt(sb, WRITEBACK_DATA); if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC) - set_opt(sbi->s_mount_opt, ERRORS_PANIC); + set_opt(sb, ERRORS_PANIC); else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE) - set_opt(sbi->s_mount_opt, ERRORS_CONT); + set_opt(sb, ERRORS_CONT); else - set_opt(sbi->s_mount_opt, ERRORS_RO); + set_opt(sb, ERRORS_RO); if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY) - set_opt(sbi->s_mount_opt, BLOCK_VALIDITY); + set_opt(sb, BLOCK_VALIDITY); if (def_mount_opts & EXT4_DEFM_DISCARD) - set_opt(sbi->s_mount_opt, DISCARD); + set_opt(sb, DISCARD); sbi->s_resuid = le16_to_cpu(es->s_def_resuid); sbi->s_resgid = le16_to_cpu(es->s_def_resgid); @@ -3114,7 +3114,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME; if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0) - set_opt(sbi->s_mount_opt, BARRIER); + set_opt(sb, BARRIER); /* * enable delayed allocation by default @@ -3122,7 +3122,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) */ if (!IS_EXT3_SB(sb) && ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) - set_opt(sbi->s_mount_opt, DELALLOC); + set_opt(sb, DELALLOC); if (!parse_options((char *) sbi->s_es->s_mount_opts, sb, &journal_devnum, &journal_ioprio, NULL, 0)) { @@ -3425,8 +3425,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) "suppressed and not mounted read-only"); goto failed_mount_wq; } else { - clear_opt(sbi->s_mount_opt, DATA_FLAGS); - set_opt(sbi->s_mount_opt, WRITEBACK_DATA); + clear_opt(sb, DATA_FLAGS); + set_opt(sb, WRITEBACK_DATA); sbi->s_journal = NULL; needs_recovery = 0; goto no_journal; @@ -3464,9 +3464,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) */ if (jbd2_journal_check_available_features (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) - set_opt(sbi->s_mount_opt, ORDERED_DATA); + set_opt(sb, ORDERED_DATA); else - set_opt(sbi->s_mount_opt, JOURNAL_DATA); + set_opt(sb, JOURNAL_DATA); break; case EXT4_MOUNT_ORDERED_DATA: @@ -3556,18 +3556,18 @@ no_journal: (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) { ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - " "requested data journaling mode"); - clear_opt(sbi->s_mount_opt, DELALLOC); + clear_opt(sb, DELALLOC); } if (test_opt(sb, DIOREAD_NOLOCK)) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock " "option - requested data journaling mode"); - clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); + clear_opt(sb, DIOREAD_NOLOCK); } if (sb->s_blocksize < PAGE_SIZE) { ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock " "option - block size is too small"); - clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); + clear_opt(sb, DIOREAD_NOLOCK); } } -- cgit v1.2.2 From 673c610033a8202c037ecd068c7a235495acda17 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 15 Dec 2010 20:28:48 -0500 Subject: ext4: Move struct ext4_mount_options from ext4.h to super.c Move the ext4_mount_options structure definition from ext4.h, since it is only used in super.c. 
Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 16 ---------------- fs/ext4/super.c | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 2d93620d092e..ddae3c435138 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -561,22 +561,6 @@ struct ext4_new_group_data { #define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION #endif - -/* - * Mount options - */ -struct ext4_mount_options { - unsigned long s_mount_opt; - uid_t s_resuid; - gid_t s_resgid; - unsigned long s_commit_interval; - u32 s_min_batch_time, s_max_batch_time; -#ifdef CONFIG_QUOTA - int s_jquota_fmt; - char *s_qf_names[MAXQUOTAS]; -#endif -}; - /* Max physical block we can addres w/o extents */ #define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF diff --git a/fs/ext4/super.c b/fs/ext4/super.c index cf7d9131d785..7aa3a790363a 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4166,6 +4166,21 @@ static int ext4_unfreeze(struct super_block *sb) return 0; } +/* + * Structure to save mount options for ext4_remount's benefit + */ +struct ext4_mount_options { + unsigned long s_mount_opt; + uid_t s_resuid; + gid_t s_resgid; + unsigned long s_commit_interval; + u32 s_min_batch_time, s_max_batch_time; +#ifdef CONFIG_QUOTA + int s_jquota_fmt; + char *s_qf_names[MAXQUOTAS]; +#endif +}; + static int ext4_remount(struct super_block *sb, int *flags, char *data) { struct ext4_super_block *es; -- cgit v1.2.2 From a2595b8aa67011419dae26b47e474f46df902989 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 15 Dec 2010 20:30:48 -0500 Subject: ext4: Add second mount options field since the s_mount_opt is full up Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 8 ++++++++ fs/ext4/super.c | 7 +++++-- 2 files changed, 13 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index ddae3c435138..17baecbf8cda 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -908,6 +908,13 @@ struct ext4_inode_info { #define test_opt(sb, opt) (EXT4_SB(sb)->s_mount_opt & \ EXT4_MOUNT_##opt) +#define clear_opt2(sb, opt) EXT4_SB(sb)->s_mount_opt2 &= \ + ~EXT4_MOUNT2_##opt +#define set_opt2(sb, opt) EXT4_SB(sb)->s_mount_opt2 |= \ + EXT4_MOUNT2_##opt +#define test_opt2(sb, opt) (EXT4_SB(sb)->s_mount_opt2 & \ + EXT4_MOUNT2_##opt) + #define ext4_set_bit ext2_set_bit #define ext4_set_bit_atomic ext2_set_bit_atomic #define ext4_clear_bit ext2_clear_bit @@ -1073,6 +1080,7 @@ struct ext4_sb_info { struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */ struct buffer_head **s_group_desc; unsigned int s_mount_opt; + unsigned int s_mount_opt2; unsigned int s_mount_flags; ext4_fsblk_t s_sb_block; uid_t s_resuid; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 7aa3a790363a..072ff973ff2b 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1895,12 +1895,12 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, ext4_commit_super(sb, 1); if (test_opt(sb, DEBUG)) printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, " - "bpg=%lu, ipg=%lu, mo=%04x]\n", + "bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n", sb->s_blocksize, sbi->s_groups_count, EXT4_BLOCKS_PER_GROUP(sb), EXT4_INODES_PER_GROUP(sb), - sbi->s_mount_opt); + sbi->s_mount_opt, sbi->s_mount_opt2); return res; } @@ -4171,6 +4171,7 @@ static int ext4_unfreeze(struct super_block *sb) */ struct ext4_mount_options { unsigned long s_mount_opt; + unsigned long s_mount_opt2; uid_t s_resuid; gid_t s_resgid; unsigned long s_commit_interval; @@ -4201,6 +4202,7 @@ static int 
ext4_remount(struct super_block *sb, int *flags, char *data) lock_super(sb); old_sb_flags = sb->s_flags; old_opts.s_mount_opt = sbi->s_mount_opt; + old_opts.s_mount_opt2 = sbi->s_mount_opt2; old_opts.s_resuid = sbi->s_resuid; old_opts.s_resgid = sbi->s_resgid; old_opts.s_commit_interval = sbi->s_commit_interval; @@ -4354,6 +4356,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) restore_opts: sb->s_flags = old_sb_flags; sbi->s_mount_opt = old_opts.s_mount_opt; + sbi->s_mount_opt2 = old_opts.s_mount_opt2; sbi->s_resuid = old_opts.s_resuid; sbi->s_resgid = old_opts.s_resgid; sbi->s_commit_interval = old_opts.s_commit_interval; -- cgit v1.2.2 From 50308d813bf26500fed671882469939fd19403a3 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Thu, 4 Nov 2010 15:14:11 +0800 Subject: ocfs2: Try to free truncate log when meeting ENOSPC in write. Recently, one of our colleagues meet with a problem that if we write/delete a 32mb files repeatly, we will get an ENOSPC in the end. And the corresponding bug is 1288. http://oss.oracle.com/bugzilla/show_bug.cgi?id=1288 The real problem is that although we have freed the clusters, they are in truncate log and they will be summed up so that we can free them once in a whole. So this patch just try to resolve it. In case we see -ENOSPC in ocfs2_write_begin_no_lock, we will check whether the truncate log has enough clusters for our need, if yes, we will try to flush the truncate log at that point and try again. This method is inspired by Mark Fasheh . Thanks. Cc: Mark Fasheh Signed-off-by: Tao Ma Signed-off-by: Joel Becker --- fs/ocfs2/alloc.c | 3 +++ fs/ocfs2/aops.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++++++++++- fs/ocfs2/ocfs2.h | 5 +++++ 3 files changed, 66 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 592fae5007d1..8ec418dd9e36 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -5858,6 +5858,7 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb, ocfs2_journal_dirty(handle, tl_bh); + osb->truncated_clusters += num_clusters; bail: mlog_exit(status); return status; @@ -5929,6 +5930,8 @@ static int ocfs2_replay_truncate_records(struct ocfs2_super *osb, i--; } + osb->truncated_clusters = 0; + bail: mlog_exit(status); return status; diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index f1e962cb3b73..d55a10e2f300 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -1627,6 +1627,43 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh, return ret; } +/* + * Try to flush truncate logs if we can free enough clusters from it. + * As for return value, "< 0" means error, "0" no space and "1" means + * we have freed enough spaces and let the caller try to allocate again. + */ +static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb, + unsigned int needed) +{ + tid_t target; + int ret = 0; + unsigned int truncated_clusters; + + mutex_lock(&osb->osb_tl_inode->i_mutex); + truncated_clusters = osb->truncated_clusters; + mutex_unlock(&osb->osb_tl_inode->i_mutex); + + /* + * Check whether we can succeed in allocating if we free + * the truncate log. 
+ */ + if (truncated_clusters < needed) + goto out; + + ret = ocfs2_flush_truncate_log(osb); + if (ret) { + mlog_errno(ret); + goto out; + } + + if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) { + jbd2_log_wait_commit(osb->journal->j_journal, target); + ret = 1; + } +out: + return ret; +} + int ocfs2_write_begin_nolock(struct file *filp, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, @@ -1634,7 +1671,7 @@ int ocfs2_write_begin_nolock(struct file *filp, struct buffer_head *di_bh, struct page *mmap_page) { int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS; - unsigned int clusters_to_alloc, extents_to_split; + unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0; struct ocfs2_write_ctxt *wc; struct inode *inode = mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); @@ -1643,7 +1680,9 @@ int ocfs2_write_begin_nolock(struct file *filp, struct ocfs2_alloc_context *meta_ac = NULL; handle_t *handle; struct ocfs2_extent_tree et; + int try_free = 1, ret1; +try_again: ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh); if (ret) { mlog_errno(ret); @@ -1678,6 +1717,7 @@ int ocfs2_write_begin_nolock(struct file *filp, mlog_errno(ret); goto out; } else if (ret == 1) { + clusters_need = wc->w_clen; ret = ocfs2_refcount_cow(inode, filp, di_bh, wc->w_cpos, wc->w_clen, UINT_MAX); if (ret) { @@ -1692,6 +1732,7 @@ int ocfs2_write_begin_nolock(struct file *filp, mlog_errno(ret); goto out; } + clusters_need += clusters_to_alloc; di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; @@ -1814,6 +1855,22 @@ out: ocfs2_free_alloc_context(data_ac); if (meta_ac) ocfs2_free_alloc_context(meta_ac); + + if (ret == -ENOSPC && try_free) { + /* + * Try to free some truncate log so that we can have enough + * clusters to allocate. + */ + try_free = 0; + + ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need); + if (ret1 == 1) + goto try_again; + + if (ret1 < 0) + mlog_errno(ret1); + } + return ret; } diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 70dd3b1798f1..51cd6898e7f1 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -420,6 +420,11 @@ struct ocfs2_super struct inode *osb_tl_inode; struct buffer_head *osb_tl_bh; struct delayed_work osb_truncate_log_wq; + /* + * How many clusters in our truncate log. + * It must be protected by osb_tl_inode->i_mutex. + */ + unsigned int truncated_clusters; struct ocfs2_node_map osb_recovering_orphan_dirs; unsigned int *osb_orphan_wipes; -- cgit v1.2.2 From 8e17d16f401f7c60908726e070bfa5cbdf31e2f3 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Fri, 19 Nov 2010 15:06:49 -0800 Subject: ocfs2/dlm: Cleanup mlogs in dlmthread.c, dlmast.c and dlmdomain.c Add the domain name and the resource name in the mlogs. 
Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/dlm/dlmast.c | 76 ++++++++++++++++++--------- fs/ocfs2/dlm/dlmdomain.c | 2 - fs/ocfs2/dlm/dlmthread.c | 132 ++++++++++++++++++++++++----------------------- 3 files changed, 120 insertions(+), 90 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c index f44999156839..3a3ed4bb794b 100644 --- a/fs/ocfs2/dlm/dlmast.c +++ b/fs/ocfs2/dlm/dlmast.c @@ -90,19 +90,29 @@ static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) { - mlog_entry_void(); + struct dlm_lock_resource *res; BUG_ON(!dlm); BUG_ON(!lock); + res = lock->lockres; + assert_spin_locked(&dlm->ast_lock); + if (!list_empty(&lock->ast_list)) { - mlog(ML_ERROR, "ast list not empty!! pending=%d, newlevel=%d\n", + mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, " + "AST list not empty, pending %d, newlevel %d\n", + dlm->name, res->lockname.len, res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), lock->ast_pending, lock->ml.type); BUG(); } if (lock->ast_pending) - mlog(0, "lock has an ast getting flushed right now\n"); + mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n", + dlm->name, res->lockname.len, res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); /* putting lock on list, add a ref */ dlm_lock_get(lock); @@ -110,9 +120,10 @@ void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) /* check to see if this ast obsoletes the bast */ if (dlm_should_cancel_bast(dlm, lock)) { - struct dlm_lock_resource *res = lock->lockres; - mlog(0, "%s: cancelling bast for %.*s\n", - dlm->name, res->lockname.len, res->lockname.name); + mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n", + dlm->name, res->lockname.len, res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); lock->bast_pending = 0; list_del_init(&lock->bast_list); lock->ml.highest_blocked = LKM_IVMODE; @@ -134,8 +145,6 @@ void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) { - mlog_entry_void(); - BUG_ON(!dlm); BUG_ON(!lock); @@ -147,15 +156,21 @@ void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock) void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) { - mlog_entry_void(); + struct dlm_lock_resource *res; BUG_ON(!dlm); BUG_ON(!lock); + assert_spin_locked(&dlm->ast_lock); + res = lock->lockres; + BUG_ON(!list_empty(&lock->bast_list)); if (lock->bast_pending) - mlog(0, "lock has a bast getting flushed right now\n"); + mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n", + dlm->name, res->lockname.len, res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); /* putting lock on list, add a ref */ dlm_lock_get(lock); @@ -167,8 +182,6 @@ void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock) { - mlog_entry_void(); - BUG_ON(!dlm); BUG_ON(!lock); @@ -213,7 +226,10 @@ void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, dlm_astlockfunc_t *fn; struct dlm_lockstatus *lksb; - mlog_entry_void(); + mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name, + 
res->lockname.len, res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); lksb = lock->lksb; fn = lock->ast; @@ -231,7 +247,10 @@ int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lockstatus *lksb; int lksbflags; - mlog_entry_void(); + mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name, + res->lockname.len, res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie))); lksb = lock->lksb; BUG_ON(lock->ml.node == dlm->node_num); @@ -250,9 +269,14 @@ void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, { dlm_bastlockfunc_t *fn = lock->bast; - mlog_entry_void(); BUG_ON(lock->ml.node != dlm->node_num); + mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n", + dlm->name, res->lockname.len, res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), + blocked_type); + (*fn)(lock->astdata, blocked_type); } @@ -332,7 +356,8 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data, /* cannot get a proxy ast message if this node owns it */ BUG_ON(res->owner == dlm->node_num); - mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name); + mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len, + res->lockname.name); spin_lock(&res->spinlock); if (res->state & DLM_LOCK_RES_RECOVERING) { @@ -382,8 +407,12 @@ do_ast: if (past->type == DLM_AST) { /* do not alter lock refcount. switching lists. */ list_move_tail(&lock->list, &res->granted); - mlog(0, "ast: Adding to granted list... type=%d, " - "convert_type=%d\n", lock->ml.type, lock->ml.convert_type); + mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n", + dlm->name, res->lockname.len, res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(cookie)), + lock->ml.type, lock->ml.convert_type); + if (lock->ml.convert_type != LKM_IVMODE) { lock->ml.type = lock->ml.convert_type; lock->ml.convert_type = LKM_IVMODE; @@ -426,9 +455,9 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, size_t veclen = 1; int status; - mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n", - res->lockname.len, res->lockname.name, lock->ml.node, - msg_type, blocked_type); + mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name, + res->lockname.len, res->lockname.name, lock->ml.node, msg_type, + blocked_type); memset(&past, 0, sizeof(struct dlm_proxy_ast)); past.node_idx = dlm->node_num; @@ -441,7 +470,6 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, vec[0].iov_len = sizeof(struct dlm_proxy_ast); vec[0].iov_base = &past; if (flags & DLM_LKSB_GET_LVB) { - mlog(0, "returning requested LVB data\n"); be32_add_cpu(&past.flags, LKM_GET_LVB); vec[1].iov_len = DLM_LVB_LEN; vec[1].iov_base = lock->lksb->lvb; @@ -451,8 +479,8 @@ int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen, lock->ml.node, &status); if (ret < 0) - mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " - "node %u\n", ret, DLM_PROXY_AST_MSG, dlm->key, + mlog(ML_ERROR, "%s: res %.*s, error %d send AST to node %u\n", + dlm->name, res->lockname.len, res->lockname.name, ret, lock->ml.node); else { if (status == DLM_RECOVERING) { diff --git 
a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index cc2aaa96cfe5..fcc40c33489d 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -460,8 +460,6 @@ redo_bucket: } cond_resched_lock(&dlm->spinlock); num += n; - mlog(0, "%s: touched %d lockreses in bucket %d " - "(tot=%d)\n", dlm->name, n, i, num); } spin_unlock(&dlm->spinlock); wake_up(&dlm->dlm_thread_wq); diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c index 2211acf33d9b..1d6d1d22c471 100644 --- a/fs/ocfs2/dlm/dlmthread.c +++ b/fs/ocfs2/dlm/dlmthread.c @@ -122,15 +122,13 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res) void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { - mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); - assert_spin_locked(&dlm->spinlock); assert_spin_locked(&res->spinlock); if (__dlm_lockres_unused(res)){ if (list_empty(&res->purge)) { - mlog(0, "putting lockres %.*s:%p onto purge list\n", - res->lockname.len, res->lockname.name, res); + mlog(0, "%s: Adding res %.*s to purge list\n", + dlm->name, res->lockname.len, res->lockname.name); res->last_used = jiffies; dlm_lockres_get(res); @@ -138,8 +136,8 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, dlm->purge_count++; } } else if (!list_empty(&res->purge)) { - mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n", - res->lockname.len, res->lockname.name, res, res->owner); + mlog(0, "%s: Removing res %.*s from purge list\n", + dlm->name, res->lockname.len, res->lockname.name); list_del_init(&res->purge); dlm_lockres_put(res); @@ -150,7 +148,6 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { - mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); spin_lock(&dlm->spinlock); spin_lock(&res->spinlock); @@ -171,9 +168,8 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm, master = (res->owner == dlm->node_num); - - mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len, - res->lockname.name, master); + mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name, + res->lockname.len, res->lockname.name, master); if (!master) { res->state |= DLM_LOCK_RES_DROPPING_REF; @@ -189,27 +185,25 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm, /* clear our bit from the master's refmap, ignore errors */ ret = dlm_drop_lockres_ref(dlm, res); if (ret < 0) { - mlog_errno(ret); + mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name, + res->lockname.len, res->lockname.name, ret); if (!dlm_is_host_down(ret)) BUG(); } - mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n", - dlm->name, res->lockname.len, res->lockname.name, ret); spin_lock(&dlm->spinlock); spin_lock(&res->spinlock); } if (!list_empty(&res->purge)) { - mlog(0, "removing lockres %.*s:%p from purgelist, " - "master = %d\n", res->lockname.len, res->lockname.name, - res, master); + mlog(0, "%s: Removing res %.*s from purgelist, master %d\n", + dlm->name, res->lockname.len, res->lockname.name, master); list_del_init(&res->purge); dlm_lockres_put(res); dlm->purge_count--; } if (!__dlm_lockres_unused(res)) { - mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n", + mlog(ML_ERROR, "%s: res %.*s in use after deref\n", dlm->name, res->lockname.len, res->lockname.name); __dlm_print_one_lock_resource(res); BUG(); @@ -266,10 +260,10 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm, unused = __dlm_lockres_unused(lockres); if (!unused || (lockres->state & DLM_LOCK_RES_MIGRATING)) { - mlog(0, "lockres %s:%.*s: 
is in use or " - "being remastered, used %d, state %d\n", - dlm->name, lockres->lockname.len, - lockres->lockname.name, !unused, lockres->state); + mlog(0, "%s: res %.*s is in use or being remastered, " + "used %d, state %d\n", dlm->name, + lockres->lockname.len, lockres->lockname.name, + !unused, lockres->state); list_move_tail(&dlm->purge_list, &lockres->purge); spin_unlock(&lockres->spinlock); continue; @@ -296,15 +290,12 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm, struct list_head *head; int can_grant = 1; - //mlog(0, "res->lockname.len=%d\n", res->lockname.len); - //mlog(0, "res->lockname.name=%p\n", res->lockname.name); - //mlog(0, "shuffle res %.*s\n", res->lockname.len, - // res->lockname.name); - - /* because this function is called with the lockres + /* + * Because this function is called with the lockres * spinlock, and because we know that it is not migrating/ * recovering/in-progress, it is fine to reserve asts and - * basts right before queueing them all throughout */ + * basts right before queueing them all throughout + */ assert_spin_locked(&dlm->ast_lock); assert_spin_locked(&res->spinlock); BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING| @@ -314,13 +305,13 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm, converting: if (list_empty(&res->converting)) goto blocked; - mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len, - res->lockname.name); + mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name, + res->lockname.len, res->lockname.name); target = list_entry(res->converting.next, struct dlm_lock, list); if (target->ml.convert_type == LKM_IVMODE) { - mlog(ML_ERROR, "%.*s: converting a lock with no " - "convert_type!\n", res->lockname.len, res->lockname.name); + mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n", + dlm->name, res->lockname.len, res->lockname.name); BUG(); } head = &res->granted; @@ -365,9 +356,12 @@ converting: spin_lock(&target->spinlock); BUG_ON(target->ml.highest_blocked != LKM_IVMODE); - mlog(0, "calling ast for converting lock: %.*s, have: %d, " - "granting: %d, node: %u\n", res->lockname.len, - res->lockname.name, target->ml.type, + mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type " + "%d => %d, node %u\n", dlm->name, res->lockname.len, + res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)), + target->ml.type, target->ml.convert_type, target->ml.node); target->ml.type = target->ml.convert_type; @@ -428,11 +422,14 @@ blocked: spin_lock(&target->spinlock); BUG_ON(target->ml.highest_blocked != LKM_IVMODE); - mlog(0, "calling ast for blocked lock: %.*s, granting: %d, " - "node: %u\n", res->lockname.len, res->lockname.name, + mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, " + "node %u\n", dlm->name, res->lockname.len, + res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)), target->ml.type, target->ml.node); - // target->ml.type is already correct + /* target->ml.type is already correct */ list_move_tail(&target->list, &res->granted); BUG_ON(!target->lksb); @@ -453,7 +450,6 @@ leave: /* must have NO locks when calling this with res !=NULL * */ void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { - mlog_entry("dlm=%p, res=%p\n", dlm, res); if (res) { spin_lock(&dlm->spinlock); spin_lock(&res->spinlock); @@ -466,8 +462,6 @@ void dlm_kick_thread(struct dlm_ctxt *dlm, struct 
dlm_lock_resource *res) void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { - mlog_entry("dlm=%p, res=%p\n", dlm, res); - assert_spin_locked(&dlm->spinlock); assert_spin_locked(&res->spinlock); @@ -484,13 +478,16 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) res->state |= DLM_LOCK_RES_DIRTY; } } + + mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len, + res->lockname.name); } /* Launch the NM thread for the mounted volume */ int dlm_launch_thread(struct dlm_ctxt *dlm) { - mlog(0, "starting dlm thread...\n"); + mlog(0, "Starting dlm_thread...\n"); dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread"); if (IS_ERR(dlm->dlm_thread_task)) { @@ -505,7 +502,7 @@ int dlm_launch_thread(struct dlm_ctxt *dlm) void dlm_complete_thread(struct dlm_ctxt *dlm) { if (dlm->dlm_thread_task) { - mlog(ML_KTHREAD, "waiting for dlm thread to exit\n"); + mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n"); kthread_stop(dlm->dlm_thread_task); dlm->dlm_thread_task = NULL; } @@ -536,7 +533,12 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm) /* get an extra ref on lock */ dlm_lock_get(lock); res = lock->lockres; - mlog(0, "delivering an ast for this lockres\n"); + mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, " + "node %u\n", dlm->name, res->lockname.len, + res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), + lock->ml.type, lock->ml.node); BUG_ON(!lock->ast_pending); @@ -557,9 +559,9 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm) /* possible that another ast was queued while * we were delivering the last one */ if (!list_empty(&lock->ast_list)) { - mlog(0, "aha another ast got queued while " - "we were finishing the last one. will " - "keep the ast_pending flag set.\n"); + mlog(0, "%s: res %.*s, AST queued while flushing last " + "one\n", dlm->name, res->lockname.len, + res->lockname.name); } else lock->ast_pending = 0; @@ -590,8 +592,12 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm) dlm_lock_put(lock); spin_unlock(&dlm->ast_lock); - mlog(0, "delivering a bast for this lockres " - "(blocked = %d\n", hi); + mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, " + "blocked %d, node %u\n", + dlm->name, res->lockname.len, res->lockname.name, + dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), + dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), + hi, lock->ml.node); if (lock->ml.node != dlm->node_num) { ret = dlm_send_proxy_bast(dlm, res, lock, hi); @@ -605,9 +611,9 @@ static void dlm_flush_asts(struct dlm_ctxt *dlm) /* possible that another bast was queued while * we were delivering the last one */ if (!list_empty(&lock->bast_list)) { - mlog(0, "aha another bast got queued while " - "we were finishing the last one. will " - "keep the bast_pending flag set.\n"); + mlog(0, "%s: res %.*s, BAST queued while flushing last " + "one\n", dlm->name, res->lockname.len, + res->lockname.name); } else lock->bast_pending = 0; @@ -675,11 +681,12 @@ static int dlm_thread(void *data) spin_lock(&res->spinlock); if (res->owner != dlm->node_num) { __dlm_print_one_lock_resource(res); - mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n", - res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no", - res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no", - res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no", - res->state & DLM_LOCK_RES_DIRTY ? 
"yes" : "no"); + mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d," + " dirty %d\n", dlm->name, + !!(res->state & DLM_LOCK_RES_IN_PROGRESS), + !!(res->state & DLM_LOCK_RES_MIGRATING), + !!(res->state & DLM_LOCK_RES_RECOVERING), + !!(res->state & DLM_LOCK_RES_DIRTY)); } BUG_ON(res->owner != dlm->node_num); @@ -693,8 +700,8 @@ static int dlm_thread(void *data) res->state &= ~DLM_LOCK_RES_DIRTY; spin_unlock(&res->spinlock); spin_unlock(&dlm->ast_lock); - mlog(0, "delaying list shuffling for in-" - "progress lockres %.*s, state=%d\n", + mlog(0, "%s: res %.*s, inprogress, delay list " + "shuffle, state %d\n", dlm->name, res->lockname.len, res->lockname.name, res->state); delay = 1; @@ -706,10 +713,6 @@ static int dlm_thread(void *data) * spinlock and do NOT have the dlm lock. * safe to reserve/queue asts and run the lists. */ - mlog(0, "calling dlm_shuffle_lists with dlm=%s, " - "res=%.*s\n", dlm->name, - res->lockname.len, res->lockname.name); - /* called while holding lockres lock */ dlm_shuffle_lists(dlm, res); res->state &= ~DLM_LOCK_RES_DIRTY; @@ -733,7 +736,8 @@ in_progress: /* unlikely, but we may need to give time to * other tasks */ if (!--n) { - mlog(0, "throttling dlm_thread\n"); + mlog(0, "%s: Throttling dlm thread\n", + dlm->name); break; } } -- cgit v1.2.2 From 66f4500573fe5a1b455e5f7b30068a623a94117f Mon Sep 17 00:00:00 2001 From: Wengang Wang Date: Wed, 8 Dec 2010 20:34:39 +0800 Subject: ocfs2/dlm: make existing convertion precedent over new lock Make existing convertion precedent over new lock. It makes o2dlm locking more like fair locking. Signed-off-by: Wengang Wang Signed-off-by: Joel Becker --- fs/ocfs2/dlm/dlmlock.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c index 69cf369961c4..7009292aac5a 100644 --- a/fs/ocfs2/dlm/dlmlock.c +++ b/fs/ocfs2/dlm/dlmlock.c @@ -106,6 +106,9 @@ static int dlm_can_grant_new_lock(struct dlm_lock_resource *res, if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type)) return 0; + if (!dlm_lock_compatible(tmplock->ml.convert_type, + lock->ml.type)) + return 0; } return 1; -- cgit v1.2.2 From 2b190ce9bf923fb1fd8ccff18e9bab72da40da06 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Tue, 14 Dec 2010 14:14:27 -0800 Subject: ocfs2/cluster: Pin the remote node item in configfs o2net pins the node item of the remote node in configfs before initiating the connection. It is unpinned on disconnect. This is to prevent the node item from being unlinked while it is still in use. 
Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/cluster/tcp.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 9aa426e42123..92de96cd247d 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -355,6 +355,7 @@ static void sc_kref_release(struct kref *kref) sc->sc_sock = NULL; } + o2nm_undepend_item(&sc->sc_node->nd_item); o2nm_node_put(sc->sc_node); sc->sc_node = NULL; @@ -376,6 +377,7 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node) { struct o2net_sock_container *sc, *ret = NULL; struct page *page = NULL; + int status = 0; page = alloc_page(GFP_NOFS); sc = kzalloc(sizeof(*sc), GFP_NOFS); @@ -386,6 +388,13 @@ static struct o2net_sock_container *sc_alloc(struct o2nm_node *node) o2nm_node_get(node); sc->sc_node = node; + /* pin the node item of the remote node */ + status = o2nm_depend_item(&node->nd_item); + if (status) { + mlog_errno(status); + o2nm_node_put(node); + goto out; + } INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed); INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty); INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc); -- cgit v1.2.2 From ffee223a9af4c5124beb56fa5c84132949923d23 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Tue, 14 Dec 2010 14:14:28 -0800 Subject: ocfs2/cluster: Remove dropped region from o2hb quorum region bitmap Patch removes a dropped region from the quorum region bitmap maintained by o2hb. Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/cluster/heartbeat.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 9f26ac9be2a4..9f21dd785364 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -2009,6 +2009,7 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group, if (o2hb_global_heartbeat_active()) { clear_bit(reg->hr_region_num, o2hb_region_bitmap); clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); + clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); } hb_task = reg->hr_task; reg->hr_task = NULL; -- cgit v1.2.2 From 58a3158a5d17ddf4894db9e8ccaf92093ff8e42e Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Tue, 14 Dec 2010 14:14:29 -0800 Subject: ocfs2/cluster: Pin/unpin o2hb regions This patch adds support for pinning o2hb regions in configfs. Pinning disallows a region to be cleanly stopped as long as it has an active dependent user (read o2dlm). In local heartbeat mode, the region uuid matching the domain name is pinned as long as the o2dlm domain is active. In global heartbeat mode, all regions are pinned as long as there is atleast one dependent user and the region count is 3 or less. All regions are unpinned if the number of dependent users is zero or region count is greater than 3. 
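(Illustrative aside, not part of the patch: the policy described above can be read as a single predicate plus the configfs dependency calls that implement it. A minimal sketch follows; should_pin_all_regions() and pin_or_unpin_region() are invented names, while O2HB_PIN_CUT_OFF, the dependent-user count and o2nm_depend_item()/o2nm_undepend_item() mirror what the patch itself uses.)

/* Sketch only: the global-heartbeat pinning decision and the mechanism
 * behind it. Everything except the o2nm_* calls and the cut-off value
 * is hypothetical and simplified (no locking, no error reporting). */
#define O2HB_PIN_CUT_OFF	3

static int should_pin_all_regions(unsigned int dependent_users,
				  unsigned int quorum_regions)
{
	/* Pin while o2dlm (a dependent user) is active and the quorum
	 * region count is small; unpin once users drop to zero or the
	 * count exceeds the cut-off. */
	return dependent_users > 0 && quorum_regions <= O2HB_PIN_CUT_OFF;
}

static void pin_or_unpin_region(struct o2hb_region *reg, int pin)
{
	/* Pinning is a configfs dependency on the region item: while the
	 * dependency is held, the region directory cannot be removed, so
	 * heartbeat cannot be stopped underneath its users. */
	if (pin) {
		if (o2nm_depend_item(&reg->hr_item))
			return;		/* pin failed; caller would log and bail */
	} else {
		o2nm_undepend_item(&reg->hr_item);
	}
}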
Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/cluster/heartbeat.c | 216 +++++++++++++++++++++++++++++++++++-------- fs/ocfs2/dlm/dlmdomain.c | 8 +- 2 files changed, 182 insertions(+), 42 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 9f21dd785364..ad2e41d6879d 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -132,6 +132,33 @@ char *o2hb_heartbeat_mode_desc[O2HB_HEARTBEAT_NUM_MODES] = { unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD; unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL; +/* + * o2hb_dependent_users tracks the number of registered callbacks that depend + * on heartbeat. o2net and o2dlm are two entities that register this callback. + * However only o2dlm depends on the heartbeat. It does not want the heartbeat + * to stop while a dlm domain is still active. + */ +unsigned int o2hb_dependent_users; + +/* + * In global heartbeat mode, all regions are pinned if there are one or more + * dependent users and the quorum region count is <= O2HB_PIN_CUT_OFF. All + * regions are unpinned if the region count exceeds the cut off or the number + * of dependent users falls to zero. + */ +#define O2HB_PIN_CUT_OFF 3 + +/* + * In local heartbeat mode, we assume the dlm domain name to be the same as + * region uuid. This is true for domains created for the file system but not + * necessarily true for userdlm domains. This is a known limitation. + * + * In global heartbeat mode, we pin/unpin all o2hb regions. This solution + * works for both file system and userdlm domains. + */ +static int o2hb_region_pin(const char *region_uuid); +static void o2hb_region_unpin(const char *region_uuid); + /* Only sets a new threshold if there are no active regions. 
* * No locking or otherwise interesting code is required for reading @@ -186,7 +213,9 @@ struct o2hb_region { struct config_item hr_item; struct list_head hr_all_item; - unsigned hr_unclean_stop:1; + unsigned hr_unclean_stop:1, + hr_item_pinned:1, + hr_item_dropped:1; /* protected by the hr_callback_sem */ struct task_struct *hr_task; @@ -702,6 +731,14 @@ static void o2hb_set_quorum_device(struct o2hb_region *reg, config_item_name(®->hr_item)); set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); + + /* + * If global heartbeat active, unpin all regions if the + * region count > CUT_OFF + */ + if (o2hb_pop_count(&o2hb_quorum_region_bitmap, + O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF) + o2hb_region_unpin(NULL); } static int o2hb_check_slot(struct o2hb_region *reg, @@ -1316,6 +1353,8 @@ int o2hb_init(void) memset(o2hb_quorum_region_bitmap, 0, sizeof(o2hb_quorum_region_bitmap)); memset(o2hb_failed_region_bitmap, 0, sizeof(o2hb_failed_region_bitmap)); + o2hb_dependent_users = 0; + return o2hb_debug_init(); } @@ -2003,16 +2042,20 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group, { struct task_struct *hb_task; struct o2hb_region *reg = to_o2hb_region(item); + int quorum_region = 0; /* stop the thread when the user removes the region dir */ spin_lock(&o2hb_live_lock); if (o2hb_global_heartbeat_active()) { clear_bit(reg->hr_region_num, o2hb_region_bitmap); clear_bit(reg->hr_region_num, o2hb_live_region_bitmap); + if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap)) + quorum_region = 1; clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap); } hb_task = reg->hr_task; reg->hr_task = NULL; + reg->hr_item_dropped = 1; spin_unlock(&o2hb_live_lock); if (hb_task) @@ -2030,7 +2073,27 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group, if (o2hb_global_heartbeat_active()) printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n", config_item_name(®->hr_item)); + config_item_put(item); + + if (!o2hb_global_heartbeat_active() || !quorum_region) + return; + + /* + * If global heartbeat active and there are dependent users, + * pin all regions if quorum region count <= CUT_OFF + */ + spin_lock(&o2hb_live_lock); + + if (!o2hb_dependent_users) + goto unlock; + + if (o2hb_pop_count(&o2hb_quorum_region_bitmap, + O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF) + o2hb_region_pin(NULL); + +unlock: + spin_unlock(&o2hb_live_lock); } struct o2hb_heartbeat_group_attribute { @@ -2216,63 +2279,138 @@ void o2hb_setup_callback(struct o2hb_callback_func *hc, } EXPORT_SYMBOL_GPL(o2hb_setup_callback); -static struct o2hb_region *o2hb_find_region(const char *region_uuid) +/* + * In local heartbeat mode, region_uuid passed matches the dlm domain name. + * In global heartbeat mode, region_uuid passed is NULL. + * + * In local, we only pin the matching region. In global we pin all the active + * regions. 
+ */ +static int o2hb_region_pin(const char *region_uuid) { - struct o2hb_region *p, *reg = NULL; + int ret = 0, found = 0; + struct o2hb_region *reg; + char *uuid; assert_spin_locked(&o2hb_live_lock); - list_for_each_entry(p, &o2hb_all_regions, hr_all_item) { - if (!strcmp(region_uuid, config_item_name(&p->hr_item))) { - reg = p; - break; + list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { + uuid = config_item_name(®->hr_item); + + /* local heartbeat */ + if (region_uuid) { + if (strcmp(region_uuid, uuid)) + continue; + found = 1; } + + if (reg->hr_item_pinned || reg->hr_item_dropped) + goto skip_pin; + + /* Ignore ENOENT only for local hb (userdlm domain) */ + ret = o2nm_depend_item(®->hr_item); + if (!ret) { + mlog(ML_CLUSTER, "Pin region %s\n", uuid); + reg->hr_item_pinned = 1; + } else { + if (ret == -ENOENT && found) + ret = 0; + else { + mlog(ML_ERROR, "Pin region %s fails with %d\n", + uuid, ret); + break; + } + } +skip_pin: + if (found) + break; } - return reg; + return ret; } -static int o2hb_region_get(const char *region_uuid) +/* + * In local heartbeat mode, region_uuid passed matches the dlm domain name. + * In global heartbeat mode, region_uuid passed is NULL. + * + * In local, we only unpin the matching region. In global we unpin all the + * active regions. + */ +static void o2hb_region_unpin(const char *region_uuid) { - int ret = 0; struct o2hb_region *reg; + char *uuid; + int found = 0; - spin_lock(&o2hb_live_lock); + assert_spin_locked(&o2hb_live_lock); - reg = o2hb_find_region(region_uuid); - if (!reg) - ret = -ENOENT; - spin_unlock(&o2hb_live_lock); + list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) { + uuid = config_item_name(®->hr_item); + if (region_uuid) { + if (strcmp(region_uuid, uuid)) + continue; + found = 1; + } - if (ret) - goto out; + if (reg->hr_item_pinned) { + mlog(ML_CLUSTER, "Unpin region %s\n", uuid); + o2nm_undepend_item(®->hr_item); + reg->hr_item_pinned = 0; + } + if (found) + break; + } +} - ret = o2nm_depend_this_node(); - if (ret) - goto out; +static int o2hb_region_inc_user(const char *region_uuid) +{ + int ret = 0; - ret = o2nm_depend_item(®->hr_item); - if (ret) - o2nm_undepend_this_node(); + spin_lock(&o2hb_live_lock); -out: + /* local heartbeat */ + if (!o2hb_global_heartbeat_active()) { + ret = o2hb_region_pin(region_uuid); + goto unlock; + } + + /* + * if global heartbeat active and this is the first dependent user, + * pin all regions if quorum region count <= CUT_OFF + */ + o2hb_dependent_users++; + if (o2hb_dependent_users > 1) + goto unlock; + + if (o2hb_pop_count(&o2hb_quorum_region_bitmap, + O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF) + ret = o2hb_region_pin(NULL); + +unlock: + spin_unlock(&o2hb_live_lock); return ret; } -static void o2hb_region_put(const char *region_uuid) +void o2hb_region_dec_user(const char *region_uuid) { - struct o2hb_region *reg; - spin_lock(&o2hb_live_lock); - reg = o2hb_find_region(region_uuid); + /* local heartbeat */ + if (!o2hb_global_heartbeat_active()) { + o2hb_region_unpin(region_uuid); + goto unlock; + } - spin_unlock(&o2hb_live_lock); + /* + * if global heartbeat active and there are no dependent users, + * unpin all quorum regions + */ + o2hb_dependent_users--; + if (!o2hb_dependent_users) + o2hb_region_unpin(NULL); - if (reg) { - o2nm_undepend_item(®->hr_item); - o2nm_undepend_this_node(); - } +unlock: + spin_unlock(&o2hb_live_lock); } int o2hb_register_callback(const char *region_uuid, @@ -2293,9 +2431,11 @@ int o2hb_register_callback(const char *region_uuid, } if (region_uuid) { - ret 
= o2hb_region_get(region_uuid); - if (ret) + ret = o2hb_region_inc_user(region_uuid); + if (ret) { + mlog_errno(ret); goto out; + } } down_write(&o2hb_callback_sem); @@ -2313,7 +2453,7 @@ int o2hb_register_callback(const char *region_uuid, up_write(&o2hb_callback_sem); ret = 0; out: - mlog(ML_HEARTBEAT, "returning %d on behalf of %p for funcs %p\n", + mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n", ret, __builtin_return_address(0), hc); return ret; } @@ -2324,7 +2464,7 @@ void o2hb_unregister_callback(const char *region_uuid, { BUG_ON(hc->hc_magic != O2HB_CB_MAGIC); - mlog(ML_HEARTBEAT, "on behalf of %p for funcs %p\n", + mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n", __builtin_return_address(0), hc); /* XXX Can this happen _with_ a region reference? */ @@ -2332,7 +2472,7 @@ void o2hb_unregister_callback(const char *region_uuid, return; if (region_uuid) - o2hb_region_put(region_uuid); + o2hb_region_dec_user(region_uuid); down_write(&o2hb_callback_sem); diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index fcc40c33489d..7e38a072d720 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c @@ -1659,8 +1659,8 @@ bail: static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm) { - o2hb_unregister_callback(NULL, &dlm->dlm_hb_up); - o2hb_unregister_callback(NULL, &dlm->dlm_hb_down); + o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_up); + o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_down); o2net_unregister_handler_list(&dlm->dlm_domain_handlers); } @@ -1672,13 +1672,13 @@ static int dlm_register_domain_handlers(struct dlm_ctxt *dlm) o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB, dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI); - status = o2hb_register_callback(NULL, &dlm->dlm_hb_down); + status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down); if (status) goto bail; o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB, dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI); - status = o2hb_register_callback(NULL, &dlm->dlm_hb_up); + status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up); if (status) goto bail; -- cgit v1.2.2 From cb0586bd4c77c531fe0be4ae860ec642450eeda5 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Tue, 14 Dec 2010 14:14:30 -0800 Subject: ocfs2/cluster: Show pin state for each o2hb region This patch adds a per o2hb region debugfs file that shows whether that region is pinned or not. 
Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/cluster/heartbeat.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index ad2e41d6879d..3722e3850a1f 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -82,6 +82,7 @@ static unsigned long o2hb_failed_region_bitmap[BITS_TO_LONGS(O2NM_MAX_REGIONS)]; #define O2HB_DB_TYPE_REGION_LIVENODES 4 #define O2HB_DB_TYPE_REGION_NUMBER 5 #define O2HB_DB_TYPE_REGION_ELAPSED_TIME 6 +#define O2HB_DB_TYPE_REGION_PINNED 7 struct o2hb_debug_buf { int db_type; int db_size; @@ -101,6 +102,7 @@ static struct o2hb_debug_buf *o2hb_db_failedregions; #define O2HB_DEBUG_FAILEDREGIONS "failed_regions" #define O2HB_DEBUG_REGION_NUMBER "num" #define O2HB_DEBUG_REGION_ELAPSED_TIME "elapsed_time_in_ms" +#define O2HB_DEBUG_REGION_PINNED "pinned" static struct dentry *o2hb_debug_dir; static struct dentry *o2hb_debug_livenodes; @@ -241,9 +243,11 @@ struct o2hb_region { struct dentry *hr_debug_livenodes; struct dentry *hr_debug_regnum; struct dentry *hr_debug_elapsed_time; + struct dentry *hr_debug_pinned; struct o2hb_debug_buf *hr_db_livenodes; struct o2hb_debug_buf *hr_db_regnum; struct o2hb_debug_buf *hr_db_elapsed_time; + struct o2hb_debug_buf *hr_db_pinned; /* let the person setting up hb wait for it to return until it * has reached a 'steady' state. This will be fixed when we have @@ -1180,6 +1184,12 @@ static int o2hb_debug_open(struct inode *inode, struct file *file) reg->hr_last_timeout_start)); goto done; + case O2HB_DB_TYPE_REGION_PINNED: + reg = (struct o2hb_region *)db->db_data; + out += snprintf(buf + out, PAGE_SIZE - out, "%u\n", + !!reg->hr_item_pinned); + goto done; + default: goto done; } @@ -1424,6 +1434,7 @@ static void o2hb_region_release(struct config_item *item) debugfs_remove(reg->hr_debug_livenodes); debugfs_remove(reg->hr_debug_regnum); debugfs_remove(reg->hr_debug_elapsed_time); + debugfs_remove(reg->hr_debug_pinned); debugfs_remove(reg->hr_debug_dir); spin_lock(&o2hb_live_lock); @@ -1988,6 +1999,18 @@ static int o2hb_debug_region_init(struct o2hb_region *reg, struct dentry *dir) goto bail; } + reg->hr_debug_pinned = + o2hb_debug_create(O2HB_DEBUG_REGION_PINNED, + reg->hr_debug_dir, + &(reg->hr_db_pinned), + sizeof(*(reg->hr_db_pinned)), + O2HB_DB_TYPE_REGION_PINNED, + 0, 0, reg); + if (!reg->hr_debug_pinned) { + mlog_errno(ret); + goto bail; + } + ret = 0; bail: return ret; -- cgit v1.2.2 From cfc069d3fa24d7c6357e3b731e97f94db495bf0c Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Tue, 14 Dec 2010 14:14:31 -0800 Subject: ocfs2/cluster: Pin the local node when o2hb thread starts The patch pins the node item of the local node when the o2hb thread starts and unpins on stop. An earlier patch pinned the node item of the remote node on o2net connect and unpinned on disconnect. 
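(Illustrative aside, not from the patch: the two node-pinning patches pair up as object lifetimes; the remote node item is pinned for as long as a socket container references it, and the local node item for as long as the heartbeat thread runs. A reduced sketch of the thread-side pairing, with the loop body elided, looks like this; only o2nm_depend_this_node()/o2nm_undepend_this_node() and kthread_should_stop() are real interfaces here, example_hb_thread() is a stand-in for o2hb_thread().)

/* Sketch: pin the local node's configfs item for exactly the lifetime
 * of a kernel thread; the heartbeat work itself is omitted. */
static int example_hb_thread(void *data)
{
	o2nm_depend_this_node();	/* pin: local node item cannot be removed */

	while (!kthread_should_stop()) {
		/* ... do one round of disk heartbeat, then sleep ... */
	}

	o2nm_undepend_this_node();	/* unpin on clean shutdown */
	return 0;
}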
Signed-off-by Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/cluster/heartbeat.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 3722e3850a1f..8b50c1ad7a69 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1083,6 +1083,9 @@ static int o2hb_thread(void *data) set_user_nice(current, -20); + /* Pin node */ + o2nm_depend_this_node(); + while (!kthread_should_stop() && !reg->hr_unclean_stop) { /* We track the time spent inside * o2hb_do_disk_heartbeat so that we avoid more than @@ -1132,6 +1135,9 @@ static int o2hb_thread(void *data) mlog_errno(ret); } + /* Unpin node */ + o2nm_undepend_this_node(); + mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n"); return 0; -- cgit v1.2.2 From d2bf1b6723ed0eab378363649d15b7893bf14e91 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 8 Dec 2010 20:57:36 +0100 Subject: block: move register_disk() and del_gendisk() to block/genhd.c There's no reason for register_disk() and del_gendisk() to be in fs/partitions/check.c. Move both to genhd.c. While at it, collapse unlink_gendisk(), which was artificially in a separate function due to genhd.c / check.c split, into del_gendisk(). Signed-off-by: Tejun Heo Signed-off-by: Jens Axboe --- fs/partitions/check.c | 89 --------------------------------------------------- 1 file changed, 89 deletions(-) (limited to 'fs') diff --git a/fs/partitions/check.c b/fs/partitions/check.c index bdf8d3cc95a4..9a48d65d9855 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -516,65 +516,6 @@ out_put: return ERR_PTR(err); } -/* Not exported, helper to add_disk(). */ -void register_disk(struct gendisk *disk) -{ - struct device *ddev = disk_to_dev(disk); - struct block_device *bdev; - struct disk_part_iter piter; - struct hd_struct *part; - int err; - - ddev->parent = disk->driverfs_dev; - - dev_set_name(ddev, disk->disk_name); - - /* delay uevents, until we scanned partition table */ - dev_set_uevent_suppress(ddev, 1); - - if (device_add(ddev)) - return; - if (!sysfs_deprecated) { - err = sysfs_create_link(block_depr, &ddev->kobj, - kobject_name(&ddev->kobj)); - if (err) { - device_del(ddev); - return; - } - } - disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj); - disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj); - - /* No minors to use for partitions */ - if (!disk_partitionable(disk)) - goto exit; - - /* No such device (e.g., media were just removed) */ - if (!get_capacity(disk)) - goto exit; - - bdev = bdget_disk(disk, 0); - if (!bdev) - goto exit; - - bdev->bd_invalidated = 1; - err = blkdev_get(bdev, FMODE_READ, NULL); - if (err < 0) - goto exit; - blkdev_put(bdev, FMODE_READ); - -exit: - /* announce disk after possible partitions are created */ - dev_set_uevent_suppress(ddev, 0); - kobject_uevent(&ddev->kobj, KOBJ_ADD); - - /* announce possible partitions */ - disk_part_iter_init(&piter, disk, 0); - while ((part = disk_part_iter_next(&piter))) - kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD); - disk_part_iter_exit(&piter); -} - static bool disk_unlock_native_capacity(struct gendisk *disk) { const struct block_device_operations *bdops = disk->fops; @@ -737,33 +678,3 @@ fail: } EXPORT_SYMBOL(read_dev_sector); - -void del_gendisk(struct gendisk *disk) -{ - struct disk_part_iter piter; - struct hd_struct *part; - - /* invalidate stuff */ - disk_part_iter_init(&piter, disk, - DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); - while ((part = 
disk_part_iter_next(&piter))) { - invalidate_partition(disk, part->partno); - delete_partition(disk, part->partno); - } - disk_part_iter_exit(&piter); - - invalidate_partition(disk, 0); - blk_free_devt(disk_to_dev(disk)->devt); - set_capacity(disk, 0); - disk->flags &= ~GENHD_FL_UP; - unlink_gendisk(disk); - part_stat_set_all(&disk->part0, 0); - disk->part0.stamp = 0; - - kobject_put(disk->part0.holder_dir); - kobject_put(disk->slave_dir); - disk->driverfs_dev = NULL; - if (!sysfs_deprecated) - sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); - device_del(disk_to_dev(disk)); -} -- cgit v1.2.2 From 77ea887e433ad8389d416826936c110fa7910f80 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 8 Dec 2010 20:57:37 +0100 Subject: implement in-kernel gendisk events handling Currently, media presence polling for removeable block devices is done from userland. There are several issues with this. * Polling is done by periodically opening the device. For SCSI devices, the command sequence generated by such action involves a few different commands including TEST_UNIT_READY. This behavior, while perfectly legal, is different from Windows which only issues single command, GET_EVENT_STATUS_NOTIFICATION. Unfortunately, some ATAPI devices lock up after being periodically queried such command sequences. * There is no reliable and unintrusive way for a userland program to tell whether the target device is safe for media presence polling. For example, polling for media presence during an on-going burning session can make it fail. The polling program can avoid this by opening the device with O_EXCL but then it risks making a valid exclusive user of the device fail w/ -EBUSY. * Userland polling is unnecessarily heavy and in-kernel implementation is lighter and better coordinated (workqueue, timer slack). This patch implements framework for in-kernel disk event handling, which includes media presence polling. * bdops->check_events() is added, which supercedes ->media_changed(). It should check whether there's any pending event and return if so. Currently, two events are defined - DISK_EVENT_MEDIA_CHANGE and DISK_EVENT_EJECT_REQUEST. ->check_events() is guaranteed not to be called parallelly. * gendisk->events and ->async_events are added. These should be initialized by block driver before passing the device to add_disk(). The former contains the mask of all supported events and the latter the mask of all events which the device can report without polling. /sys/block/*/events[_async] export these to userland. * Kernel parameter block.events_dfl_poll_msecs controls the system polling interval (default is 0 which means disable) and /sys/block/*/events_poll_msecs control polling intervals for individual devices (default is -1 meaning use system setting). Note that if a device can report all supported events asynchronously and its polling interval isn't explicitly set, the device won't be polled regardless of the system polling interval. * If a device is opened exclusively with write access, event checking is automatically disabled until all write exclusive accesses are released. * There are event 'clearing' events. For example, both of currently defined events are cleared after the device has been successfully opened. This information is passed to ->check_events() callback using @clearing argument as a hint. * Event checking is always performed from system_nrt_wq and timer slack is set to 25% for polling. * Nothing changes for drivers which implement ->media_changed() but not ->check_events(). 
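(Illustrative aside: under this framework a converted driver supplies ->check_events() and advertises its supported events before add_disk(). The sketch below assumes a hypothetical "mydrv" driver and a hardware-specific mydrv_media_present() helper; ->check_events(), gendisk->events, DISK_EVENT_MEDIA_CHANGE and add_disk() are the interfaces described above.)

/* Sketch of a converted removable-media driver; the mydrv names are
 * placeholders, the block-layer interfaces are the ones added by this
 * patch. */
struct mydrv {
	struct gendisk	*gd;
	bool		media_present;
};

static bool mydrv_media_present(struct mydrv *md);	/* hardware poll, not shown */

static unsigned int mydrv_check_events(struct gendisk *disk,
				       unsigned int clearing)
{
	struct mydrv *md = disk->private_data;
	bool present = mydrv_media_present(md);
	unsigned int events = 0;

	/* @clearing hints which pending events the caller will consume;
	 * a simple driver can ignore it. This callback is never invoked
	 * in parallel with itself. */
	if (present != md->media_present) {
		md->media_present = present;
		events |= DISK_EVENT_MEDIA_CHANGE;
	}
	return events;
}

static const struct block_device_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.check_events	= mydrv_check_events,
};

static void mydrv_register_disk(struct mydrv *md)
{
	/* Declare supported events before add_disk(). Polling only runs
	 * when events_poll_msecs (per device) or the
	 * block.events_dfl_poll_msecs default is non-zero. */
	md->gd->fops = &mydrv_fops;
	md->gd->events = DISK_EVENT_MEDIA_CHANGE;
	add_disk(md->gd);
}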
Going forward, all drivers will be converted to ->check_events() and ->media_change() will be dropped. Signed-off-by: Tejun Heo Cc: Kay Sievers Cc: Jan Kara Signed-off-by: Jens Axboe --- fs/block_dev.c | 41 ++++++++++++++++++++++++++++++++++------- 1 file changed, 34 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index c1c1b8c3fb99..6017389711ee 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -948,10 +948,11 @@ int check_disk_change(struct block_device *bdev) { struct gendisk *disk = bdev->bd_disk; const struct block_device_operations *bdops = disk->fops; + unsigned int events; - if (!bdops->media_changed) - return 0; - if (!bdops->media_changed(bdev->bd_disk)) + events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE | + DISK_EVENT_EJECT_REQUEST); + if (!(events & DISK_EVENT_MEDIA_CHANGE)) return 0; flush_disk(bdev); @@ -1158,9 +1159,10 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) if (whole) { /* finish claiming */ + mutex_lock(&bdev->bd_mutex); spin_lock(&bdev_lock); - if (res == 0) { + if (!res) { BUG_ON(!bd_may_claim(bdev, whole, holder)); /* * Note that for a whole device bd_holders @@ -1180,6 +1182,20 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) wake_up_bit(&whole->bd_claiming, 0); spin_unlock(&bdev_lock); + + /* + * Block event polling for write claims. Any write + * holder makes the write_holder state stick until all + * are released. This is good enough and tracking + * individual writeable reference is too fragile given + * the way @mode is used in blkdev_get/put(). + */ + if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) { + bdev->bd_write_holder = true; + disk_block_events(bdev->bd_disk); + } + + mutex_unlock(&bdev->bd_mutex); bdput(whole); } @@ -1353,12 +1369,23 @@ int blkdev_put(struct block_device *bdev, fmode_t mode) spin_unlock(&bdev_lock); - /* if this was the last claim, holder link should go too */ - if (bdev_free) + /* + * If this was the last claim, remove holder link and + * unblock evpoll if it was a write holder. + */ + if (bdev_free) { bd_unlink_disk_holder(bdev); + if (bdev->bd_write_holder) { + disk_unblock_events(bdev->bd_disk); + bdev->bd_write_holder = false; + } else + disk_check_events(bdev->bd_disk); + } mutex_unlock(&bdev->bd_mutex); - } + } else + disk_check_events(bdev->bd_disk); + return __blkdev_put(bdev, mode, 0); } EXPORT_SYMBOL(blkdev_put); -- cgit v1.2.2 From 25a0866cc63281b480cc0c11ddeaccb2ffc57dc9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:54:30 +0000 Subject: NFS: Introduce new-style XDR encoding functions for NFSv2 We're interested in taking advantage of the safety benefits of xdr_streams. These data structures allow more careful checking for buffer overflow while encoding. More careful type checking is also introduced in the new functions. For efficiency, we also eventually want to be able to pass xdr_streams from call_encode() to all XDR encoding functions, rather than building an xdr_stream in every XDR encoding function in the kernel. To do this means all encoders must be ready to handle a passed-in xdr_stream. The new encoders follow the modern paradigm for XDR encoders: BUG on any error, and always return a zero status code. Static helper functions are left without the "inline" directive. This allows the compiler to choose automatically how to optimize these for size or speed. Signed-off-by: Chuck Lever Tested-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs2xdr.c | 406 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 403 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 5914a1911c95..869e2151a2b1 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -61,6 +61,23 @@ #define NFS_readdirres_sz (1) #define NFS_statfsres_sz (1+NFS_info_sz) + +/* + * While encoding arguments, set up the reply buffer in advance to + * receive reply data directly into the page cache. + */ +static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages, + unsigned int base, unsigned int len, + unsigned int bufsize) +{ + struct rpc_auth *auth = req->rq_cred->cr_auth; + unsigned int replen; + + replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize; + xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len); +} + + /* * Common NFS XDR functions as inlines */ @@ -81,7 +98,7 @@ xdr_decode_fhandle(__be32 *p, struct nfs_fh *fhandle) } static inline __be32* -xdr_encode_time(__be32 *p, struct timespec *timep) +xdr_encode_time(__be32 *p, const struct timespec *timep) { *p++ = htonl(timep->tv_sec); /* Convert nanoseconds into microseconds */ @@ -90,7 +107,7 @@ xdr_encode_time(__be32 *p, struct timespec *timep) } static inline __be32* -xdr_encode_current_server_time(__be32 *p, struct timespec *timep) +xdr_encode_current_server_time(__be32 *p, const struct timespec *timep) { /* * Passing the invalid value useconds=1000000 is a @@ -173,6 +190,136 @@ xdr_encode_sattr(__be32 *p, struct iattr *attr) return p; } +/* + * Encode/decode NFSv2 basic data types + * + * Basic NFSv2 data types are defined in section 2.3 of RFC 1094: + * "NFS: Network File System Protocol Specification". + * + * Not all basic data types have their own encoding and decoding + * functions. For run-time efficiency, some data types are encoded + * or decoded inline. + */ + +/* + * 2.3.3. fhandle + * + * typedef opaque fhandle[FHSIZE]; + */ +static void encode_fhandle(struct xdr_stream *xdr, const struct nfs_fh *fh) +{ + __be32 *p; + + BUG_ON(fh->size != NFS2_FHSIZE); + p = xdr_reserve_space(xdr, NFS2_FHSIZE); + memcpy(p, fh->data, NFS2_FHSIZE); +} + +/* + * 2.3.6. 
sattr + * + * struct sattr { + * unsigned int mode; + * unsigned int uid; + * unsigned int gid; + * unsigned int size; + * timeval atime; + * timeval mtime; + * }; + */ + +#define NFS2_SATTR_NOT_SET (0xffffffff) + +static __be32 *xdr_time_not_set(__be32 *p) +{ + *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET); + *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET); + return p; +} + +static void encode_sattr(struct xdr_stream *xdr, const struct iattr *attr) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, NFS_sattr_sz << 2); + + if (attr->ia_valid & ATTR_MODE) + *p++ = cpu_to_be32(attr->ia_mode); + else + *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET); + if (attr->ia_valid & ATTR_UID) + *p++ = cpu_to_be32(attr->ia_uid); + else + *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET); + if (attr->ia_valid & ATTR_GID) + *p++ = cpu_to_be32(attr->ia_gid); + else + *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET); + if (attr->ia_valid & ATTR_SIZE) + *p++ = cpu_to_be32((u32)attr->ia_size); + else + *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET); + + if (attr->ia_valid & ATTR_ATIME_SET) + p = xdr_encode_time(p, &attr->ia_atime); + else if (attr->ia_valid & ATTR_ATIME) + p = xdr_encode_current_server_time(p, &attr->ia_atime); + else + p = xdr_time_not_set(p); + if (attr->ia_valid & ATTR_MTIME_SET) + xdr_encode_time(p, &attr->ia_mtime); + else if (attr->ia_valid & ATTR_MTIME) + xdr_encode_current_server_time(p, &attr->ia_mtime); + else + xdr_time_not_set(p); +} + +/* + * 2.3.7. filename + * + * typedef string filename; + */ +static void encode_filename(struct xdr_stream *xdr, + const char *name, u32 length) +{ + __be32 *p; + + BUG_ON(length > NFS2_MAXNAMLEN); + p = xdr_reserve_space(xdr, 4 + length); + xdr_encode_opaque(p, name, length); +} + +/* + * 2.3.8. path + * + * typedef string path; + */ +static void encode_path(struct xdr_stream *xdr, struct page **pages, u32 length) +{ + __be32 *p; + + BUG_ON(length > NFS2_MAXPATHLEN); + p = xdr_reserve_space(xdr, 4); + *p = cpu_to_be32(length); + xdr_write_pages(xdr, pages, 0, length); +} + +/* + * 2.3.10. diropargs + * + * struct diropargs { + * fhandle dir; + * filename name; + * }; + */ +static void encode_diropargs(struct xdr_stream *xdr, const struct nfs_fh *fh, + const char *name, u32 length) +{ + encode_fhandle(xdr, fh); + encode_filename(xdr, name, length); +} + + /* * NFS encode functions */ @@ -188,6 +335,16 @@ nfs_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh) return 0; } +static int nfs2_xdr_enc_fhandle(struct rpc_rqst *req, __be32 *p, + const struct nfs_fh *fh) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_fhandle(&xdr, fh); + return 0; +} + /* * Encode SETATTR arguments */ @@ -200,6 +357,25 @@ nfs_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs_sattrargs *args) return 0; } +/* + * 2.2.3. 
sattrargs + * + * struct sattrargs { + * fhandle file; + * sattr attributes; + * }; + */ +static int nfs2_xdr_enc_sattrargs(struct rpc_rqst *req, __be32 *p, + const struct nfs_sattrargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_fhandle(&xdr, args->fh); + encode_sattr(&xdr, args->sattr); + return 0; +} + /* * Encode directory ops argument * LOOKUP, RMDIR @@ -213,6 +389,16 @@ nfs_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs_diropargs *args) return 0; } +static int nfs2_xdr_enc_diropargs(struct rpc_rqst *req, __be32 *p, + const struct nfs_diropargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_diropargs(&xdr, args->fh, args->name, args->len); + return 0; +} + /* * Encode REMOVE argument */ @@ -225,6 +411,18 @@ nfs_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs return 0; } +static int nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req, __be32 *p, + const struct nfs_readlinkargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_fhandle(&xdr, args->fh); + prepare_reply_buffer(req, args->pages, args->pgbase, + args->pglen, NFS_readlinkres_sz); + return 0; +} + /* * Arguments to a READ call. Since we read data directly into the page * cache, we also set up the reply iovec here so that iov[1] points @@ -252,6 +450,44 @@ nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) return 0; } +/* + * 2.2.7. readargs + * + * struct readargs { + * fhandle file; + * unsigned offset; + * unsigned count; + * unsigned totalcount; + * }; + */ +static void encode_readargs(struct xdr_stream *xdr, + const struct nfs_readargs *args) +{ + u32 offset = args->offset; + u32 count = args->count; + __be32 *p; + + encode_fhandle(xdr, args->fh); + + p = xdr_reserve_space(xdr, 4 + 4 + 4); + *p++ = cpu_to_be32(offset); + *p++ = cpu_to_be32(count); + *p = cpu_to_be32(count); +} + +static int nfs2_xdr_enc_readargs(struct rpc_rqst *req, __be32 *p, + const struct nfs_readargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_readargs(&xdr, args); + prepare_reply_buffer(req, args->pages, args->pgbase, + args->count, NFS_readres_sz); + req->rq_rcv_buf.flags |= XDRBUF_READ; + return 0; +} + /* * Decode READ reply */ @@ -317,6 +553,47 @@ nfs_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args) return 0; } +/* + * 2.2.9. writeargs + * + * struct writeargs { + * fhandle file; + * unsigned beginoffset; + * unsigned offset; + * unsigned totalcount; + * nfsdata data; + * }; + */ +static void encode_writeargs(struct xdr_stream *xdr, + const struct nfs_writeargs *args) +{ + u32 offset = args->offset; + u32 count = args->count; + __be32 *p; + + encode_fhandle(xdr, args->fh); + + p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4); + *p++ = cpu_to_be32(offset); + *p++ = cpu_to_be32(offset); + *p++ = cpu_to_be32(count); + + /* nfsdata */ + *p = cpu_to_be32(count); + xdr_write_pages(xdr, args->pages, args->pgbase, count); +} + +static int nfs2_xdr_enc_writeargs(struct rpc_rqst *req, __be32 *p, + const struct nfs_writeargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_writeargs(&xdr, args); + xdr.buf->flags |= XDRBUF_WRITE; + return 0; +} + /* * Encode create arguments * CREATE, MKDIR @@ -331,6 +608,35 @@ nfs_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs_createargs *args) return 0; } +/* + * 2.2.10. 
createargs + * + * struct createargs { + * diropargs where; + * sattr attributes; + * }; + */ +static int nfs2_xdr_enc_createargs(struct rpc_rqst *req, __be32 *p, + const struct nfs_createargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_diropargs(&xdr, args->fh, args->name, args->len); + encode_sattr(&xdr, args->sattr); + return 0; +} + +static int nfs2_xdr_enc_removeargs(struct rpc_rqst *req, __be32 *p, + const struct nfs_removeargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_diropargs(&xdr, args->fh, args->name.name, args->name.len); + return 0; +} + /* * Encode RENAME arguments */ @@ -345,6 +651,27 @@ nfs_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args) return 0; } +/* + * 2.2.12. renameargs + * + * struct renameargs { + * diropargs from; + * diropargs to; + * }; + */ +static int nfs2_xdr_enc_renameargs(struct rpc_rqst *req, __be32 *p, + const struct nfs_renameargs *args) +{ + const struct qstr *old = args->old_name; + const struct qstr *new = args->new_name; + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_diropargs(&xdr, args->old_dir, old->name, old->len); + encode_diropargs(&xdr, args->new_dir, new->name, new->len); + return 0; +} + /* * Encode LINK arguments */ @@ -358,6 +685,25 @@ nfs_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs_linkargs *args) return 0; } +/* + * 2.2.13. linkargs + * + * struct linkargs { + * fhandle from; + * diropargs to; + * }; + */ +static int nfs2_xdr_enc_linkargs(struct rpc_rqst *req, __be32 *p, + const struct nfs_linkargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_fhandle(&xdr, args->fromfh); + encode_diropargs(&xdr, args->tofh, args->toname, args->tolen); + return 0; +} + /* * Encode SYMLINK arguments */ @@ -387,6 +733,27 @@ nfs_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_symlinkargs *arg return 0; } +/* + * 2.2.14. symlinkargs + * + * struct symlinkargs { + * diropargs from; + * path to; + * sattr attributes; + * }; + */ +static int nfs2_xdr_enc_symlinkargs(struct rpc_rqst *req, __be32 *p, + const struct nfs_symlinkargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_diropargs(&xdr, args->fromfh, args->fromname, args->fromlen); + encode_path(&xdr, args->pages, args->pathlen); + encode_sattr(&xdr, args->sattr); + return 0; +} + /* * Encode arguments to readdir call */ @@ -408,6 +775,39 @@ nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *arg return 0; } +/* + * 2.2.17. readdirargs + * + * struct readdirargs { + * fhandle dir; + * nfscookie cookie; + * unsigned count; + * }; + */ +static void encode_readdirargs(struct xdr_stream *xdr, + const struct nfs_readdirargs *args) +{ + __be32 *p; + + encode_fhandle(xdr, args->fh); + + p = xdr_reserve_space(xdr, 4 + 4); + *p++ = cpu_to_be32(args->cookie); + *p = cpu_to_be32(args->count); +} + +static int nfs2_xdr_enc_readdirargs(struct rpc_rqst *req, __be32 *p, + const struct nfs_readdirargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_readdirargs(&xdr, args); + prepare_reply_buffer(req, args->pages, 0, + args->count, NFS_readdirres_sz); + return 0; +} + /* * Decode the result of a readdir call. 
* We're not really decoding anymore, we just leave the buffer untouched @@ -698,7 +1098,7 @@ nfs_stat_to_errno(int stat) #define PROC(proc, argtype, restype, timer) \ [NFSPROC_##proc] = { \ .p_proc = NFSPROC_##proc, \ - .p_encode = (kxdrproc_t) nfs_xdr_##argtype, \ + .p_encode = (kxdrproc_t)nfs2_xdr_enc_##argtype, \ .p_decode = (kxdrproc_t) nfs_xdr_##restype, \ .p_arglen = NFS_##argtype##_sz, \ .p_replen = NFS_##restype##_sz, \ -- cgit v1.2.2 From 2d70f533eab0a0cabd05ee878b6709707bf63c86 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:54:40 +0000 Subject: NFS: Remove old NFSv2 encoder functions Clean up: Remove unused legacy argument encoder functions, and any now unused encoder helper functions. Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs2xdr.c | 249 +------------------------------------------------------ 1 file changed, 4 insertions(+), 245 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 869e2151a2b1..f5ea9dcf08d6 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -81,13 +81,6 @@ static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages, /* * Common NFS XDR functions as inlines */ -static inline __be32 * -xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fhandle) -{ - memcpy(p, fhandle->data, NFS2_FHSIZE); - return p + XDR_QUADLEN(NFS2_FHSIZE); -} - static inline __be32 * xdr_decode_fhandle(__be32 *p, struct nfs_fh *fhandle) { @@ -160,36 +153,6 @@ xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr) return p; } -static inline __be32 * -xdr_encode_sattr(__be32 *p, struct iattr *attr) -{ - const __be32 not_set = __constant_htonl(0xFFFFFFFF); - - *p++ = (attr->ia_valid & ATTR_MODE) ? htonl(attr->ia_mode) : not_set; - *p++ = (attr->ia_valid & ATTR_UID) ? htonl(attr->ia_uid) : not_set; - *p++ = (attr->ia_valid & ATTR_GID) ? htonl(attr->ia_gid) : not_set; - *p++ = (attr->ia_valid & ATTR_SIZE) ? htonl(attr->ia_size) : not_set; - - if (attr->ia_valid & ATTR_ATIME_SET) { - p = xdr_encode_time(p, &attr->ia_atime); - } else if (attr->ia_valid & ATTR_ATIME) { - p = xdr_encode_current_server_time(p, &attr->ia_atime); - } else { - *p++ = not_set; - *p++ = not_set; - } - - if (attr->ia_valid & ATTR_MTIME_SET) { - p = xdr_encode_time(p, &attr->ia_mtime); - } else if (attr->ia_valid & ATTR_MTIME) { - p = xdr_encode_current_server_time(p, &attr->ia_mtime); - } else { - *p++ = not_set; - *p++ = not_set; - } - return p; -} - /* * Encode/decode NFSv2 basic data types * @@ -321,19 +284,11 @@ static void encode_diropargs(struct xdr_stream *xdr, const struct nfs_fh *fh, /* - * NFS encode functions - */ -/* - * Encode file handle argument - * GETATTR, READLINK, STATFS + * NFSv2 XDR encode functions + * + * NFSv2 argument types are defined in section 2.2 of RFC 1094: + * "NFS: Network File System Protocol Specification". */ -static int -nfs_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh) -{ - p = xdr_encode_fhandle(p, fh); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} static int nfs2_xdr_enc_fhandle(struct rpc_rqst *req, __be32 *p, const struct nfs_fh *fh) @@ -345,18 +300,6 @@ static int nfs2_xdr_enc_fhandle(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode SETATTR arguments - */ -static int -nfs_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs_sattrargs *args) -{ - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_sattr(p, args->sattr); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 2.2.3. 
sattrargs * @@ -376,19 +319,6 @@ static int nfs2_xdr_enc_sattrargs(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode directory ops argument - * LOOKUP, RMDIR - */ -static int -nfs_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs_diropargs *args) -{ - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_array(p, args->name, args->len); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - static int nfs2_xdr_enc_diropargs(struct rpc_rqst *req, __be32 *p, const struct nfs_diropargs *args) { @@ -399,18 +329,6 @@ static int nfs2_xdr_enc_diropargs(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode REMOVE argument - */ -static int -nfs_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args) -{ - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_array(p, args->name.name, args->name.len); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - static int nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req, __be32 *p, const struct nfs_readlinkargs *args) { @@ -423,33 +341,6 @@ static int nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Arguments to a READ call. Since we read data directly into the page - * cache, we also set up the reply iovec here so that iov[1] points - * exactly to the page we want to fetch. - */ -static int -nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) -{ - struct rpc_auth *auth = req->rq_cred->cr_auth; - unsigned int replen; - u32 offset = (u32)args->offset; - u32 count = args->count; - - p = xdr_encode_fhandle(p, args->fh); - *p++ = htonl(offset); - *p++ = htonl(count); - *p++ = htonl(count); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - - /* Inline the page array */ - replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readres_sz) << 2; - xdr_inline_pages(&req->rq_rcv_buf, replen, - args->pages, args->pgbase, count); - req->rq_rcv_buf.flags |= XDRBUF_READ; - return 0; -} - /* * 2.2.7. readargs * @@ -530,29 +421,6 @@ nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res) } -/* - * Write arguments. Splice the buffer to be written into the iovec. - */ -static int -nfs_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args) -{ - struct xdr_buf *sndbuf = &req->rq_snd_buf; - u32 offset = (u32)args->offset; - u32 count = args->count; - - p = xdr_encode_fhandle(p, args->fh); - *p++ = htonl(offset); - *p++ = htonl(offset); - *p++ = htonl(count); - *p++ = htonl(count); - sndbuf->len = xdr_adjust_iovec(sndbuf->head, p); - - /* Copy the page array */ - xdr_encode_pages(sndbuf, args->pages, args->pgbase, count); - sndbuf->flags |= XDRBUF_WRITE; - return 0; -} - /* * 2.2.9. writeargs * @@ -594,20 +462,6 @@ static int nfs2_xdr_enc_writeargs(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode create arguments - * CREATE, MKDIR - */ -static int -nfs_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs_createargs *args) -{ - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_array(p, args->name, args->len); - p = xdr_encode_sattr(p, args->sattr); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 2.2.10. 
createargs * @@ -637,20 +491,6 @@ static int nfs2_xdr_enc_removeargs(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode RENAME arguments - */ -static int -nfs_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args) -{ - p = xdr_encode_fhandle(p, args->old_dir); - p = xdr_encode_array(p, args->old_name->name, args->old_name->len); - p = xdr_encode_fhandle(p, args->new_dir); - p = xdr_encode_array(p, args->new_name->name, args->new_name->len); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 2.2.12. renameargs * @@ -672,19 +512,6 @@ static int nfs2_xdr_enc_renameargs(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode LINK arguments - */ -static int -nfs_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs_linkargs *args) -{ - p = xdr_encode_fhandle(p, args->fromfh); - p = xdr_encode_fhandle(p, args->tofh); - p = xdr_encode_array(p, args->toname, args->tolen); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 2.2.13. linkargs * @@ -704,35 +531,6 @@ static int nfs2_xdr_enc_linkargs(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode SYMLINK arguments - */ -static int -nfs_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_symlinkargs *args) -{ - struct xdr_buf *sndbuf = &req->rq_snd_buf; - size_t pad; - - p = xdr_encode_fhandle(p, args->fromfh); - p = xdr_encode_array(p, args->fromname, args->fromlen); - *p++ = htonl(args->pathlen); - sndbuf->len = xdr_adjust_iovec(sndbuf->head, p); - - xdr_encode_pages(sndbuf, args->pages, 0, args->pathlen); - - /* - * xdr_encode_pages may have added a few bytes to ensure the - * pathname ends on a 4-byte boundary. Start encoding the - * attributes after the pad bytes. - */ - pad = sndbuf->tail->iov_len; - if (pad > 0) - p++; - p = xdr_encode_sattr(p, args->sattr); - sndbuf->len += xdr_adjust_iovec(sndbuf->tail, p) - pad; - return 0; -} - /* * 2.2.14. symlinkargs * @@ -754,27 +552,6 @@ static int nfs2_xdr_enc_symlinkargs(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode arguments to readdir call - */ -static int -nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args) -{ - struct rpc_auth *auth = req->rq_cred->cr_auth; - unsigned int replen; - u32 count = args->count; - - p = xdr_encode_fhandle(p, args->fh); - *p++ = htonl(args->cookie); - *p++ = htonl(count); /* see above */ - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - - /* Inline the page array */ - replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readdirres_sz) << 2; - xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count); - return 0; -} - /* * 2.2.17. readdirargs * @@ -947,24 +724,6 @@ nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res) return 0; } -/* - * Encode READLINK args - */ -static int -nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args) -{ - struct rpc_auth *auth = req->rq_cred->cr_auth; - unsigned int replen; - - p = xdr_encode_fhandle(p, args->fh); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - - /* Inline the page array */ - replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readlinkres_sz) << 2; - xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen); - return 0; -} - /* * Decode READLINK reply */ -- cgit v1.2.2 From 282ac2a573dd1be4230710932cd471ed5a3a94b8 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:54:50 +0000 Subject: NFS: Update xdr_encode_foo() functions that we're keeping Clean up. 
The new helper functions are kept in order by section of RFC 1094. Move the two timestamp encoders we're keeping, update their coding style, and refresh their documenting comments. Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs2xdr.c | 59 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 33 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index f5ea9dcf08d6..c79977304af8 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -90,32 +90,6 @@ xdr_decode_fhandle(__be32 *p, struct nfs_fh *fhandle) return p + XDR_QUADLEN(NFS2_FHSIZE); } -static inline __be32* -xdr_encode_time(__be32 *p, const struct timespec *timep) -{ - *p++ = htonl(timep->tv_sec); - /* Convert nanoseconds into microseconds */ - *p++ = htonl(timep->tv_nsec ? timep->tv_nsec / 1000 : 0); - return p; -} - -static inline __be32* -xdr_encode_current_server_time(__be32 *p, const struct timespec *timep) -{ - /* - * Passing the invalid value useconds=1000000 is a - * Sun convention for "set to current server time". - * It's needed to make permissions checks for the - * "touch" program across v2 mounts to Solaris and - * Irix boxes work correctly. See description of - * sattr in section 6.1 of "NFS Illustrated" by - * Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5 - */ - *p++ = htonl(timep->tv_sec); - *p++ = htonl(1000000); - return p; -} - static inline __be32* xdr_decode_time(__be32 *p, struct timespec *timep) { @@ -178,6 +152,39 @@ static void encode_fhandle(struct xdr_stream *xdr, const struct nfs_fh *fh) memcpy(p, fh->data, NFS2_FHSIZE); } +/* + * 2.3.4. timeval + * + * struct timeval { + * unsigned int seconds; + * unsigned int useconds; + * }; + */ +static __be32 *xdr_encode_time(__be32 *p, const struct timespec *timep) +{ + *p++ = cpu_to_be32(timep->tv_sec); + if (timep->tv_nsec != 0) + *p++ = cpu_to_be32(timep->tv_nsec / NSEC_PER_USEC); + else + *p++ = cpu_to_be32(0); + return p; +} + +/* + * Passing the invalid value useconds=1000000 is a Sun convention for + * "set to current server time". It's needed to make permissions checks + * for the "touch" program across v2 mounts to Solaris and Irix servers + * work correctly. See description of sattr in section 6.1 of "NFS + * Illustrated" by Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5. + */ +static __be32 *xdr_encode_current_server_time(__be32 *p, + const struct timespec *timep) +{ + *p++ = cpu_to_be32(timep->tv_sec); + *p++ = cpu_to_be32(1000000); + return p; +} + /* * 2.3.6. sattr * -- cgit v1.2.2 From 858284932462cec260f3d1d7426aeb03f5dbc2ad Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:55:00 +0000 Subject: NFS: Use the "nfs_stat" enum for nfs_stat_to_errno()'s argument Clean up. To distinguish more clearly between the on-the-wire NFSERR_ value and our local errno values, use the proper type for the argument of nfs_stat_to_errno(). Add a documenting comment appropriate for a global function shared outside this source file. Signed-off-by: Chuck Lever Tested-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/internal.h | 2 +- fs/nfs/nfs2xdr.c | 18 ++++++++++-------- 2 files changed, 11 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index e6356b750b77..8c2d9d83771e 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -185,7 +185,7 @@ extern int __init nfs_init_directcache(void); extern void nfs_destroy_directcache(void); /* nfs2xdr.c */ -extern int nfs_stat_to_errno(int); +extern int nfs_stat_to_errno(enum nfs_stat); extern struct rpc_procinfo nfs_procedures[]; extern __be32 *nfs_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index c79977304af8..2da9824d432a 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -804,7 +804,7 @@ nfs_xdr_statfsres(struct rpc_rqst *req, __be32 *p, struct nfs2_fsstat *res) * We need to translate between nfs status return values and * the local errno values which may not be the same. */ -static struct { +static const struct { int stat; int errno; } nfs_errtbl[] = { @@ -844,20 +844,22 @@ static struct { { -1, -EIO } }; -/* - * Convert an NFS error code to a local one. - * This one is used jointly by NFSv2 and NFSv3. +/** + * nfs_stat_to_errno - convert an NFS status code to a local errno + * @status: NFS status code to convert + * + * Returns a local errno value, or -EIO if the NFS status code is + * not recognized. This function is used jointly by NFSv2 and NFSv3. */ -int -nfs_stat_to_errno(int stat) +int nfs_stat_to_errno(enum nfs_stat status) { int i; for (i = 0; nfs_errtbl[i].stat != -1; i++) { - if (nfs_errtbl[i].stat == stat) + if (nfs_errtbl[i].stat == (int)status) return nfs_errtbl[i].errno; } - dprintk("nfs_stat_to_errno: bad nfs status return value: %d\n", stat); + dprintk("NFS: Unrecognized nfs status value: %u\n", status); return nfs_errtbl[i].errno; } -- cgit v1.2.2 From f796f8b3ae292abb9cb2931e8db6fc1d69bba09d Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:55:10 +0000 Subject: NFS: Introduce new-style XDR decoding functions for NFSv2 We'd like to prevent local buffer overflows caused by malicious or broken servers. New xdr_stream style decoders can do that. For efficiency, we also eventually want to be able to pass xdr_streams from call_decode() to all XDR decoding functions, rather than building an xdr_stream in every XDR decoding function in the kernel. nfs_decode_dirent() is renamed to follow the naming convention of the other two dirent decoders. Static helper functions are left without the "inline" directive. This allows the compiler to choose automatically how to optimize these for size or speed. Signed-off-by: Chuck Lever Tested-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/internal.h | 2 +- fs/nfs/nfs2xdr.c | 564 +++++++++++++++++++++++++++++++++++++++++++++++++++++- fs/nfs/proc.c | 2 +- 3 files changed, 558 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 8c2d9d83771e..6c6a9955bae9 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -187,7 +187,7 @@ extern void nfs_destroy_directcache(void); /* nfs2xdr.c */ extern int nfs_stat_to_errno(enum nfs_stat); extern struct rpc_procinfo nfs_procedures[]; -extern __be32 *nfs_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); +extern __be32 *nfs2_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); /* nfs3xdr.c */ extern struct rpc_procinfo nfs3_procedures[]; diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 2da9824d432a..827d1b8ad55b 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -77,6 +77,16 @@ static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages, xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len); } +/* + * Handle decode buffer overflows out-of-line. + */ +static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) +{ + dprintk("NFS: %s prematurely hit the end of our receive buffer. " + "Remaining buffer length is %tu words.\n", + func, xdr->end - xdr->p); +} + /* * Common NFS XDR functions as inlines @@ -138,6 +148,74 @@ xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr) * or decoded inline. */ +/* + * typedef opaque nfsdata<>; + */ +static int decode_nfsdata(struct xdr_stream *xdr, struct nfs_readres *result) +{ + u32 recvd, count; + size_t hdrlen; + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + count = be32_to_cpup(p); + hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base; + recvd = xdr->buf->len - hdrlen; + if (unlikely(count > recvd)) + goto out_cheating; +out: + xdr_read_pages(xdr, count); + result->eof = 0; /* NFSv2 does not pass EOF flag on the wire. */ + result->count = count; + return count; +out_cheating: + dprintk("NFS: server cheating in read result: " + "count %u > recvd %u\n", count, recvd); + count = recvd; + goto out; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * enum stat { + * NFS_OK = 0, + * NFSERR_PERM = 1, + * NFSERR_NOENT = 2, + * NFSERR_IO = 5, + * NFSERR_NXIO = 6, + * NFSERR_ACCES = 13, + * NFSERR_EXIST = 17, + * NFSERR_NODEV = 19, + * NFSERR_NOTDIR = 20, + * NFSERR_ISDIR = 21, + * NFSERR_FBIG = 27, + * NFSERR_NOSPC = 28, + * NFSERR_ROFS = 30, + * NFSERR_NAMETOOLONG = 63, + * NFSERR_NOTEMPTY = 66, + * NFSERR_DQUOT = 69, + * NFSERR_STALE = 70, + * NFSERR_WFLUSH = 99 + * }; + */ +static int decode_stat(struct xdr_stream *xdr, enum nfs_stat *status) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + *status = be32_to_cpup(p); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + /* * 2.3.3. fhandle * @@ -152,6 +230,21 @@ static void encode_fhandle(struct xdr_stream *xdr, const struct nfs_fh *fh) memcpy(p, fh->data, NFS2_FHSIZE); } +static int decode_fhandle(struct xdr_stream *xdr, struct nfs_fh *fh) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, NFS2_FHSIZE); + if (unlikely(p == NULL)) + goto out_overflow; + fh->size = NFS2_FHSIZE; + memcpy(fh->data, p, NFS2_FHSIZE); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + /* * 2.3.4. 
timeval * @@ -185,6 +278,41 @@ static __be32 *xdr_encode_current_server_time(__be32 *p, return p; } +/* + * 2.3.5. fattr + * + * struct fattr { + * ftype type; + * unsigned int mode; + * unsigned int nlink; + * unsigned int uid; + * unsigned int gid; + * unsigned int size; + * unsigned int blocksize; + * unsigned int rdev; + * unsigned int blocks; + * unsigned int fsid; + * unsigned int fileid; + * timeval atime; + * timeval mtime; + * timeval ctime; + * }; + * + */ +static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, NFS_fattr_sz << 2); + if (unlikely(p == NULL)) + goto out_overflow; + xdr_decode_fattr(p, fattr); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + /* * 2.3.6. sattr * @@ -259,6 +387,32 @@ static void encode_filename(struct xdr_stream *xdr, xdr_encode_opaque(p, name, length); } +static int decode_filename_inline(struct xdr_stream *xdr, + const char **name, u32 *length) +{ + __be32 *p; + u32 count; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + count = be32_to_cpup(p); + if (count > NFS3_MAXNAMLEN) + goto out_nametoolong; + p = xdr_inline_decode(xdr, count); + if (unlikely(p == NULL)) + goto out_overflow; + *name = (const char *)p; + *length = count; + return 0; +out_nametoolong: + dprintk("NFS: returned filename too long: %u\n", count); + return -ENAMETOOLONG; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + /* * 2.3.8. path * @@ -274,6 +428,65 @@ static void encode_path(struct xdr_stream *xdr, struct page **pages, u32 length) xdr_write_pages(xdr, pages, 0, length); } +static int decode_path(struct xdr_stream *xdr) +{ + u32 length, recvd; + size_t hdrlen; + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + length = be32_to_cpup(p); + if (unlikely(length >= xdr->buf->page_len || length > NFS_MAXPATHLEN)) + goto out_size; + hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base; + recvd = xdr->buf->len - hdrlen; + if (unlikely(length > recvd)) + goto out_cheating; + + xdr_read_pages(xdr, length); + xdr_terminate_string(xdr->buf, length); + return 0; +out_size: + dprintk("NFS: returned pathname too long: %u\n", length); + return -ENAMETOOLONG; +out_cheating: + dprintk("NFS: server cheating in pathname result: " + "length %u > received %u\n", length, recvd); + return -EIO; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * 2.3.9. attrstat + * + * union attrstat switch (stat status) { + * case NFS_OK: + * fattr attributes; + * default: + * void; + * }; + */ +static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result) +{ + enum nfs_stat status; + int error; + + error = decode_stat(xdr, &status); + if (unlikely(error)) + goto out; + if (status != NFS_OK) + goto out_default; + error = decode_fattr(xdr, result); +out: + return error; +out_default: + return nfs_stat_to_errno(status); +} + /* * 2.3.10. diropargs * @@ -289,6 +502,48 @@ static void encode_diropargs(struct xdr_stream *xdr, const struct nfs_fh *fh, encode_filename(xdr, name, length); } +/* + * 2.3.11. 
diropres + * + * union diropres switch (stat status) { + * case NFS_OK: + * struct { + * fhandle file; + * fattr attributes; + * } diropok; + * default: + * void; + * }; + */ +static int decode_diropok(struct xdr_stream *xdr, struct nfs_diropok *result) +{ + int error; + + error = decode_fhandle(xdr, result->fh); + if (unlikely(error)) + goto out; + error = decode_fattr(xdr, result->fattr); +out: + return error; +} + +static int decode_diropres(struct xdr_stream *xdr, struct nfs_diropok *result) +{ + enum nfs_stat status; + int error; + + error = decode_stat(xdr, &status); + if (unlikely(error)) + goto out; + if (status != NFS_OK) + goto out_default; + error = decode_diropok(xdr, result); +out: + return error; +out_default: + return nfs_stat_to_errno(status); +} + /* * NFSv2 XDR encode functions @@ -630,13 +885,6 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy) return pglen; } -static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) -{ - dprintk("nfs: %s: prematurely hit end of receive buffer. " - "Remaining buffer length is %tu words.\n", - func, xdr->end - xdr->p); -} - __be32 * nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus) { @@ -700,6 +948,25 @@ nfs_xdr_stat(struct rpc_rqst *req, __be32 *p, void *dummy) return status; } +static int nfs2_xdr_dec_stat(struct rpc_rqst *req, __be32 *p, + void *__unused) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_stat(&xdr, &status); + if (unlikely(error)) + goto out; + if (status != NFS_OK) + goto out_default; +out: + return error; +out_default: + return nfs_stat_to_errno(status); +} + /* * Decode attrstat reply * GETATTR, SETATTR, WRITE @@ -715,6 +982,15 @@ nfs_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) return 0; } +static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, __be32 *p, + struct nfs_fattr *result) +{ + struct xdr_stream xdr; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + return decode_attrstat(&xdr, result); +} + /* * Decode diropres reply * LOOKUP, CREATE, MKDIR @@ -731,6 +1007,15 @@ nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res) return 0; } +static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, __be32 *p, + struct nfs_diropok *result) +{ + struct xdr_stream xdr; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + return decode_diropres(&xdr, result); +} + /* * Decode READLINK reply */ @@ -771,6 +1056,70 @@ nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy) return 0; } +/* + * 2.2.6. readlinkres + * + * union readlinkres switch (stat status) { + * case NFS_OK: + * path data; + * default: + * void; + * }; + */ +static int nfs2_xdr_dec_readlinkres(struct rpc_rqst *req, __be32 *p, + void *__unused) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_stat(&xdr, &status); + if (unlikely(error)) + goto out; + if (status != NFS_OK) + goto out_default; + error = decode_path(&xdr); +out: + return error; +out_default: + return nfs_stat_to_errno(status); +} + +/* + * 2.2.7. 
readres + * + * union readres switch (stat status) { + * case NFS_OK: + * fattr attributes; + * nfsdata data; + * default: + * void; + * }; + */ +static int nfs2_xdr_dec_readres(struct rpc_rqst *req, __be32 *p, + struct nfs_readres *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_stat(&xdr, &status); + if (unlikely(error)) + goto out; + if (status != NFS_OK) + goto out_default; + error = decode_fattr(&xdr, result->fattr); + if (unlikely(error)) + goto out; + error = decode_nfsdata(&xdr, result); +out: + return error; +out_default: + return nfs_stat_to_errno(status); +} + /* * Decode WRITE reply */ @@ -781,6 +1130,150 @@ nfs_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res) return nfs_xdr_attrstat(req, p, res->fattr); } +static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, __be32 *p, + struct nfs_writeres *result) +{ + struct xdr_stream xdr; + + /* All NFSv2 writes are "file sync" writes */ + result->verf->committed = NFS_FILE_SYNC; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + return decode_attrstat(&xdr, result->fattr); +} + +/** + * nfs2_decode_dirent - Decode a single NFSv2 directory entry stored in + * the local page cache. + * @xdr: XDR stream where entry resides + * @entry: buffer to fill in with entry data + * @server: nfs_server data for this directory + * @plus: boolean indicating whether this should be a readdirplus entry + * + * Returns the position of the next item in the buffer, or an ERR_PTR. + * + * This function is not invoked during READDIR reply decoding, but + * rather whenever an application invokes the getdents(2) system call + * on a directory already in our cache. + * + * 2.2.17. entry + * + * struct entry { + * unsigned fileid; + * filename name; + * nfscookie cookie; + * entry *nextentry; + * }; + */ +__be32 *nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, + struct nfs_server *server, int plus) +{ + __be32 *p; + int error; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + if (*p++ == xdr_zero) { + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + if (*p++ == xdr_zero) + return ERR_PTR(-EAGAIN); + entry->eof = 1; + return ERR_PTR(-EBADCOOKIE); + } + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + entry->ino = be32_to_cpup(p); + + error = decode_filename_inline(xdr, &entry->name, &entry->len); + if (unlikely(error)) + return ERR_PTR(error); + + /* + * The type (size and byte order) of nfscookie isn't defined in + * RFC 1094. This implementation assumes that it's an XDR uint32. + */ + entry->prev_cookie = entry->cookie; + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + entry->cookie = be32_to_cpup(p); + + entry->d_type = DT_UNKNOWN; + + /* Peek at the next entry to see if we're at EOD */ + p = xdr_inline_peek(xdr, 4 + 4); + entry->eof = 0; + if (p != NULL) + entry->eof = (p[0] == xdr_zero) && (p[1] != xdr_zero); + return p; + +out_overflow: + print_overflow_msg(__func__, xdr); + return ERR_PTR(-EAGAIN); +} + +/* + * 2.2.17. readdirres + * + * union readdirres switch (stat status) { + * case NFS_OK: + * struct { + * entry *entries; + * bool eof; + * } readdirok; + * default: + * void; + * }; + * + * Read the directory contents into the page cache, but don't + * touch them. The actual decoding is done by nfs2_decode_dirent() + * during subsequent nfs_readdir() calls. 
+ */ +static int decode_readdirok(struct xdr_stream *xdr) +{ + u32 recvd, pglen; + size_t hdrlen; + + pglen = xdr->buf->page_len; + hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base; + recvd = xdr->buf->len - hdrlen; + if (unlikely(pglen > recvd)) + goto out_cheating; +out: + xdr_read_pages(xdr, pglen); + return pglen; +out_cheating: + dprintk("NFS: server cheating in readdir result: " + "pglen %u > recvd %u\n", pglen, recvd); + pglen = recvd; + goto out; +} + +static int nfs2_xdr_dec_readdirres(struct rpc_rqst *req, __be32 *p, + void *__unused) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_stat(&xdr, &status); + if (unlikely(error)) + goto out; + if (status != NFS_OK) + goto out_default; + error = decode_readdirok(&xdr); +out: + return error; +out_default: + return nfs_stat_to_errno(status); +} + /* * Decode STATFS reply */ @@ -800,6 +1293,61 @@ nfs_xdr_statfsres(struct rpc_rqst *req, __be32 *p, struct nfs2_fsstat *res) return 0; } +/* + * 2.2.18. statfsres + * + * union statfsres (stat status) { + * case NFS_OK: + * struct { + * unsigned tsize; + * unsigned bsize; + * unsigned blocks; + * unsigned bfree; + * unsigned bavail; + * } info; + * default: + * void; + * }; + */ +static int decode_info(struct xdr_stream *xdr, struct nfs2_fsstat *result) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, NFS_info_sz << 2); + if (unlikely(p == NULL)) + goto out_overflow; + result->tsize = be32_to_cpup(p++); + result->bsize = be32_to_cpup(p++); + result->blocks = be32_to_cpup(p++); + result->bfree = be32_to_cpup(p++); + result->bavail = be32_to_cpup(p); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, __be32 *p, + struct nfs2_fsstat *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_stat(&xdr, &status); + if (unlikely(error)) + goto out; + if (status != NFS_OK) + goto out_default; + error = decode_info(&xdr, result); +out: + return error; +out_default: + return nfs_stat_to_errno(status); +} + + /* * We need to translate between nfs status return values and * the local errno values which may not be the same. @@ -867,7 +1415,7 @@ int nfs_stat_to_errno(enum nfs_stat status) [NFSPROC_##proc] = { \ .p_proc = NFSPROC_##proc, \ .p_encode = (kxdrproc_t)nfs2_xdr_enc_##argtype, \ - .p_decode = (kxdrproc_t) nfs_xdr_##restype, \ + .p_decode = (kxdrproc_t)nfs2_xdr_dec_##restype, \ .p_arglen = NFS_##argtype##_sz, \ .p_replen = NFS_##restype##_sz, \ .p_timer = timer, \ diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 58e7f84fc1fd..00df60523aac 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -731,7 +731,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = { .statfs = nfs_proc_statfs, .fsinfo = nfs_proc_fsinfo, .pathconf = nfs_proc_pathconf, - .decode_dirent = nfs_decode_dirent, + .decode_dirent = nfs2_decode_dirent, .read_setup = nfs_proc_read_setup, .read_done = nfs_read_done, .write_setup = nfs_proc_write_setup, -- cgit v1.2.2 From 661ad4239a51a2169a366a227c68cf3b654ab936 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:55:20 +0000 Subject: NFS: Replace old NFSv2 decoder functions with xdr_stream-based ones Clean up. Remove unused legacy result decoder functions, and any now unused decoder helper functions. Signed-off-by: Chuck Lever Tested-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs2xdr.c | 253 +------------------------------------------------------ 1 file changed, 4 insertions(+), 249 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 827d1b8ad55b..ae751163da8b 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -91,15 +91,6 @@ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) /* * Common NFS XDR functions as inlines */ -static inline __be32 * -xdr_decode_fhandle(__be32 *p, struct nfs_fh *fhandle) -{ - /* NFSv2 handles have a fixed length */ - fhandle->size = NFS2_FHSIZE; - memcpy(fhandle->data, p, NFS2_FHSIZE); - return p + XDR_QUADLEN(NFS2_FHSIZE); -} - static inline __be32* xdr_decode_time(__be32 *p, struct timespec *timep) { @@ -641,48 +632,6 @@ static int nfs2_xdr_enc_readargs(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Decode READ reply - */ -static int -nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res) -{ - struct kvec *iov = req->rq_rcv_buf.head; - size_t hdrlen; - u32 count, recvd; - int status; - - if ((status = ntohl(*p++))) - return nfs_stat_to_errno(status); - p = xdr_decode_fattr(p, res->fattr); - - count = ntohl(*p++); - res->eof = 0; - hdrlen = (u8 *) p - (u8 *) iov->iov_base; - if (iov->iov_len < hdrlen) { - dprintk("NFS: READ reply header overflowed:" - "length %Zu > %Zu\n", hdrlen, iov->iov_len); - return -errno_NFSERR_IO; - } else if (iov->iov_len != hdrlen) { - dprintk("NFS: READ header is short. iovec will be shifted.\n"); - xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen); - } - - recvd = req->rq_rcv_buf.len - hdrlen; - if (count > recvd) { - dprintk("NFS: server cheating in read reply: " - "count %u > recvd %u\n", count, recvd); - count = recvd; - } - - dprintk("RPC: readres OK count %u\n", count); - if (count < res->count) - res->count = count; - - return count; -} - - /* * 2.2.9. writeargs * @@ -848,105 +797,11 @@ static int nfs2_xdr_enc_readdirargs(struct rpc_rqst *req, __be32 *p, } /* - * Decode the result of a readdir call. - * We're not really decoding anymore, we just leave the buffer untouched - * and only check that it is syntactically correct. - * The real decoding happens in nfs_decode_entry below, called directly - * from nfs_readdir for each entry. - */ -static int -nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy) -{ - struct xdr_buf *rcvbuf = &req->rq_rcv_buf; - struct kvec *iov = rcvbuf->head; - struct page **page; - size_t hdrlen; - unsigned int pglen, recvd; - int status; - - if ((status = ntohl(*p++))) - return nfs_stat_to_errno(status); - - hdrlen = (u8 *) p - (u8 *) iov->iov_base; - if (iov->iov_len < hdrlen) { - dprintk("NFS: READDIR reply header overflowed:" - "length %Zu > %Zu\n", hdrlen, iov->iov_len); - return -errno_NFSERR_IO; - } else if (iov->iov_len != hdrlen) { - dprintk("NFS: READDIR header is short. 
iovec will be shifted.\n"); - xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen); - } - - pglen = rcvbuf->page_len; - recvd = rcvbuf->len - hdrlen; - if (pglen > recvd) - pglen = recvd; - page = rcvbuf->pages; - return pglen; -} - -__be32 * -nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus) -{ - __be32 *p; - p = xdr_inline_decode(xdr, 4); - if (unlikely(!p)) - goto out_overflow; - if (!ntohl(*p++)) { - p = xdr_inline_decode(xdr, 4); - if (unlikely(!p)) - goto out_overflow; - if (!ntohl(*p++)) - return ERR_PTR(-EAGAIN); - entry->eof = 1; - return ERR_PTR(-EBADCOOKIE); - } - - p = xdr_inline_decode(xdr, 8); - if (unlikely(!p)) - goto out_overflow; - - entry->ino = ntohl(*p++); - entry->len = ntohl(*p++); - - p = xdr_inline_decode(xdr, entry->len + 4); - if (unlikely(!p)) - goto out_overflow; - entry->name = (const char *) p; - p += XDR_QUADLEN(entry->len); - entry->prev_cookie = entry->cookie; - entry->cookie = ntohl(*p++); - - entry->d_type = DT_UNKNOWN; - - p = xdr_inline_peek(xdr, 8); - if (p != NULL) - entry->eof = !p[0] && p[1]; - else - entry->eof = 0; - - return p; - -out_overflow: - print_overflow_msg(__func__, xdr); - return ERR_PTR(-EAGAIN); -} - -/* - * NFS XDR decode functions - */ -/* - * Decode simple status reply + * NFSv2 XDR decode functions + * + * NFSv2 result types are defined in section 2.2 of RFC 1094: + * "NFS: Network File System Protocol Specification". */ -static int -nfs_xdr_stat(struct rpc_rqst *req, __be32 *p, void *dummy) -{ - int status; - - if ((status = ntohl(*p++)) != 0) - status = nfs_stat_to_errno(status); - return status; -} static int nfs2_xdr_dec_stat(struct rpc_rqst *req, __be32 *p, void *__unused) @@ -967,21 +822,6 @@ out_default: return nfs_stat_to_errno(status); } -/* - * Decode attrstat reply - * GETATTR, SETATTR, WRITE - */ -static int -nfs_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) -{ - int status; - - if ((status = ntohl(*p++))) - return nfs_stat_to_errno(status); - xdr_decode_fattr(p, fattr); - return 0; -} - static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *result) { @@ -991,22 +831,6 @@ static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, __be32 *p, return decode_attrstat(&xdr, result); } -/* - * Decode diropres reply - * LOOKUP, CREATE, MKDIR - */ -static int -nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res) -{ - int status; - - if ((status = ntohl(*p++))) - return nfs_stat_to_errno(status); - p = xdr_decode_fhandle(p, res->fh); - xdr_decode_fattr(p, res->fattr); - return 0; -} - static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *result) { @@ -1016,46 +840,6 @@ static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, __be32 *p, return decode_diropres(&xdr, result); } -/* - * Decode READLINK reply - */ -static int -nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy) -{ - struct xdr_buf *rcvbuf = &req->rq_rcv_buf; - struct kvec *iov = rcvbuf->head; - size_t hdrlen; - u32 len, recvd; - int status; - - if ((status = ntohl(*p++))) - return nfs_stat_to_errno(status); - /* Convert length of symlink */ - len = ntohl(*p++); - if (len >= rcvbuf->page_len) { - dprintk("nfs: server returned giant symlink!\n"); - return -ENAMETOOLONG; - } - hdrlen = (u8 *) p - (u8 *) iov->iov_base; - if (iov->iov_len < hdrlen) { - dprintk("NFS: READLINK reply header overflowed:" - "length %Zu > %Zu\n", hdrlen, iov->iov_len); - return -errno_NFSERR_IO; - } else if (iov->iov_len != hdrlen) { 
- dprintk("NFS: READLINK header is short. iovec will be shifted.\n"); - xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen); - } - recvd = req->rq_rcv_buf.len - hdrlen; - if (recvd < len) { - dprintk("NFS: server cheating in readlink reply: " - "count %u > recvd %u\n", len, recvd); - return -EIO; - } - - xdr_terminate_string(rcvbuf, len); - return 0; -} - /* * 2.2.6. readlinkres * @@ -1120,16 +904,6 @@ out_default: return nfs_stat_to_errno(status); } -/* - * Decode WRITE reply - */ -static int -nfs_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res) -{ - res->verf->committed = NFS_FILE_SYNC; - return nfs_xdr_attrstat(req, p, res->fattr); -} - static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *result) { @@ -1274,25 +1048,6 @@ out_default: return nfs_stat_to_errno(status); } -/* - * Decode STATFS reply - */ -static int -nfs_xdr_statfsres(struct rpc_rqst *req, __be32 *p, struct nfs2_fsstat *res) -{ - int status; - - if ((status = ntohl(*p++))) - return nfs_stat_to_errno(status); - - res->tsize = ntohl(*p++); - res->bsize = ntohl(*p++); - res->blocks = ntohl(*p++); - res->bfree = ntohl(*p++); - res->bavail = ntohl(*p++); - return 0; -} - /* * 2.2.18. statfsres * -- cgit v1.2.2 From 5f96e5e31b4f4a2f126adfe0586a7555c11b0562 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:55:30 +0000 Subject: NFS: Move and update xdr_decode_foo() functions that we're keeping Clean up. Move the timestamp decoder to match the placement and naming conventions of the other helpers. Fold xdr_decode_fattr() into decode_fattr(), which is now it's only user. Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs2xdr.c | 97 ++++++++++++++++++++++++++++++++------------------------ 1 file changed, 56 insertions(+), 41 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index ae751163da8b..70df08a84ead 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -88,46 +88,6 @@ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) } -/* - * Common NFS XDR functions as inlines - */ -static inline __be32* -xdr_decode_time(__be32 *p, struct timespec *timep) -{ - timep->tv_sec = ntohl(*p++); - /* Convert microseconds into nanoseconds */ - timep->tv_nsec = ntohl(*p++) * 1000; - return p; -} - -static __be32 * -xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr) -{ - u32 rdev, type; - type = ntohl(*p++); - fattr->mode = ntohl(*p++); - fattr->nlink = ntohl(*p++); - fattr->uid = ntohl(*p++); - fattr->gid = ntohl(*p++); - fattr->size = ntohl(*p++); - fattr->du.nfs2.blocksize = ntohl(*p++); - rdev = ntohl(*p++); - fattr->du.nfs2.blocks = ntohl(*p++); - fattr->fsid.major = ntohl(*p++); - fattr->fsid.minor = 0; - fattr->fileid = ntohl(*p++); - p = xdr_decode_time(p, &fattr->atime); - p = xdr_decode_time(p, &fattr->mtime); - p = xdr_decode_time(p, &fattr->ctime); - fattr->valid |= NFS_ATTR_FATTR_V2; - fattr->rdev = new_decode_dev(rdev); - if (type == NFCHR && rdev == NFS2_FIFO_DEV) { - fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO; - fattr->rdev = 0; - } - return p; -} - /* * Encode/decode NFSv2 basic data types * @@ -207,6 +167,27 @@ out_overflow: return -EIO; } +/* + * 2.3.2. ftype + * + * enum ftype { + * NFNON = 0, + * NFREG = 1, + * NFDIR = 2, + * NFBLK = 3, + * NFCHR = 4, + * NFLNK = 5 + * }; + * + */ +static __be32 *xdr_decode_ftype(__be32 *p, u32 *type) +{ + *type = be32_to_cpup(p++); + if (unlikely(*type > NF2FIFO)) + *type = NFBAD; + return p; +} + /* * 2.3.3. 
fhandle * @@ -269,6 +250,13 @@ static __be32 *xdr_encode_current_server_time(__be32 *p, return p; } +static __be32 *xdr_decode_time(__be32 *p, struct timespec *timep) +{ + timep->tv_sec = be32_to_cpup(p++); + timep->tv_nsec = be32_to_cpup(p++) * NSEC_PER_USEC; + return p; +} + /* * 2.3.5. fattr * @@ -292,12 +280,39 @@ static __be32 *xdr_encode_current_server_time(__be32 *p, */ static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr) { + u32 rdev, type; __be32 *p; p = xdr_inline_decode(xdr, NFS_fattr_sz << 2); if (unlikely(p == NULL)) goto out_overflow; - xdr_decode_fattr(p, fattr); + + fattr->valid |= NFS_ATTR_FATTR_V2; + + p = xdr_decode_ftype(p, &type); + + fattr->mode = be32_to_cpup(p++); + fattr->nlink = be32_to_cpup(p++); + fattr->uid = be32_to_cpup(p++); + fattr->gid = be32_to_cpup(p++); + fattr->size = be32_to_cpup(p++); + fattr->du.nfs2.blocksize = be32_to_cpup(p++); + + rdev = be32_to_cpup(p++); + fattr->rdev = new_decode_dev(rdev); + if (type == (u32)NFCHR && rdev == (u32)NFS2_FIFO_DEV) { + fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO; + fattr->rdev = 0; + } + + fattr->du.nfs2.blocks = be32_to_cpup(p++); + fattr->fsid.major = be32_to_cpup(p++); + fattr->fsid.minor = 0; + fattr->fileid = be32_to_cpup(p++); + + p = xdr_decode_time(p, &fattr->atime); + p = xdr_decode_time(p, &fattr->mtime); + xdr_decode_time(p, &fattr->ctime); return 0; out_overflow: print_overflow_msg(__func__, xdr); -- cgit v1.2.2 From 2b061f9ef216b6d229b06267f188167fd6ab3d9b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:55:40 +0000 Subject: lockd: Introduce new-style XDR functions for NLMv3 We'd like to prevent local buffer overflows caused by malicious or broken servers. New xdr_stream style decoders can do that. For efficiency, we also eventually want to be able to pass xdr_streams from call_encode() and call_decode() to all XDR encoding functions, rather than building an xdr_stream in every XDR encoding and decoding function in the kernel. To do all of this, rewrite the XDR encoding and decoding functions in fs/lockd/xdr.c to use xdr_streams. This makes them more or less incompatible with server-side XDR helper functions, so break them out into a separate source file. Static helper functions are left without the "inline" directive. This allows the compiler to choose automatically how to optimize these for size or speed. SHARE-related functionality doesn't seem to be used, as those functions are hiding behind a #define that isn't set anywhere that I can find. And, they've been in there forever (at least as far back as the kernel's git history goes), yet remain unused. Let's take the opportunity to bin them. It should be easy enough for someone to introduce proper XDR functions if at some point SHARE-related NLM functionality is desired. Signed-off-by: Chuck Lever Tested-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- fs/lockd/Makefile | 4 +- fs/lockd/clntxdr.c | 643 +++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/lockd/xdr.c | 258 --------------------- 3 files changed, 645 insertions(+), 260 deletions(-) create mode 100644 fs/lockd/clntxdr.c (limited to 'fs') diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile index 97f6073ab339..d0488b3bd00b 100644 --- a/fs/lockd/Makefile +++ b/fs/lockd/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_LOCKD) += lockd.o -lockd-objs-y := clntlock.o clntproc.o host.o svc.o svclock.o svcshare.o \ - svcproc.o svcsubs.o mon.o xdr.o grace.o +lockd-objs-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \ + svcshare.o svcproc.o svcsubs.o mon.o xdr.o grace.o lockd-objs-$(CONFIG_LOCKD_V4) += xdr4.o svc4proc.o lockd-objs := $(lockd-objs-y) diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c new file mode 100644 index 000000000000..0472f2aff509 --- /dev/null +++ b/fs/lockd/clntxdr.c @@ -0,0 +1,643 @@ +/* + * linux/fs/lockd/clntxdr.c + * + * XDR functions to encode/decode NLM version 3 RPC arguments and results. + * NLM version 3 is backwards compatible with NLM versions 1 and 2. + * + * NLM client-side only. + * + * Copyright (C) 2010, Oracle. All rights reserved. + */ + +#include +#include +#include +#include +#include + +#define NLMDBG_FACILITY NLMDBG_XDR + +#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ) +# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!" +#endif + +/* + * Declare the space requirements for NLM arguments and replies as + * number of 32bit-words + */ +#define NLM_cookie_sz (1+(NLM_MAXCOOKIELEN>>2)) +#define NLM_caller_sz (1+(NLMCLNT_OHSIZE>>2)) +#define NLM_owner_sz (1+(NLMCLNT_OHSIZE>>2)) +#define NLM_fhandle_sz (1+(NFS2_FHSIZE>>2)) +#define NLM_lock_sz (3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz) +#define NLM_holder_sz (4+NLM_owner_sz) + +#define NLM_testargs_sz (NLM_cookie_sz+1+NLM_lock_sz) +#define NLM_lockargs_sz (NLM_cookie_sz+4+NLM_lock_sz) +#define NLM_cancargs_sz (NLM_cookie_sz+2+NLM_lock_sz) +#define NLM_unlockargs_sz (NLM_cookie_sz+NLM_lock_sz) + +#define NLM_testres_sz (NLM_cookie_sz+1+NLM_holder_sz) +#define NLM_res_sz (NLM_cookie_sz+1) +#define NLM_norep_sz (0) + + +static s32 loff_t_to_s32(loff_t offset) +{ + s32 res; + + if (offset >= NLM_OFFSET_MAX) + res = NLM_OFFSET_MAX; + else if (offset <= -NLM_OFFSET_MAX) + res = -NLM_OFFSET_MAX; + else + res = offset; + return res; +} + +static void nlm_compute_offsets(const struct nlm_lock *lock, + u32 *l_offset, u32 *l_len) +{ + const struct file_lock *fl = &lock->fl; + + BUG_ON(fl->fl_start > NLM_OFFSET_MAX); + BUG_ON(fl->fl_end > NLM_OFFSET_MAX && + fl->fl_end != OFFSET_MAX); + + *l_offset = loff_t_to_s32(fl->fl_start); + if (fl->fl_end == OFFSET_MAX) + *l_len = 0; + else + *l_len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1); +} + +/* + * Handle decode buffer overflows out-of-line. + */ +static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) +{ + dprintk("lockd: %s prematurely hit the end of our receive buffer. " + "Remaining buffer length is %tu words.\n", + func, xdr->end - xdr->p); +} + + +/* + * Encode/decode NLMv3 basic data types + * + * Basic NLMv3 data types are not defined in an IETF standards + * document. X/Open has a description of these data types that + * is useful. See Chapter 10 of "Protocols for Interworking: + * XNFS, Version 3W". + * + * Not all basic data types have their own encoding and decoding + * functions. For run-time efficiency, some data types are encoded + * or decoded inline. 
+ */ + +static void encode_bool(struct xdr_stream *xdr, const int value) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, 4); + *p = value ? xdr_one : xdr_zero; +} + +static void encode_int32(struct xdr_stream *xdr, const s32 value) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, 4); + *p = cpu_to_be32(value); +} + +/* + * typedef opaque netobj + */ +static void encode_netobj(struct xdr_stream *xdr, + const u8 *data, const unsigned int length) +{ + __be32 *p; + + BUG_ON(length > XDR_MAX_NETOBJ); + p = xdr_reserve_space(xdr, 4 + length); + xdr_encode_opaque(p, data, length); +} + +static int decode_netobj(struct xdr_stream *xdr, + struct xdr_netobj *obj) +{ + u32 length; + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + length = be32_to_cpup(p++); + if (unlikely(length > XDR_MAX_NETOBJ)) + goto out_size; + obj->len = length; + obj->data = (u8 *)p; + return 0; +out_size: + dprintk("NFS: returned netobj was too long: %u\n", length); + return -EIO; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * netobj cookie; + */ +static void encode_cookie(struct xdr_stream *xdr, + const struct nlm_cookie *cookie) +{ + BUG_ON(cookie->len > NLM_MAXCOOKIELEN); + encode_netobj(xdr, (u8 *)&cookie->data, cookie->len); +} + +static int decode_cookie(struct xdr_stream *xdr, + struct nlm_cookie *cookie) +{ + u32 length; + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + length = be32_to_cpup(p++); + /* apparently HPUX can return empty cookies */ + if (length == 0) + goto out_hpux; + if (length > NLM_MAXCOOKIELEN) + goto out_size; + p = xdr_inline_decode(xdr, length); + if (unlikely(p == NULL)) + goto out_overflow; + cookie->len = length; + memcpy(cookie->data, p, length); + return 0; +out_hpux: + cookie->len = 4; + memset(cookie->data, 0, 4); + return 0; +out_size: + dprintk("NFS: returned cookie was too long: %u\n", length); + return -EIO; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * netobj fh; + */ +static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh) +{ + BUG_ON(fh->size != NFS2_FHSIZE); + encode_netobj(xdr, (u8 *)&fh->data, NFS2_FHSIZE); +} + +/* + * enum nlm_stats { + * LCK_GRANTED = 0, + * LCK_DENIED = 1, + * LCK_DENIED_NOLOCKS = 2, + * LCK_BLOCKED = 3, + * LCK_DENIED_GRACE_PERIOD = 4 + * }; + * + * + * struct nlm_stat { + * nlm_stats stat; + * }; + * + * NB: we don't swap bytes for the NLM status values. The upper + * layers deal directly with the status value in network byte + * order. 
+ */ + +static void encode_nlm_stat(struct xdr_stream *xdr, + const __be32 stat) +{ + __be32 *p; + + BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD); + p = xdr_reserve_space(xdr, 4); + *p = stat; +} + +static int decode_nlm_stat(struct xdr_stream *xdr, + __be32 *stat) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + if (unlikely(*p > nlm_lck_denied_grace_period)) + goto out_enum; + *stat = *p; + return 0; +out_enum: + dprintk("%s: server returned invalid nlm_stats value: %u\n", + __func__, be32_to_cpup(p)); + return -EIO; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * struct nlm_holder { + * bool exclusive; + * int uppid; + * netobj oh; + * unsigned l_offset; + * unsigned l_len; + * }; + */ +static void encode_nlm_holder(struct xdr_stream *xdr, + const struct nlm_res *result) +{ + const struct nlm_lock *lock = &result->lock; + u32 l_offset, l_len; + __be32 *p; + + encode_bool(xdr, lock->fl.fl_type == F_RDLCK); + encode_int32(xdr, lock->svid); + encode_netobj(xdr, lock->oh.data, lock->oh.len); + + p = xdr_reserve_space(xdr, 4 + 4); + nlm_compute_offsets(lock, &l_offset, &l_len); + *p++ = cpu_to_be32(l_offset); + *p = cpu_to_be32(l_len); +} + +static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result) +{ + struct nlm_lock *lock = &result->lock; + struct file_lock *fl = &lock->fl; + u32 exclusive, l_offset, l_len; + int error; + __be32 *p; + s32 end; + + memset(lock, 0, sizeof(*lock)); + locks_init_lock(fl); + + p = xdr_inline_decode(xdr, 4 + 4); + if (unlikely(p == NULL)) + goto out_overflow; + exclusive = be32_to_cpup(p++); + lock->svid = be32_to_cpup(p); + fl->fl_pid = (pid_t)lock->svid; + + error = decode_netobj(xdr, &lock->oh); + if (unlikely(error)) + goto out; + + p = xdr_inline_decode(xdr, 4 + 4); + if (unlikely(p == NULL)) + goto out_overflow; + + fl->fl_flags = FL_POSIX; + fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK; + l_offset = be32_to_cpup(p++); + l_len = be32_to_cpup(p); + end = l_offset + l_len - 1; + + fl->fl_start = (loff_t)l_offset; + if (l_len == 0 || end < 0) + fl->fl_end = OFFSET_MAX; + else + fl->fl_end = (loff_t)end; + error = 0; +out: + return error; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * string caller_name; + */ +static void encode_caller_name(struct xdr_stream *xdr, const char *name) +{ + /* NB: client-side does not set lock->len */ + u32 length = strlen(name); + __be32 *p; + + BUG_ON(length > NLM_MAXSTRLEN); + p = xdr_reserve_space(xdr, 4 + length); + xdr_encode_opaque(p, name, length); +} + +/* + * struct nlm_lock { + * string caller_name; + * netobj fh; + * netobj oh; + * int uppid; + * unsigned l_offset; + * unsigned l_len; + * }; + */ +static void encode_nlm_lock(struct xdr_stream *xdr, + const struct nlm_lock *lock) +{ + u32 l_offset, l_len; + __be32 *p; + + encode_caller_name(xdr, lock->caller); + encode_fh(xdr, &lock->fh); + encode_netobj(xdr, lock->oh.data, lock->oh.len); + + p = xdr_reserve_space(xdr, 4 + 4 + 4); + *p++ = cpu_to_be32(lock->svid); + + nlm_compute_offsets(lock, &l_offset, &l_len); + *p++ = cpu_to_be32(l_offset); + *p = cpu_to_be32(l_len); +} + + +/* + * NLMv3 XDR encode functions + * + * NLMv3 argument types are defined in Chapter 10 of The Open Group's + * "Protocols for Interworking: XNFS, Version 3W". 
+ */ + +/* + * struct nlm_testargs { + * netobj cookie; + * bool exclusive; + * struct nlm_lock alock; + * }; + */ +static int nlm_xdr_enc_testargs(struct rpc_rqst *req, __be32 *p, + const struct nlm_args *args) +{ + const struct nlm_lock *lock = &args->lock; + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_cookie(&xdr, &args->cookie); + encode_bool(&xdr, lock->fl.fl_type == F_WRLCK); + encode_nlm_lock(&xdr, lock); + return 0; +} + +/* + * struct nlm_lockargs { + * netobj cookie; + * bool block; + * bool exclusive; + * struct nlm_lock alock; + * bool reclaim; + * int state; + * }; + */ +static int nlm_xdr_enc_lockargs(struct rpc_rqst *req, __be32 *p, + const struct nlm_args *args) +{ + const struct nlm_lock *lock = &args->lock; + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_cookie(&xdr, &args->cookie); + encode_bool(&xdr, args->block); + encode_bool(&xdr, lock->fl.fl_type == F_WRLCK); + encode_nlm_lock(&xdr, lock); + encode_bool(&xdr, args->reclaim); + encode_int32(&xdr, args->state); + return 0; +} + +/* + * struct nlm_cancargs { + * netobj cookie; + * bool block; + * bool exclusive; + * struct nlm_lock alock; + * }; + */ +static int nlm_xdr_enc_cancargs(struct rpc_rqst *req, __be32 *p, + const struct nlm_args *args) +{ + const struct nlm_lock *lock = &args->lock; + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_cookie(&xdr, &args->cookie); + encode_bool(&xdr, args->block); + encode_bool(&xdr, lock->fl.fl_type == F_WRLCK); + encode_nlm_lock(&xdr, lock); + return 0; +} + +/* + * struct nlm_unlockargs { + * netobj cookie; + * struct nlm_lock alock; + * }; + */ +static int nlm_xdr_enc_unlockargs(struct rpc_rqst *req, __be32 *p, + const struct nlm_args *args) +{ + const struct nlm_lock *lock = &args->lock; + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_cookie(&xdr, &args->cookie); + encode_nlm_lock(&xdr, lock); + return 0; +} + +/* + * struct nlm_res { + * netobj cookie; + * nlm_stat stat; + * }; + */ +static int nlm_xdr_enc_res(struct rpc_rqst *req, __be32 *p, + const struct nlm_res *result) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_cookie(&xdr, &result->cookie); + encode_nlm_stat(&xdr, result->status); + return 0; +} + +/* + * union nlm_testrply switch (nlm_stats stat) { + * case LCK_DENIED: + * struct nlm_holder holder; + * default: + * void; + * }; + * + * struct nlm_testres { + * netobj cookie; + * nlm_testrply test_stat; + * }; + */ +static void encode_nlm_testrply(struct xdr_stream *xdr, + const struct nlm_res *result) +{ + if (result->status == nlm_lck_denied) + encode_nlm_holder(xdr, result); +} + +static int nlm_xdr_enc_testres(struct rpc_rqst *req, __be32 *p, + const struct nlm_res *result) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_cookie(&xdr, &result->cookie); + encode_nlm_stat(&xdr, result->status); + encode_nlm_testrply(&xdr, result); + return 0; +} + + +/* + * NLMv3 XDR decode functions + * + * NLMv3 result types are defined in Chapter 10 of The Open Group's + * "Protocols for Interworking: XNFS, Version 3W". 
+ */ + +/* + * union nlm_testrply switch (nlm_stats stat) { + * case LCK_DENIED: + * struct nlm_holder holder; + * default: + * void; + * }; + * + * struct nlm_testres { + * netobj cookie; + * nlm_testrply test_stat; + * }; + */ +static int decode_nlm_testrply(struct xdr_stream *xdr, + struct nlm_res *result) +{ + int error; + + error = decode_nlm_stat(xdr, &result->status); + if (unlikely(error)) + goto out; + if (result->status == nlm_lck_denied) + error = decode_nlm_holder(xdr, result); +out: + return error; +} + +static int nlm_xdr_dec_testres(struct rpc_rqst *req, __be32 *p, + struct nlm_res *result) +{ + struct xdr_stream xdr; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_cookie(&xdr, &result->cookie); + if (unlikely(error)) + goto out; + error = decode_nlm_testrply(&xdr, result); +out: + return error; +} + +/* + * struct nlm_res { + * netobj cookie; + * nlm_stat stat; + * }; + */ +static int nlm_xdr_dec_res(struct rpc_rqst *req, __be32 *p, + struct nlm_res *result) +{ + struct xdr_stream xdr; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_cookie(&xdr, &result->cookie); + if (unlikely(error)) + goto out; + error = decode_nlm_stat(&xdr, &result->status); +out: + return error; +} + + +/* + * For NLM, a void procedure really returns nothing + */ +#define nlm_xdr_dec_norep NULL + +#define PROC(proc, argtype, restype) \ +[NLMPROC_##proc] = { \ + .p_proc = NLMPROC_##proc, \ + .p_encode = (kxdrproc_t)nlm_xdr_enc_##argtype, \ + .p_decode = (kxdrproc_t)nlm_xdr_dec_##restype, \ + .p_arglen = NLM_##argtype##_sz, \ + .p_replen = NLM_##restype##_sz, \ + .p_statidx = NLMPROC_##proc, \ + .p_name = #proc, \ + } + +static struct rpc_procinfo nlm_procedures[] = { + PROC(TEST, testargs, testres), + PROC(LOCK, lockargs, res), + PROC(CANCEL, cancargs, res), + PROC(UNLOCK, unlockargs, res), + PROC(GRANTED, testargs, res), + PROC(TEST_MSG, testargs, norep), + PROC(LOCK_MSG, lockargs, norep), + PROC(CANCEL_MSG, cancargs, norep), + PROC(UNLOCK_MSG, unlockargs, norep), + PROC(GRANTED_MSG, testargs, norep), + PROC(TEST_RES, testres, norep), + PROC(LOCK_RES, res, norep), + PROC(CANCEL_RES, res, norep), + PROC(UNLOCK_RES, res, norep), + PROC(GRANTED_RES, res, norep), +}; + +static struct rpc_version nlm_version1 = { + .number = 1, + .nrprocs = ARRAY_SIZE(nlm_procedures), + .procs = nlm_procedures, +}; + +static struct rpc_version nlm_version3 = { + .number = 3, + .nrprocs = ARRAY_SIZE(nlm_procedures), + .procs = nlm_procedures, +}; + +static struct rpc_version *nlm_versions[] = { + [1] = &nlm_version1, + [3] = &nlm_version3, +#ifdef CONFIG_LOCKD_V4 + [4] = &nlm_version4, +#endif +}; + +static struct rpc_stat nlm_rpc_stats; + +struct rpc_program nlm_program = { + .name = "lockd", + .number = NLM_PROGRAM, + .nrvers = ARRAY_SIZE(nlm_versions), + .version = nlm_versions, + .stats = &nlm_rpc_stats, +}; diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c index b583ab0a4cbb..0eb694dc497b 100644 --- a/fs/lockd/xdr.c +++ b/fs/lockd/xdr.c @@ -148,37 +148,6 @@ nlm_decode_lock(__be32 *p, struct nlm_lock *lock) return p; } -/* - * Encode a lock as part of an NLM call - */ -static __be32 * -nlm_encode_lock(__be32 *p, struct nlm_lock *lock) -{ - struct file_lock *fl = &lock->fl; - __s32 start, len; - - if (!(p = xdr_encode_string(p, lock->caller)) - || !(p = nlm_encode_fh(p, &lock->fh)) - || !(p = nlm_encode_oh(p, &lock->oh))) - return NULL; - - if (fl->fl_start > NLM_OFFSET_MAX - || (fl->fl_end > NLM_OFFSET_MAX && fl->fl_end != OFFSET_MAX)) - return NULL; - - start = 
loff_t_to_s32(fl->fl_start); - if (fl->fl_end == OFFSET_MAX) - len = 0; - else - len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1); - - *p++ = htonl(lock->svid); - *p++ = htonl(start); - *p++ = htonl(len); - - return p; -} - /* * Encode result of a TEST/TEST_MSG call */ @@ -373,233 +342,6 @@ nlmsvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) return xdr_ressize_check(rqstp, p); } -/* - * Now, the client side XDR functions - */ -#ifdef NLMCLNT_SUPPORT_SHARES -static int -nlmclt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr) -{ - return 0; -} -#endif - -static int -nlmclt_encode_testargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp) -{ - struct nlm_lock *lock = &argp->lock; - - if (!(p = nlm_encode_cookie(p, &argp->cookie))) - return -EIO; - *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero; - if (!(p = nlm_encode_lock(p, lock))) - return -EIO; - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -static int -nlmclt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) -{ - if (!(p = nlm_decode_cookie(p, &resp->cookie))) - return -EIO; - resp->status = *p++; - if (resp->status == nlm_lck_denied) { - struct file_lock *fl = &resp->lock.fl; - u32 excl; - s32 start, len, end; - - memset(&resp->lock, 0, sizeof(resp->lock)); - locks_init_lock(fl); - excl = ntohl(*p++); - resp->lock.svid = ntohl(*p++); - fl->fl_pid = (pid_t)resp->lock.svid; - if (!(p = nlm_decode_oh(p, &resp->lock.oh))) - return -EIO; - - fl->fl_flags = FL_POSIX; - fl->fl_type = excl? F_WRLCK : F_RDLCK; - start = ntohl(*p++); - len = ntohl(*p++); - end = start + len - 1; - - fl->fl_start = s32_to_loff_t(start); - if (len == 0 || end < 0) - fl->fl_end = OFFSET_MAX; - else - fl->fl_end = s32_to_loff_t(end); - } - return 0; -} - - -static int -nlmclt_encode_lockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp) -{ - struct nlm_lock *lock = &argp->lock; - - if (!(p = nlm_encode_cookie(p, &argp->cookie))) - return -EIO; - *p++ = argp->block? xdr_one : xdr_zero; - *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero; - if (!(p = nlm_encode_lock(p, lock))) - return -EIO; - *p++ = argp->reclaim? xdr_one : xdr_zero; - *p++ = htonl(argp->state); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -static int -nlmclt_encode_cancargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp) -{ - struct nlm_lock *lock = &argp->lock; - - if (!(p = nlm_encode_cookie(p, &argp->cookie))) - return -EIO; - *p++ = argp->block? xdr_one : xdr_zero; - *p++ = (lock->fl.fl_type == F_WRLCK)? 
xdr_one : xdr_zero; - if (!(p = nlm_encode_lock(p, lock))) - return -EIO; - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -static int -nlmclt_encode_unlockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp) -{ - struct nlm_lock *lock = &argp->lock; - - if (!(p = nlm_encode_cookie(p, &argp->cookie))) - return -EIO; - if (!(p = nlm_encode_lock(p, lock))) - return -EIO; - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -static int -nlmclt_encode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) -{ - if (!(p = nlm_encode_cookie(p, &resp->cookie))) - return -EIO; - *p++ = resp->status; - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -static int -nlmclt_encode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) -{ - if (!(p = nlm_encode_testres(p, resp))) - return -EIO; - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -static int -nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) -{ - if (!(p = nlm_decode_cookie(p, &resp->cookie))) - return -EIO; - resp->status = *p++; - return 0; -} - -#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ) -# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!" -#endif - -/* - * Buffer requirements for NLM - */ -#define NLM_void_sz 0 -#define NLM_cookie_sz 1+XDR_QUADLEN(NLM_MAXCOOKIELEN) -#define NLM_caller_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE) -#define NLM_owner_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE) -#define NLM_fhandle_sz 1+XDR_QUADLEN(NFS2_FHSIZE) -#define NLM_lock_sz 3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz -#define NLM_holder_sz 4+NLM_owner_sz - -#define NLM_testargs_sz NLM_cookie_sz+1+NLM_lock_sz -#define NLM_lockargs_sz NLM_cookie_sz+4+NLM_lock_sz -#define NLM_cancargs_sz NLM_cookie_sz+2+NLM_lock_sz -#define NLM_unlockargs_sz NLM_cookie_sz+NLM_lock_sz - -#define NLM_testres_sz NLM_cookie_sz+1+NLM_holder_sz -#define NLM_res_sz NLM_cookie_sz+1 -#define NLM_norep_sz 0 - -/* - * For NLM, a void procedure really returns nothing - */ -#define nlmclt_decode_norep NULL - -#define PROC(proc, argtype, restype) \ -[NLMPROC_##proc] = { \ - .p_proc = NLMPROC_##proc, \ - .p_encode = (kxdrproc_t) nlmclt_encode_##argtype, \ - .p_decode = (kxdrproc_t) nlmclt_decode_##restype, \ - .p_arglen = NLM_##argtype##_sz, \ - .p_replen = NLM_##restype##_sz, \ - .p_statidx = NLMPROC_##proc, \ - .p_name = #proc, \ - } - -static struct rpc_procinfo nlm_procedures[] = { - PROC(TEST, testargs, testres), - PROC(LOCK, lockargs, res), - PROC(CANCEL, cancargs, res), - PROC(UNLOCK, unlockargs, res), - PROC(GRANTED, testargs, res), - PROC(TEST_MSG, testargs, norep), - PROC(LOCK_MSG, lockargs, norep), - PROC(CANCEL_MSG, cancargs, norep), - PROC(UNLOCK_MSG, unlockargs, norep), - PROC(GRANTED_MSG, testargs, norep), - PROC(TEST_RES, testres, norep), - PROC(LOCK_RES, res, norep), - PROC(CANCEL_RES, res, norep), - PROC(UNLOCK_RES, res, norep), - PROC(GRANTED_RES, res, norep), -#ifdef NLMCLNT_SUPPORT_SHARES - PROC(SHARE, shareargs, shareres), - PROC(UNSHARE, shareargs, shareres), - PROC(NM_LOCK, lockargs, res), - PROC(FREE_ALL, notify, void), -#endif -}; - -static struct rpc_version nlm_version1 = { - .number = 1, - .nrprocs = 16, - .procs = nlm_procedures, -}; - -static struct rpc_version nlm_version3 = { - .number = 3, - .nrprocs = 24, - .procs = nlm_procedures, -}; - -static struct rpc_version * nlm_versions[] = { - [1] = &nlm_version1, - [3] = &nlm_version3, -#ifdef CONFIG_LOCKD_V4 - [4] = &nlm_version4, -#endif -}; - -static struct rpc_stat nlm_stats; - -struct rpc_program nlm_program = 
{ - .name = "lockd", - .number = NLM_PROGRAM, - .nrvers = ARRAY_SIZE(nlm_versions), - .version = nlm_versions, - .stats = &nlm_stats, -}; - #ifdef RPC_DEBUG const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie) { -- cgit v1.2.2 From d9c407b138926132e1f93c01fb2dee50eb0bb615 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:55:50 +0000 Subject: NFS: Introduce new-style XDR encoding functions for NFSv3 We're interested in taking advantage of the safety benefits of xdr_streams. These data structures allow more careful checking for buffer overflow while encoding. More careful type checking is also introduced in the new functions. For efficiency, we also eventually want to be able to pass xdr_streams from call_encode() to all XDR encoding functions, rather than building an xdr_stream in every XDR encoding function in the kernel. To do this means all encoders must be ready to handle a passed-in xdr_stream. The new encoders follow the modern paradigm for XDR encoders: BUG on error, and always return a zero status code. Static helper functions are left without the "inline" directive. This allows the compiler to choose automatically how to optimize these for size or speed. Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs3xdr.c | 833 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 830 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index f6cc60f06dac..3d1043f7667c 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -37,6 +37,7 @@ #define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2)) #define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2)) #define NFS3_fattr_sz (21) +#define NFS3_cookieverf_sz (NFS3_COOKIEVERFSIZE>>2) #define NFS3_wcc_attr_sz (6) #define NFS3_pre_op_attr_sz (1+NFS3_wcc_attr_sz) #define NFS3_post_op_attr_sz (1+NFS3_fattr_sz) @@ -59,7 +60,8 @@ #define NFS3_mknodargs_sz (NFS3_diropargs_sz+2+NFS3_sattr_sz) #define NFS3_renameargs_sz (NFS3_diropargs_sz+NFS3_diropargs_sz) #define NFS3_linkargs_sz (NFS3_fh_sz+NFS3_diropargs_sz) -#define NFS3_readdirargs_sz (NFS3_fh_sz+2) +#define NFS3_readdirargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+3) +#define NFS3_readdirplusargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+4) #define NFS3_commitargs_sz (NFS3_fh_sz+3) #define NFS3_attrstat_sz (1+NFS3_fattr_sz) @@ -107,6 +109,22 @@ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) func, xdr->end - xdr->p); } +/* + * While encoding arguments, set up the reply buffer in advance to + * receive reply data directly into the page cache. + */ +static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages, + unsigned int base, unsigned int len, + unsigned int bufsize) +{ + struct rpc_auth *auth = req->rq_cred->cr_auth; + unsigned int replen; + + replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize; + xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len); +} + + /* * Common NFS XDR functions as inlines */ @@ -153,7 +171,7 @@ out_overflow: * Encode/decode time. 
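 *
 * On the wire an nfstime3 is simply two 32-bit words, seconds then
 * nanoseconds, both in network byte order.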
*/ static inline __be32 * -xdr_encode_time3(__be32 *p, struct timespec *timep) +xdr_encode_time3(__be32 *p, const struct timespec *timep) { *p++ = htonl(timep->tv_sec); *p++ = htonl(timep->tv_nsec); @@ -205,7 +223,7 @@ xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr) } static inline __be32 * -xdr_encode_sattr(__be32 *p, struct iattr *attr) +xdr_encode_sattr(__be32 *p, const struct iattr *attr) { if (attr->ia_valid & ATTR_MODE) { *p++ = xdr_one; @@ -306,6 +324,243 @@ xdr_decode_wcc_data(__be32 *p, struct nfs_fattr *fattr) return xdr_decode_post_op_attr(p, fattr); } + +/* + * Encode/decode NFSv3 basic data types + * + * Basic NFSv3 data types are defined in section 2.5 of RFC 1813: + * "NFS Version 3 Protocol Specification". + * + * Not all basic data types have their own encoding and decoding + * functions. For run-time efficiency, some data types are encoded + * or decoded inline. + */ + +static void encode_uint32(struct xdr_stream *xdr, u32 value) +{ + __be32 *p = xdr_reserve_space(xdr, 4); + *p = cpu_to_be32(value); +} + +/* + * filename3 + * + * typedef string filename3<>; + */ +static void encode_filename3(struct xdr_stream *xdr, + const char *name, u32 length) +{ + __be32 *p; + + BUG_ON(length > NFS3_MAXNAMLEN); + p = xdr_reserve_space(xdr, 4 + length); + xdr_encode_opaque(p, name, length); +} + +/* + * nfspath3 + * + * typedef string nfspath3<>; + */ +static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages, + const u32 length) +{ + BUG_ON(length > NFS3_MAXPATHLEN); + encode_uint32(xdr, length); + xdr_write_pages(xdr, pages, 0, length); +} + +/* + * cookie3 + * + * typedef uint64 cookie3 + */ +static __be32 *xdr_encode_cookie3(__be32 *p, u64 cookie) +{ + return xdr_encode_hyper(p, cookie); +} + +/* + * cookieverf3 + * + * typedef opaque cookieverf3[NFS3_COOKIEVERFSIZE]; + */ +static __be32 *xdr_encode_cookieverf3(__be32 *p, const __be32 *verifier) +{ + memcpy(p, verifier, NFS3_COOKIEVERFSIZE); + return p + XDR_QUADLEN(NFS3_COOKIEVERFSIZE); +} + +/* + * createverf3 + * + * typedef opaque createverf3[NFS3_CREATEVERFSIZE]; + */ +static void encode_createverf3(struct xdr_stream *xdr, const __be32 *verifier) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, NFS3_CREATEVERFSIZE); + memcpy(p, verifier, NFS3_CREATEVERFSIZE); +} + +/* + * ftype3 + * + * enum ftype3 { + * NF3REG = 1, + * NF3DIR = 2, + * NF3BLK = 3, + * NF3CHR = 4, + * NF3LNK = 5, + * NF3SOCK = 6, + * NF3FIFO = 7 + * }; + */ +static void encode_ftype3(struct xdr_stream *xdr, const u32 type) +{ + BUG_ON(type > NF3FIFO); + encode_uint32(xdr, type); +} + +/* + * specdata3 + * + * struct specdata3 { + * uint32 specdata1; + * uint32 specdata2; + * }; + */ +static void encode_specdata3(struct xdr_stream *xdr, const dev_t rdev) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, 8); + *p++ = cpu_to_be32(MAJOR(rdev)); + *p = cpu_to_be32(MINOR(rdev)); +} + +/* + * nfs_fh3 + * + * struct nfs_fh3 { + * opaque data; + * }; + */ +static void encode_nfs_fh3(struct xdr_stream *xdr, const struct nfs_fh *fh) +{ + __be32 *p; + + BUG_ON(fh->size > NFS3_FHSIZE); + p = xdr_reserve_space(xdr, 4 + fh->size); + xdr_encode_opaque(p, fh->data, fh->size); +} + +/* + * sattr3 + * + * enum time_how { + * DONT_CHANGE = 0, + * SET_TO_SERVER_TIME = 1, + * SET_TO_CLIENT_TIME = 2 + * }; + * + * union set_mode3 switch (bool set_it) { + * case TRUE: + * mode3 mode; + * default: + * void; + * }; + * + * union set_uid3 switch (bool set_it) { + * case TRUE: + * uid3 uid; + * default: + * void; + * }; + * + * union set_gid3 switch (bool set_it) { + * case 
TRUE: + * gid3 gid; + * default: + * void; + * }; + * + * union set_size3 switch (bool set_it) { + * case TRUE: + * size3 size; + * default: + * void; + * }; + * + * union set_atime switch (time_how set_it) { + * case SET_TO_CLIENT_TIME: + * nfstime3 atime; + * default: + * void; + * }; + * + * union set_mtime switch (time_how set_it) { + * case SET_TO_CLIENT_TIME: + * nfstime3 mtime; + * default: + * void; + * }; + * + * struct sattr3 { + * set_mode3 mode; + * set_uid3 uid; + * set_gid3 gid; + * set_size3 size; + * set_atime atime; + * set_mtime mtime; + * }; + */ +static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr) +{ + u32 nbytes; + __be32 *p; + + /* + * In order to make only a single xdr_reserve_space() call, + * pre-compute the total number of bytes to be reserved. + * Six boolean values, one for each set_foo field, are always + * present in the encoded result, so start there. + */ + nbytes = 6 * 4; + if (attr->ia_valid & ATTR_MODE) + nbytes += 4; + if (attr->ia_valid & ATTR_UID) + nbytes += 4; + if (attr->ia_valid & ATTR_GID) + nbytes += 4; + if (attr->ia_valid & ATTR_SIZE) + nbytes += 8; + if (attr->ia_valid & ATTR_ATIME_SET) + nbytes += 8; + if (attr->ia_valid & ATTR_MTIME_SET) + nbytes += 8; + p = xdr_reserve_space(xdr, nbytes); + + xdr_encode_sattr(p, attr); +} + +/* + * diropargs3 + * + * struct diropargs3 { + * nfs_fh3 dir; + * filename3 name; + * }; + */ +static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh, + const char *name, u32 length) +{ + encode_nfs_fh3(xdr, fh); + encode_filename3(xdr, name, length); +} + + /* * NFS encode functions */ @@ -321,6 +576,23 @@ nfs3_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh) return 0; } +/* + * 3.3.1 GETATTR3args + * + * struct GETATTR3args { + * nfs_fh3 object; + * }; + */ +static int nfs3_xdr_enc_getattr3args(struct rpc_rqst *req, __be32 *p, + const struct nfs_fh *fh) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_nfs_fh3(&xdr, fh); + return 0; +} + /* * Encode SETATTR arguments */ @@ -336,6 +608,49 @@ nfs3_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs3_sattrargs *args) return 0; } +/* + * 3.3.2 SETATTR3args + * + * union sattrguard3 switch (bool check) { + * case TRUE: + * nfstime3 obj_ctime; + * case FALSE: + * void; + * }; + * + * struct SETATTR3args { + * nfs_fh3 object; + * sattr3 new_attributes; + * sattrguard3 guard; + * }; + */ +static void encode_sattrguard3(struct xdr_stream *xdr, + const struct nfs3_sattrargs *args) +{ + __be32 *p; + + if (args->guard) { + p = xdr_reserve_space(xdr, 4 + 8); + *p++ = xdr_one; + xdr_encode_time3(p, &args->guardtime); + } else { + p = xdr_reserve_space(xdr, 4); + *p = xdr_zero; + } +} + +static int nfs3_xdr_enc_setattr3args(struct rpc_rqst *req, __be32 *p, + const struct nfs3_sattrargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_nfs_fh3(&xdr, args->fh); + encode_sattr3(&xdr, args->sattr); + encode_sattrguard3(&xdr, args); + return 0; +} + /* * Encode directory ops argument */ @@ -348,6 +663,23 @@ nfs3_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs3_diropargs *args) return 0; } +/* + * 3.3.3 LOOKUP3args + * + * struct LOOKUP3args { + * diropargs3 what; + * }; + */ +static int nfs3_xdr_enc_lookup3args(struct rpc_rqst *req, __be32 *p, + const struct nfs3_diropargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_diropargs3(&xdr, args->fh, args->name, args->len); + return 0; +} + /* * 
Encode REMOVE argument */ @@ -372,6 +704,50 @@ nfs3_xdr_accessargs(struct rpc_rqst *req, __be32 *p, struct nfs3_accessargs *arg return 0; } +/* + * 3.3.4 ACCESS3args + * + * struct ACCESS3args { + * nfs_fh3 object; + * uint32 access; + * }; + */ +static void encode_access3args(struct xdr_stream *xdr, + const struct nfs3_accessargs *args) +{ + encode_nfs_fh3(xdr, args->fh); + encode_uint32(xdr, args->access); +} + +static int nfs3_xdr_enc_access3args(struct rpc_rqst *req, __be32 *p, + const struct nfs3_accessargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_access3args(&xdr, args); + return 0; +} + +/* + * 3.3.5 READLINK3args + * + * struct READLINK3args { + * nfs_fh3 symlink; + * }; + */ +static int nfs3_xdr_enc_readlink3args(struct rpc_rqst *req, __be32 *p, + const struct nfs3_readlinkargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_nfs_fh3(&xdr, args->fh); + prepare_reply_buffer(req, args->pages, args->pgbase, + args->pglen, NFS3_readlinkres_sz); + return 0; +} + /* * Arguments to a READ call. Since we read data directly into the page * cache, we also set up the reply iovec here so that iov[1] points @@ -397,6 +773,40 @@ nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) return 0; } +/* + * 3.3.6 READ3args + * + * struct READ3args { + * nfs_fh3 file; + * offset3 offset; + * count3 count; + * }; + */ +static void encode_read3args(struct xdr_stream *xdr, + const struct nfs_readargs *args) +{ + __be32 *p; + + encode_nfs_fh3(xdr, args->fh); + + p = xdr_reserve_space(xdr, 8 + 4); + p = xdr_encode_hyper(p, args->offset); + *p = cpu_to_be32(args->count); +} + +static int nfs3_xdr_enc_read3args(struct rpc_rqst *req, __be32 *p, + const struct nfs_readargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_read3args(&xdr, args); + prepare_reply_buffer(req, args->pages, args->pgbase, + args->count, NFS3_readres_sz); + req->rq_rcv_buf.flags |= XDRBUF_READ; + return 0; +} + /* * Write arguments. Splice the buffer to be written into the iovec. 
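 *
 * (The xdr_stream-based WRITE3args encoder added below performs the
 *  same page splice with xdr_write_pages() and sets XDRBUF_WRITE on
 *  the send buffer.)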
*/ @@ -419,6 +829,52 @@ nfs3_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args) return 0; } +/* + * 3.3.7 WRITE3args + * + * enum stable_how { + * UNSTABLE = 0, + * DATA_SYNC = 1, + * FILE_SYNC = 2 + * }; + * + * struct WRITE3args { + * nfs_fh3 file; + * offset3 offset; + * count3 count; + * stable_how stable; + * opaque data<>; + * }; + */ +static void encode_write3args(struct xdr_stream *xdr, + const struct nfs_writeargs *args) +{ + __be32 *p; + + encode_nfs_fh3(xdr, args->fh); + + p = xdr_reserve_space(xdr, 8 + 4 + 4 + 4); + p = xdr_encode_hyper(p, args->offset); + *p++ = cpu_to_be32(args->count); + + BUG_ON(args->stable > NFS_FILE_SYNC); + *p++ = cpu_to_be32(args->stable); + + *p = cpu_to_be32(args->count); + xdr_write_pages(xdr, args->pages, args->pgbase, args->count); +} + +static int nfs3_xdr_enc_write3args(struct rpc_rqst *req, __be32 *p, + const struct nfs_writeargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_write3args(&xdr, args); + xdr.buf->flags |= XDRBUF_WRITE; + return 0; +} + /* * Encode CREATE arguments */ @@ -439,6 +895,56 @@ nfs3_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs3_createargs *arg return 0; } +/* + * 3.3.8 CREATE3args + * + * enum createmode3 { + * UNCHECKED = 0, + * GUARDED = 1, + * EXCLUSIVE = 2 + * }; + * + * union createhow3 switch (createmode3 mode) { + * case UNCHECKED: + * case GUARDED: + * sattr3 obj_attributes; + * case EXCLUSIVE: + * createverf3 verf; + * }; + * + * struct CREATE3args { + * diropargs3 where; + * createhow3 how; + * }; + */ +static void encode_createhow3(struct xdr_stream *xdr, + const struct nfs3_createargs *args) +{ + encode_uint32(xdr, args->createmode); + switch (args->createmode) { + case NFS3_CREATE_UNCHECKED: + case NFS3_CREATE_GUARDED: + encode_sattr3(xdr, args->sattr); + break; + case NFS3_CREATE_EXCLUSIVE: + encode_createverf3(xdr, args->verifier); + break; + default: + BUG(); + } +} + +static int nfs3_xdr_enc_create3args(struct rpc_rqst *req, __be32 *p, + const struct nfs3_createargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_diropargs3(&xdr, args->fh, args->name, args->len); + encode_createhow3(&xdr, args); + return 0; +} + /* * Encode MKDIR arguments */ @@ -452,6 +958,25 @@ nfs3_xdr_mkdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mkdirargs *args) return 0; } +/* + * 3.3.9 MKDIR3args + * + * struct MKDIR3args { + * diropargs3 where; + * sattr3 attributes; + * }; + */ +static int nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req, __be32 *p, + const struct nfs3_mkdirargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_diropargs3(&xdr, args->fh, args->name, args->len); + encode_sattr3(&xdr, args->sattr); + return 0; +} + /* * Encode SYMLINK arguments */ @@ -469,6 +994,37 @@ nfs3_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_symlinkargs *a return 0; } +/* + * 3.3.10 SYMLINK3args + * + * struct symlinkdata3 { + * sattr3 symlink_attributes; + * nfspath3 symlink_data; + * }; + * + * struct SYMLINK3args { + * diropargs3 where; + * symlinkdata3 symlink; + * }; + */ +static void encode_symlinkdata3(struct xdr_stream *xdr, + const struct nfs3_symlinkargs *args) +{ + encode_sattr3(xdr, args->sattr); + encode_nfspath3(xdr, args->pages, args->pathlen); +} + +static int nfs3_xdr_enc_symlink3args(struct rpc_rqst *req, __be32 *p, + const struct nfs3_symlinkargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + 
encode_diropargs3(&xdr, args->fromfh, args->fromname, args->fromlen); + encode_symlinkdata3(&xdr, args); + return 0; +} + /* * Encode MKNOD arguments */ @@ -488,6 +1044,86 @@ nfs3_xdr_mknodargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mknodargs *args) return 0; } +/* + * 3.3.11 MKNOD3args + * + * struct devicedata3 { + * sattr3 dev_attributes; + * specdata3 spec; + * }; + * + * union mknoddata3 switch (ftype3 type) { + * case NF3CHR: + * case NF3BLK: + * devicedata3 device; + * case NF3SOCK: + * case NF3FIFO: + * sattr3 pipe_attributes; + * default: + * void; + * }; + * + * struct MKNOD3args { + * diropargs3 where; + * mknoddata3 what; + * }; + */ +static void encode_devicedata3(struct xdr_stream *xdr, + const struct nfs3_mknodargs *args) +{ + encode_sattr3(xdr, args->sattr); + encode_specdata3(xdr, args->rdev); +} + +static void encode_mknoddata3(struct xdr_stream *xdr, + const struct nfs3_mknodargs *args) +{ + encode_ftype3(xdr, args->type); + switch (args->type) { + case NF3CHR: + case NF3BLK: + encode_devicedata3(xdr, args); + break; + case NF3SOCK: + case NF3FIFO: + encode_sattr3(xdr, args->sattr); + break; + case NF3REG: + case NF3DIR: + break; + default: + BUG(); + } +} + +static int nfs3_xdr_enc_mknod3args(struct rpc_rqst *req, __be32 *p, + const struct nfs3_mknodargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_diropargs3(&xdr, args->fh, args->name, args->len); + encode_mknoddata3(&xdr, args); + return 0; +} + +/* + * 3.3.12 REMOVE3args + * + * struct REMOVE3args { + * diropargs3 object; + * }; + */ +static int nfs3_xdr_enc_remove3args(struct rpc_rqst *req, __be32 *p, + const struct nfs_removeargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_diropargs3(&xdr, args->fh, args->name.name, args->name.len); + return 0; +} + /* * Encode RENAME arguments */ @@ -502,6 +1138,27 @@ nfs3_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args return 0; } +/* + * 3.3.14 RENAME3args + * + * struct RENAME3args { + * diropargs3 from; + * diropargs3 to; + * }; + */ +static int nfs3_xdr_enc_rename3args(struct rpc_rqst *req, __be32 *p, + const struct nfs_renameargs *args) +{ + const struct qstr *old = args->old_name; + const struct qstr *new = args->new_name; + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_diropargs3(&xdr, args->old_dir, old->name, old->len); + encode_diropargs3(&xdr, args->new_dir, new->name, new->len); + return 0; +} + /* * Encode LINK arguments */ @@ -515,6 +1172,25 @@ nfs3_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_linkargs *args) return 0; } +/* + * 3.3.15 LINK3args + * + * struct LINK3args { + * nfs_fh3 file; + * diropargs3 link; + * }; + */ +static int nfs3_xdr_enc_link3args(struct rpc_rqst *req, __be32 *p, + const struct nfs3_linkargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_nfs_fh3(&xdr, args->fromfh); + encode_diropargs3(&xdr, args->tofh, args->toname, args->tolen); + return 0; +} + /* * Encode arguments to readdir call */ @@ -543,6 +1219,84 @@ nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *a return 0; } +/* + * 3.3.16 READDIR3args + * + * struct READDIR3args { + * nfs_fh3 dir; + * cookie3 cookie; + * cookieverf3 cookieverf; + * count3 count; + * }; + */ +static void encode_readdir3args(struct xdr_stream *xdr, + const struct nfs3_readdirargs *args) +{ + __be32 *p; + + encode_nfs_fh3(xdr, args->fh); + + p = 
xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4); + p = xdr_encode_cookie3(p, args->cookie); + p = xdr_encode_cookieverf3(p, args->verf); + *p = cpu_to_be32(args->count); +} + +static int nfs3_xdr_enc_readdir3args(struct rpc_rqst *req, __be32 *p, + const struct nfs3_readdirargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_readdir3args(&xdr, args); + prepare_reply_buffer(req, args->pages, 0, + args->count, NFS3_readdirres_sz); + return 0; +} + +/* + * 3.3.17 READDIRPLUS3args + * + * struct READDIRPLUS3args { + * nfs_fh3 dir; + * cookie3 cookie; + * cookieverf3 cookieverf; + * count3 dircount; + * count3 maxcount; + * }; + */ +static void encode_readdirplus3args(struct xdr_stream *xdr, + const struct nfs3_readdirargs *args) +{ + __be32 *p; + + encode_nfs_fh3(xdr, args->fh); + + p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4 + 4); + p = xdr_encode_cookie3(p, args->cookie); + p = xdr_encode_cookieverf3(p, args->verf); + + /* + * readdirplus: need dircount + buffer size. + * We just make sure we make dircount big enough + */ + *p++ = cpu_to_be32(args->count >> 3); + + *p = cpu_to_be32(args->count); +} + +static int nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req, __be32 *p, + const struct nfs3_readdirargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_readdirplus3args(&xdr, args); + prepare_reply_buffer(req, args->pages, 0, + args->count, NFS3_readdirres_sz); + return 0; +} + /* * Decode the result of a readdir call. * We just check for syntactical correctness. @@ -674,6 +1428,37 @@ nfs3_xdr_commitargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args) return 0; } +/* + * 3.3.21 COMMIT3args + * + * struct COMMIT3args { + * nfs_fh3 file; + * offset3 offset; + * count3 count; + * }; + */ +static void encode_commit3args(struct xdr_stream *xdr, + const struct nfs_writeargs *args) +{ + __be32 *p; + + encode_nfs_fh3(xdr, args->fh); + + p = xdr_reserve_space(xdr, 8 + 4); + p = xdr_encode_hyper(p, args->offset); + *p = cpu_to_be32(args->count); +} + +static int nfs3_xdr_enc_commit3args(struct rpc_rqst *req, __be32 *p, + const struct nfs_writeargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_commit3args(&xdr, args); + return 0; +} + #ifdef CONFIG_NFS_V3_ACL /* * Encode GETACL arguments @@ -699,6 +1484,21 @@ nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p, return 0; } +static int nfs3_xdr_enc_getacl3args(struct rpc_rqst *req, __be32 *p, + const struct nfs3_getaclargs *args) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_nfs_fh3(&xdr, args->fh); + encode_uint32(&xdr, args->mask); + if (args->mask & (NFS_ACL | NFS_DFACL)) + prepare_reply_buffer(req, args->pages, 0, + NFSACL_MAXPAGES << PAGE_SHIFT, + ACL3_getaclres_sz); + return 0; +} + /* * Encode SETACL arguments */ @@ -731,6 +1531,33 @@ nfs3_xdr_setaclargs(struct rpc_rqst *req, __be32 *p, NFS_ACL_DEFAULT); return (err > 0) ? 0 : err; } + +static int nfs3_xdr_enc_setacl3args(struct rpc_rqst *req, __be32 *p, + const struct nfs3_setaclargs *args) +{ + struct xdr_stream xdr; + unsigned int base; + int error; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_nfs_fh3(&xdr, NFS_FH(args->inode)); + encode_uint32(&xdr, args->mask); + if (args->npages != 0) + xdr_write_pages(&xdr, args->pages, 0, args->len); + + base = req->rq_slen; + error = nfsacl_encode(xdr.buf, base, args->inode, + (args->mask & NFS_ACL) ? 
+ args->acl_access : NULL, 1, 0); + BUG_ON(error < 0); + error = nfsacl_encode(xdr.buf, base + error, args->inode, + (args->mask & NFS_DFACL) ? + args->acl_default : NULL, 1, + NFS_ACL_DEFAULT); + BUG_ON(error < 0); + return 0; +} + #endif /* CONFIG_NFS_V3_ACL */ /* -- cgit v1.2.2 From ad96b5b5eae59696b97e207d730b8c8cfb9d4e42 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:56:01 +0000 Subject: NFS: Replace old NFSv3 encoder functions with xdr_stream-based ones The naming scheme of the new encoder functions, which follows the NFSv4 XDR encoder functions, is slightly different than the scheme used for the old functions. Rename the functions as a separate step to keep the patches clean. Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs3xdr.c | 58 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 30 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 3d1043f7667c..bbda89042053 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -46,10 +46,11 @@ #define NFS3_fsinfo_sz #define NFS3_pathconf_sz #define NFS3_entry_sz (NFS3_filename_sz+3) - -#define NFS3_sattrargs_sz (NFS3_fh_sz+NFS3_sattr_sz+3) #define NFS3_diropargs_sz (NFS3_fh_sz+NFS3_filename_sz) -#define NFS3_removeargs_sz (NFS3_fh_sz+NFS3_filename_sz) + +#define NFS3_getattrargs_sz (NFS3_fh_sz) +#define NFS3_setattrargs_sz (NFS3_fh_sz+NFS3_sattr_sz+3) +#define NFS3_lookupargs_sz (NFS3_fh_sz+NFS3_filename_sz) #define NFS3_accessargs_sz (NFS3_fh_sz+1) #define NFS3_readlinkargs_sz (NFS3_fh_sz) #define NFS3_readargs_sz (NFS3_fh_sz+3) @@ -58,6 +59,7 @@ #define NFS3_mkdirargs_sz (NFS3_diropargs_sz+NFS3_sattr_sz) #define NFS3_symlinkargs_sz (NFS3_diropargs_sz+1+NFS3_sattr_sz) #define NFS3_mknodargs_sz (NFS3_diropargs_sz+2+NFS3_sattr_sz) +#define NFS3_removeargs_sz (NFS3_fh_sz+NFS3_filename_sz) #define NFS3_renameargs_sz (NFS3_diropargs_sz+NFS3_diropargs_sz) #define NFS3_linkargs_sz (NFS3_fh_sz+NFS3_diropargs_sz) #define NFS3_readdirargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+3) @@ -1969,9 +1971,9 @@ nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) #define PROC(proc, argtype, restype, timer) \ [NFS3PROC_##proc] = { \ .p_proc = NFS3PROC_##proc, \ - .p_encode = (kxdrproc_t) nfs3_xdr_##argtype, \ + .p_encode = (kxdrproc_t)nfs3_xdr_enc_##argtype##3args, \ .p_decode = (kxdrproc_t) nfs3_xdr_##restype, \ - .p_arglen = NFS3_##argtype##_sz, \ + .p_arglen = NFS3_##argtype##args_sz, \ .p_replen = NFS3_##restype##_sz, \ .p_timer = timer, \ .p_statidx = NFS3PROC_##proc, \ @@ -1979,27 +1981,27 @@ nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) } struct rpc_procinfo nfs3_procedures[] = { - PROC(GETATTR, fhandle, attrstat, 1), - PROC(SETATTR, sattrargs, wccstat, 0), - PROC(LOOKUP, diropargs, lookupres, 2), - PROC(ACCESS, accessargs, accessres, 1), - PROC(READLINK, readlinkargs, readlinkres, 3), - PROC(READ, readargs, readres, 3), - PROC(WRITE, writeargs, writeres, 4), - PROC(CREATE, createargs, createres, 0), - PROC(MKDIR, mkdirargs, createres, 0), - PROC(SYMLINK, symlinkargs, createres, 0), - PROC(MKNOD, mknodargs, createres, 0), - PROC(REMOVE, removeargs, removeres, 0), - PROC(RMDIR, diropargs, wccstat, 0), - PROC(RENAME, renameargs, renameres, 0), - PROC(LINK, linkargs, linkres, 0), - PROC(READDIR, readdirargs, readdirres, 3), - PROC(READDIRPLUS, readdirargs, readdirres, 3), - PROC(FSSTAT, fhandle, fsstatres, 0), - PROC(FSINFO, fhandle, fsinfores, 0), - 
PROC(PATHCONF, fhandle, pathconfres, 0), - PROC(COMMIT, commitargs, commitres, 5), + PROC(GETATTR, getattr, attrstat, 1), + PROC(SETATTR, setattr, wccstat, 0), + PROC(LOOKUP, lookup, lookupres, 2), + PROC(ACCESS, access, accessres, 1), + PROC(READLINK, readlink, readlinkres, 3), + PROC(READ, read, readres, 3), + PROC(WRITE, write, writeres, 4), + PROC(CREATE, create, createres, 0), + PROC(MKDIR, mkdir, createres, 0), + PROC(SYMLINK, symlink, createres, 0), + PROC(MKNOD, mknod, createres, 0), + PROC(REMOVE, remove, removeres, 0), + PROC(RMDIR, lookup, wccstat, 0), + PROC(RENAME, rename, renameres, 0), + PROC(LINK, link, linkres, 0), + PROC(READDIR, readdir, readdirres, 3), + PROC(READDIRPLUS, readdirplus, readdirres, 3), + PROC(FSSTAT, getattr, fsstatres, 0), + PROC(FSINFO, getattr, fsinfores, 0), + PROC(PATHCONF, getattr, pathconfres, 0), + PROC(COMMIT, commit, commitres, 5), }; struct rpc_version nfs_version3 = { @@ -2012,7 +2014,7 @@ struct rpc_version nfs_version3 = { static struct rpc_procinfo nfs3_acl_procedures[] = { [ACLPROC3_GETACL] = { .p_proc = ACLPROC3_GETACL, - .p_encode = (kxdrproc_t) nfs3_xdr_getaclargs, + .p_encode = (kxdrproc_t)nfs3_xdr_enc_getacl3args, .p_decode = (kxdrproc_t) nfs3_xdr_getaclres, .p_arglen = ACL3_getaclargs_sz, .p_replen = ACL3_getaclres_sz, @@ -2021,7 +2023,7 @@ static struct rpc_procinfo nfs3_acl_procedures[] = { }, [ACLPROC3_SETACL] = { .p_proc = ACLPROC3_SETACL, - .p_encode = (kxdrproc_t) nfs3_xdr_setaclargs, + .p_encode = (kxdrproc_t)nfs3_xdr_enc_setacl3args, .p_decode = (kxdrproc_t) nfs3_xdr_setaclres, .p_arglen = ACL3_setaclargs_sz, .p_replen = ACL3_setaclres_sz, -- cgit v1.2.2 From 499ff710b2fd3a03c8195c82340e5166eed04205 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:56:10 +0000 Subject: NFS: Remove unused old NFSv3 encoder functions Clean up. Remove unused legacy argument encoder functions, and any now unused encoder helper functions. Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs3xdr.c | 328 +------------------------------------------------------ 1 file changed, 4 insertions(+), 324 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index bbda89042053..b0af263e4db9 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -130,12 +130,6 @@ static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages, /* * Common NFS XDR functions as inlines */ -static inline __be32 * -xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fh) -{ - return xdr_encode_array(p, fh->data, fh->size); -} - static inline __be32 * xdr_decode_fhandle(__be32 *p, struct nfs_fh *fh) { @@ -564,20 +558,12 @@ static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh, /* - * NFS encode functions + * NFSv3 XDR encode functions + * + * NFSv3 argument types are defined in section 3.3 of RFC 1813: + * "NFS Version 3 Protocol Specification". 
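+ *
+ * Each new-style encoder initializes an xdr_stream over the send
+ * buffer and composes the helpers above; GETATTR, the simplest case,
+ * looks like this:
+ *
+ *     static int nfs3_xdr_enc_getattr3args(struct rpc_rqst *req,
+ *                                          __be32 *p,
+ *                                          const struct nfs_fh *fh)
+ *     {
+ *             struct xdr_stream xdr;
+ *
+ *             xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+ *             encode_nfs_fh3(&xdr, fh);
+ *             return 0;
+ *     }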
*/ -/* - * Encode file handle argument - */ -static int -nfs3_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh) -{ - p = xdr_encode_fhandle(p, fh); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 3.3.1 GETATTR3args * @@ -595,21 +581,6 @@ static int nfs3_xdr_enc_getattr3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode SETATTR arguments - */ -static int -nfs3_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs3_sattrargs *args) -{ - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_sattr(p, args->sattr); - *p++ = htonl(args->guard); - if (args->guard) - p = xdr_encode_time3(p, &args->guardtime); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 3.3.2 SETATTR3args * @@ -653,18 +624,6 @@ static int nfs3_xdr_enc_setattr3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode directory ops argument - */ -static int -nfs3_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs3_diropargs *args) -{ - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_array(p, args->name, args->len); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 3.3.3 LOOKUP3args * @@ -682,30 +641,6 @@ static int nfs3_xdr_enc_lookup3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode REMOVE argument - */ -static int -nfs3_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args) -{ - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_array(p, args->name.name, args->name.len); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -/* - * Encode access() argument - */ -static int -nfs3_xdr_accessargs(struct rpc_rqst *req, __be32 *p, struct nfs3_accessargs *args) -{ - p = xdr_encode_fhandle(p, args->fh); - *p++ = htonl(args->access); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 3.3.4 ACCESS3args * @@ -750,31 +685,6 @@ static int nfs3_xdr_enc_readlink3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Arguments to a READ call. Since we read data directly into the page - * cache, we also set up the reply iovec here so that iov[1] points - * exactly to the page we want to fetch. - */ -static int -nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) -{ - struct rpc_auth *auth = req->rq_cred->cr_auth; - unsigned int replen; - u32 count = args->count; - - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_hyper(p, args->offset); - *p++ = htonl(count); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - - /* Inline the page array */ - replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readres_sz) << 2; - xdr_inline_pages(&req->rq_rcv_buf, replen, - args->pages, args->pgbase, count); - req->rq_rcv_buf.flags |= XDRBUF_READ; - return 0; -} - /* * 3.3.6 READ3args * @@ -809,28 +719,6 @@ static int nfs3_xdr_enc_read3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Write arguments. Splice the buffer to be written into the iovec. 
- */ -static int -nfs3_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args) -{ - struct xdr_buf *sndbuf = &req->rq_snd_buf; - u32 count = args->count; - - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_hyper(p, args->offset); - *p++ = htonl(count); - *p++ = htonl(args->stable); - *p++ = htonl(count); - sndbuf->len = xdr_adjust_iovec(sndbuf->head, p); - - /* Copy the page array */ - xdr_encode_pages(sndbuf, args->pages, args->pgbase, count); - sndbuf->flags |= XDRBUF_WRITE; - return 0; -} - /* * 3.3.7 WRITE3args * @@ -877,26 +765,6 @@ static int nfs3_xdr_enc_write3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode CREATE arguments - */ -static int -nfs3_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs3_createargs *args) -{ - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_array(p, args->name, args->len); - - *p++ = htonl(args->createmode); - if (args->createmode == NFS3_CREATE_EXCLUSIVE) { - *p++ = args->verifier[0]; - *p++ = args->verifier[1]; - } else - p = xdr_encode_sattr(p, args->sattr); - - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 3.3.8 CREATE3args * @@ -947,19 +815,6 @@ static int nfs3_xdr_enc_create3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode MKDIR arguments - */ -static int -nfs3_xdr_mkdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mkdirargs *args) -{ - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_array(p, args->name, args->len); - p = xdr_encode_sattr(p, args->sattr); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 3.3.9 MKDIR3args * @@ -979,23 +834,6 @@ static int nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode SYMLINK arguments - */ -static int -nfs3_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_symlinkargs *args) -{ - p = xdr_encode_fhandle(p, args->fromfh); - p = xdr_encode_array(p, args->fromname, args->fromlen); - p = xdr_encode_sattr(p, args->sattr); - *p++ = htonl(args->pathlen); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - - /* Copy the page */ - xdr_encode_pages(&req->rq_snd_buf, args->pages, 0, args->pathlen); - return 0; -} - /* * 3.3.10 SYMLINK3args * @@ -1027,25 +865,6 @@ static int nfs3_xdr_enc_symlink3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode MKNOD arguments - */ -static int -nfs3_xdr_mknodargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mknodargs *args) -{ - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_array(p, args->name, args->len); - *p++ = htonl(args->type); - p = xdr_encode_sattr(p, args->sattr); - if (args->type == NF3CHR || args->type == NF3BLK) { - *p++ = htonl(MAJOR(args->rdev)); - *p++ = htonl(MINOR(args->rdev)); - } - - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 3.3.11 MKNOD3args * @@ -1126,20 +945,6 @@ static int nfs3_xdr_enc_remove3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode RENAME arguments - */ -static int -nfs3_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args) -{ - p = xdr_encode_fhandle(p, args->old_dir); - p = xdr_encode_array(p, args->old_name->name, args->old_name->len); - p = xdr_encode_fhandle(p, args->new_dir); - p = xdr_encode_array(p, args->new_name->name, args->new_name->len); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 3.3.14 RENAME3args * @@ -1161,19 +966,6 @@ static int nfs3_xdr_enc_rename3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode LINK arguments - */ -static int 
-nfs3_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_linkargs *args) -{ - p = xdr_encode_fhandle(p, args->fromfh); - p = xdr_encode_fhandle(p, args->tofh); - p = xdr_encode_array(p, args->toname, args->tolen); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 3.3.15 LINK3args * @@ -1193,34 +985,6 @@ static int nfs3_xdr_enc_link3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode arguments to readdir call - */ -static int -nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *args) -{ - struct rpc_auth *auth = req->rq_cred->cr_auth; - unsigned int replen; - u32 count = args->count; - - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_hyper(p, args->cookie); - *p++ = args->verf[0]; - *p++ = args->verf[1]; - if (args->plus) { - /* readdirplus: need dircount + buffer size. - * We just make sure we make dircount big enough */ - *p++ = htonl(count >> 3); - } - *p++ = htonl(count); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - - /* Inline the page array */ - replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readdirres_sz) << 2; - xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count); - return 0; -} - /* * 3.3.16 READDIR3args * @@ -1417,19 +1181,6 @@ out_overflow_exit: return ERR_PTR(-EAGAIN); } -/* - * Encode COMMIT arguments - */ -static int -nfs3_xdr_commitargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args) -{ - p = xdr_encode_fhandle(p, args->fh); - p = xdr_encode_hyper(p, args->offset); - *p++ = htonl(args->count); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - /* * 3.3.21 COMMIT3args * @@ -1462,29 +1213,6 @@ static int nfs3_xdr_enc_commit3args(struct rpc_rqst *req, __be32 *p, } #ifdef CONFIG_NFS_V3_ACL -/* - * Encode GETACL arguments - */ -static int -nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p, - struct nfs3_getaclargs *args) -{ - struct rpc_auth *auth = req->rq_cred->cr_auth; - unsigned int replen; - - p = xdr_encode_fhandle(p, args->fh); - *p++ = htonl(args->mask); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - - if (args->mask & (NFS_ACL | NFS_DFACL)) { - /* Inline the page array */ - replen = (RPC_REPHDRSIZE + auth->au_rslack + - ACL3_getaclres_sz) << 2; - xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, - NFSACL_MAXPAGES << PAGE_SHIFT); - } - return 0; -} static int nfs3_xdr_enc_getacl3args(struct rpc_rqst *req, __be32 *p, const struct nfs3_getaclargs *args) @@ -1501,39 +1229,6 @@ static int nfs3_xdr_enc_getacl3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Encode SETACL arguments - */ -static int -nfs3_xdr_setaclargs(struct rpc_rqst *req, __be32 *p, - struct nfs3_setaclargs *args) -{ - struct xdr_buf *buf = &req->rq_snd_buf; - unsigned int base; - int err; - - p = xdr_encode_fhandle(p, NFS_FH(args->inode)); - *p++ = htonl(args->mask); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - base = req->rq_slen; - - if (args->npages != 0) - xdr_encode_pages(buf, args->pages, 0, args->len); - else - req->rq_slen = xdr_adjust_iovec(req->rq_svec, - p + XDR_QUADLEN(args->len)); - - err = nfsacl_encode(buf, base, args->inode, - (args->mask & NFS_ACL) ? - args->acl_access : NULL, 1, 0); - if (err > 0) - err = nfsacl_encode(buf, base + err, args->inode, - (args->mask & NFS_DFACL) ? - args->acl_default : NULL, 1, - NFS_ACL_DEFAULT); - return (err > 0) ? 
0 : err; -} - static int nfs3_xdr_enc_setacl3args(struct rpc_rqst *req, __be32 *p, const struct nfs3_setaclargs *args) { @@ -1635,21 +1330,6 @@ nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res) return 0; } -static int -nfs3_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readlinkargs *args) -{ - struct rpc_auth *auth = req->rq_cred->cr_auth; - unsigned int replen; - - p = xdr_encode_fhandle(p, args->fh); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - - /* Inline the page array */ - replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readlinkres_sz) << 2; - xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen); - return 0; -} - /* * Decode READLINK reply */ -- cgit v1.2.2 From 9d5a64343925a152e1907c652a0d71d6640868b3 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:56:20 +0000 Subject: NFS: Update xdr_encode_foo() functions that we're keeping Clean up. Move the timestamp and the sattr encoder to match the placement convention of the other helpers, update their coding style, and refresh their documenting comments. Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs3xdr.c | 111 +++++++++++++++++++++++++++---------------------------- 1 file changed, 55 insertions(+), 56 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index b0af263e4db9..119844d0b4d5 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -166,14 +166,6 @@ out_overflow: /* * Encode/decode time. */ -static inline __be32 * -xdr_encode_time3(__be32 *p, const struct timespec *timep) -{ - *p++ = htonl(timep->tv_sec); - *p++ = htonl(timep->tv_nsec); - return p; -} - static inline __be32 * xdr_decode_time3(__be32 *p, struct timespec *timep) { @@ -218,52 +210,6 @@ xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr) return p; } -static inline __be32 * -xdr_encode_sattr(__be32 *p, const struct iattr *attr) -{ - if (attr->ia_valid & ATTR_MODE) { - *p++ = xdr_one; - *p++ = htonl(attr->ia_mode & S_IALLUGO); - } else { - *p++ = xdr_zero; - } - if (attr->ia_valid & ATTR_UID) { - *p++ = xdr_one; - *p++ = htonl(attr->ia_uid); - } else { - *p++ = xdr_zero; - } - if (attr->ia_valid & ATTR_GID) { - *p++ = xdr_one; - *p++ = htonl(attr->ia_gid); - } else { - *p++ = xdr_zero; - } - if (attr->ia_valid & ATTR_SIZE) { - *p++ = xdr_one; - p = xdr_encode_hyper(p, (__u64) attr->ia_size); - } else { - *p++ = xdr_zero; - } - if (attr->ia_valid & ATTR_ATIME_SET) { - *p++ = xdr_two; - p = xdr_encode_time3(p, &attr->ia_atime); - } else if (attr->ia_valid & ATTR_ATIME) { - *p++ = xdr_one; - } else { - *p++ = xdr_zero; - } - if (attr->ia_valid & ATTR_MTIME_SET) { - *p++ = xdr_two; - p = xdr_encode_time3(p, &attr->ia_mtime); - } else if (attr->ia_valid & ATTR_MTIME) { - *p++ = xdr_one; - } else { - *p++ = xdr_zero; - } - return p; -} - static inline __be32 * xdr_decode_wcc_attr(__be32 *p, struct nfs_fattr *fattr) { @@ -452,6 +398,21 @@ static void encode_nfs_fh3(struct xdr_stream *xdr, const struct nfs_fh *fh) xdr_encode_opaque(p, fh->data, fh->size); } +/* + * nfstime3 + * + * struct nfstime3 { + * uint32 seconds; + * uint32 nseconds; + * }; + */ +static __be32 *xdr_encode_nfstime3(__be32 *p, const struct timespec *timep) +{ + *p++ = cpu_to_be32(timep->tv_sec); + *p++ = cpu_to_be32(timep->tv_nsec); + return p; +} + /* * sattr3 * @@ -538,7 +499,45 @@ static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr) nbytes += 8; p = xdr_reserve_space(xdr, nbytes); - 
xdr_encode_sattr(p, attr); + if (attr->ia_valid & ATTR_MODE) { + *p++ = xdr_one; + *p++ = cpu_to_be32(attr->ia_mode & S_IALLUGO); + } else + *p++ = xdr_zero; + + if (attr->ia_valid & ATTR_UID) { + *p++ = xdr_one; + *p++ = cpu_to_be32(attr->ia_uid); + } else + *p++ = xdr_zero; + + if (attr->ia_valid & ATTR_GID) { + *p++ = xdr_one; + *p++ = cpu_to_be32(attr->ia_gid); + } else + *p++ = xdr_zero; + + if (attr->ia_valid & ATTR_SIZE) { + *p++ = xdr_one; + p = xdr_encode_hyper(p, (u64)attr->ia_size); + } else + *p++ = xdr_zero; + + if (attr->ia_valid & ATTR_ATIME_SET) { + *p++ = xdr_two; + p = xdr_encode_nfstime3(p, &attr->ia_atime); + } else if (attr->ia_valid & ATTR_ATIME) { + *p++ = xdr_one; + } else + *p++ = xdr_zero; + + if (attr->ia_valid & ATTR_MTIME_SET) { + *p++ = xdr_two; + xdr_encode_nfstime3(p, &attr->ia_mtime); + } else if (attr->ia_valid & ATTR_MTIME) { + *p = xdr_one; + } else + *p = xdr_zero; } /* @@ -605,7 +604,7 @@ static void encode_sattrguard3(struct xdr_stream *xdr, if (args->guard) { p = xdr_reserve_space(xdr, 4 + 8); *p++ = xdr_one; - xdr_encode_time3(p, &args->guardtime); + xdr_encode_nfstime3(p, &args->guardtime); } else { p = xdr_reserve_space(xdr, 4); *p = xdr_zero; -- cgit v1.2.2 From e4f9323409369a3aeb01885c0c4409d2eeec794a Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:56:30 +0000 Subject: NFS: Introduce new-style XDR decoding functions for NFSv2 We'd like to prevent local buffer overflows caused by malicious or broken servers. New xdr_stream style decoders can do that. For efficiency, we also eventually want to be able to pass xdr_streams from call_decode() to all XDR decoding functions, rather than building an xdr_stream in every XDR decoding function in the kernel. Static helper functions are left without the "inline" directive. This allows the compiler to choose automatically how to optimize these for size or speed. Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs3xdr.c | 1534 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 1450 insertions(+), 84 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 119844d0b4d5..0f07c6d55131 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -104,13 +104,6 @@ static const umode_t nfs_type2fmt[] = { [NF3FIFO] = S_IFIFO, }; -static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) -{ - dprintk("nfs: %s: prematurely hit end of receive buffer. " - "Remaining buffer length is %tu words.\n", - func, xdr->end - xdr->p); -} - /* * While encoding arguments, set up the reply buffer in advance to * receive reply data directly into the page cache. @@ -126,6 +119,16 @@ static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages, xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len); } +/* + * Handle decode buffer overflows out-of-line. + */ +static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) +{ + dprintk("NFS: %s prematurely hit the end of our receive buffer. 
" + "Remaining buffer length is %tu words.\n", + func, xdr->end - xdr->p); +} + /* * Common NFS XDR functions as inlines @@ -284,6 +287,44 @@ static void encode_uint32(struct xdr_stream *xdr, u32 value) *p = cpu_to_be32(value); } +static int decode_uint32(struct xdr_stream *xdr, u32 *value) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + *value = be32_to_cpup(p); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +static int decode_uint64(struct xdr_stream *xdr, u64 *value) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, 8); + if (unlikely(p == NULL)) + goto out_overflow; + xdr_decode_hyper(p, value); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * fileid3 + * + * typedef uint64 fileid3; + */ +static int decode_fileid3(struct xdr_stream *xdr, u64 *fileid) +{ + return decode_uint64(xdr, fileid); +} + /* * filename3 * @@ -299,6 +340,33 @@ static void encode_filename3(struct xdr_stream *xdr, xdr_encode_opaque(p, name, length); } +static int decode_inline_filename3(struct xdr_stream *xdr, + const char **name, u32 *length) +{ + __be32 *p; + u32 count; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + count = be32_to_cpup(p); + if (count > NFS3_MAXNAMLEN) + goto out_nametoolong; + p = xdr_inline_decode(xdr, count); + if (unlikely(p == NULL)) + goto out_overflow; + *name = (const char *)p; + *length = count; + return 0; + +out_nametoolong: + dprintk("NFS: returned filename too long: %u\n", count); + return -ENAMETOOLONG; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + /* * nfspath3 * @@ -312,6 +380,39 @@ static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages, xdr_write_pages(xdr, pages, 0, length); } +static int decode_nfspath3(struct xdr_stream *xdr) +{ + u32 recvd, count; + size_t hdrlen; + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + count = be32_to_cpup(p); + if (unlikely(count >= xdr->buf->page_len || count > NFS3_MAXPATHLEN)) + goto out_nametoolong; + hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base; + recvd = xdr->buf->len - hdrlen; + if (unlikely(count > recvd)) + goto out_cheating; + + xdr_read_pages(xdr, count); + xdr_terminate_string(xdr->buf, count); + return 0; + +out_nametoolong: + dprintk("NFS: returned pathname too long: %u\n", count); + return -ENAMETOOLONG; +out_cheating: + dprintk("NFS: server cheating in pathname result: " + "count %u > recvd %u\n", count, recvd); + return -EIO; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + /* * cookie3 * @@ -322,6 +423,11 @@ static __be32 *xdr_encode_cookie3(__be32 *p, u64 cookie) return xdr_encode_hyper(p, cookie); } +static int decode_cookie3(struct xdr_stream *xdr, u64 *cookie) +{ + return decode_uint64(xdr, cookie); +} + /* * cookieverf3 * @@ -333,6 +439,20 @@ static __be32 *xdr_encode_cookieverf3(__be32 *p, const __be32 *verifier) return p + XDR_QUADLEN(NFS3_COOKIEVERFSIZE); } +static int decode_cookieverf3(struct xdr_stream *xdr, __be32 *verifier) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE); + if (unlikely(p == NULL)) + goto out_overflow; + memcpy(verifier, p, NFS3_COOKIEVERFSIZE); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + /* * createverf3 * @@ -346,6 +466,54 @@ static void encode_createverf3(struct xdr_stream *xdr, const __be32 *verifier) memcpy(p, verifier, NFS3_CREATEVERFSIZE); } +static int 
decode_writeverf3(struct xdr_stream *xdr, __be32 *verifier) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE); + if (unlikely(p == NULL)) + goto out_overflow; + memcpy(verifier, p, NFS3_WRITEVERFSIZE); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * size3 + * + * typedef uint64 size3; + */ +static __be32 *xdr_decode_size3(__be32 *p, u64 *size) +{ + return xdr_decode_hyper(p, size); +} + +/* + * nfsstat3 + * + * enum nfsstat3 { + * NFS3_OK = 0, + * ... + * } + */ +#define NFS3_OK NFS_OK + +static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + *status = be32_to_cpup(p); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + /* * ftype3 * @@ -398,6 +566,36 @@ static void encode_nfs_fh3(struct xdr_stream *xdr, const struct nfs_fh *fh) xdr_encode_opaque(p, fh->data, fh->size); } +static int decode_nfs_fh3(struct xdr_stream *xdr, struct nfs_fh *fh) +{ + u32 length; + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + length = be32_to_cpup(p++); + if (unlikely(length > NFS3_FHSIZE)) + goto out_toobig; + p = xdr_inline_decode(xdr, length); + if (unlikely(p == NULL)) + goto out_overflow; + fh->size = length; + memcpy(fh->data, p, length); + return 0; +out_toobig: + dprintk("NFS: file handle size (%u) too big\n", length); + return -E2BIG; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +static void zero_nfs_fh3(struct nfs_fh *fh) +{ + memset(fh, 0, sizeof(*fh)); +} + /* * nfstime3 * @@ -540,6 +738,153 @@ static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr) *p = xdr_zero; } +/* + * fattr3 + * + * struct fattr3 { + * ftype3 type; + * mode3 mode; + * uint32 nlink; + * uid3 uid; + * gid3 gid; + * size3 size; + * size3 used; + * specdata3 rdev; + * uint64 fsid; + * fileid3 fileid; + * nfstime3 atime; + * nfstime3 mtime; + * nfstime3 ctime; + * }; + */ +static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, NFS3_fattr_sz << 2); + if (unlikely(p == NULL)) + goto out_overflow; + xdr_decode_fattr(p, fattr); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * post_op_attr + * + * union post_op_attr switch (bool attributes_follow) { + * case TRUE: + * fattr3 attributes; + * case FALSE: + * void; + * }; + */ +static int decode_post_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + if (*p != xdr_zero) + return decode_fattr3(xdr, fattr); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * wcc_attr + * struct wcc_attr { + * size3 size; + * nfstime3 mtime; + * nfstime3 ctime; + * }; + */ +static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2); + if (unlikely(p == NULL)) + goto out_overflow; + xdr_decode_wcc_attr(p, fattr); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * pre_op_attr + * union pre_op_attr switch (bool attributes_follow) { + * case TRUE: + * wcc_attr attributes; + * case FALSE: + * void; + * }; + * + * wcc_data + * + * struct wcc_data { + * pre_op_attr before; + * post_op_attr after; + * }; + */ +static int 
decode_pre_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + if (*p != xdr_zero) + return decode_wcc_attr(xdr, fattr); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +static int decode_wcc_data(struct xdr_stream *xdr, struct nfs_fattr *fattr) +{ + int error; + + error = decode_pre_op_attr(xdr, fattr); + if (unlikely(error)) + goto out; + error = decode_post_op_attr(xdr, fattr); +out: + return error; +} + +/* + * post_op_fh3 + * + * union post_op_fh3 switch (bool handle_follows) { + * case TRUE: + * nfs_fh3 handle; + * case FALSE: + * void; + * }; + */ +static int decode_post_op_fh3(struct xdr_stream *xdr, struct nfs_fh *fh) +{ + __be32 *p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + if (*p != xdr_zero) + return decode_nfs_fh3(xdr, fh); + zero_nfs_fh3(fh); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + /* * diropargs3 * @@ -1108,78 +1453,6 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res return pglen; } -__be32 * -nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus) -{ - __be32 *p; - struct nfs_entry old = *entry; - - p = xdr_inline_decode(xdr, 4); - if (unlikely(!p)) - goto out_overflow; - if (!ntohl(*p++)) { - p = xdr_inline_decode(xdr, 4); - if (unlikely(!p)) - goto out_overflow; - if (!ntohl(*p++)) - return ERR_PTR(-EAGAIN); - entry->eof = 1; - return ERR_PTR(-EBADCOOKIE); - } - - p = xdr_inline_decode(xdr, 12); - if (unlikely(!p)) - goto out_overflow; - p = xdr_decode_hyper(p, &entry->ino); - entry->len = ntohl(*p++); - - p = xdr_inline_decode(xdr, entry->len + 8); - if (unlikely(!p)) - goto out_overflow; - entry->name = (const char *) p; - p += XDR_QUADLEN(entry->len); - entry->prev_cookie = entry->cookie; - p = xdr_decode_hyper(p, &entry->cookie); - - entry->d_type = DT_UNKNOWN; - if (plus) { - entry->fattr->valid = 0; - p = xdr_decode_post_op_attr_stream(xdr, entry->fattr); - if (IS_ERR(p)) - goto out_overflow_exit; - entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); - /* In fact, a post_op_fh3: */ - p = xdr_inline_decode(xdr, 4); - if (unlikely(!p)) - goto out_overflow; - if (*p++) { - p = xdr_decode_fhandle_stream(xdr, entry->fh); - if (IS_ERR(p)) - goto out_overflow_exit; - /* Ugh -- server reply was truncated */ - if (p == NULL) { - dprintk("NFS: FH truncated\n"); - *entry = old; - return ERR_PTR(-EAGAIN); - } - } else - memset((u8*)(entry->fh), 0, sizeof(*entry->fh)); - } - - p = xdr_inline_peek(xdr, 8); - if (p != NULL) - entry->eof = !p[0] && p[1]; - else - entry->eof = 0; - - return p; - -out_overflow: - print_overflow_msg(__func__, xdr); -out_overflow_exit: - return ERR_PTR(-EAGAIN); -} - /* * 3.3.21 COMMIT3args * @@ -1275,13 +1548,47 @@ nfs3_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) } /* - * Decode status+wcc_data reply - * SATTR, REMOVE, RMDIR + * 3.3.1 GETATTR3res + * + * struct GETATTR3resok { + * fattr3 obj_attributes; + * }; + * + * union GETATTR3res switch (nfsstat3 status) { + * case NFS3_OK: + * GETATTR3resok resok; + * default: + * void; + * }; */ -static int -nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) +static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req, __be32 *p, + struct nfs_fattr *result) { - int status; + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, 
&req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_default; + error = decode_fattr3(&xdr, result); +out: + return error; +out_default: + return nfs_stat_to_errno(status); +} + +/* + * Decode status+wcc_data reply + * SATTR, REMOVE, RMDIR + */ +static int +nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) +{ + int status; if ((status = ntohl(*p++))) status = nfs_stat_to_errno(status); @@ -1289,6 +1596,46 @@ nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) return status; } +/* + * 3.3.2 SETATTR3res + * + * struct SETATTR3resok { + * wcc_data obj_wcc; + * }; + * + * struct SETATTR3resfail { + * wcc_data obj_wcc; + * }; + * + * union SETATTR3res switch (nfsstat3 status) { + * case NFS3_OK: + * SETATTR3resok resok; + * default: + * SETATTR3resfail resfail; + * }; + */ +static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req, __be32 *p, + struct nfs_fattr *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + error = decode_wcc_data(&xdr, result); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_status; +out: + return error; +out_status: + return nfs_stat_to_errno(status); +} + static int nfs3_xdr_removeres(struct rpc_rqst *req, __be32 *p, struct nfs_removeres *res) { @@ -1314,6 +1661,55 @@ nfs3_xdr_lookupres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res) return status; } +/* + * 3.3.3 LOOKUP3res + * + * struct LOOKUP3resok { + * nfs_fh3 object; + * post_op_attr obj_attributes; + * post_op_attr dir_attributes; + * }; + * + * struct LOOKUP3resfail { + * post_op_attr dir_attributes; + * }; + * + * union LOOKUP3res switch (nfsstat3 status) { + * case NFS3_OK: + * LOOKUP3resok resok; + * default: + * LOOKUP3resfail resfail; + * }; + */ +static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req, __be32 *p, + struct nfs3_diropres *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_default; + error = decode_nfs_fh3(&xdr, result->fh); + if (unlikely(error)) + goto out; + error = decode_post_op_attr(&xdr, result->fattr); + if (unlikely(error)) + goto out; + error = decode_post_op_attr(&xdr, result->dir_attr); +out: + return error; +out_default: + error = decode_post_op_attr(&xdr, result->dir_attr); + if (unlikely(error)) + goto out; + return nfs_stat_to_errno(status); +} + /* * Decode ACCESS reply */ @@ -1329,6 +1725,48 @@ nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res) return 0; } +/* + * 3.3.4 ACCESS3res + * + * struct ACCESS3resok { + * post_op_attr obj_attributes; + * uint32 access; + * }; + * + * struct ACCESS3resfail { + * post_op_attr obj_attributes; + * }; + * + * union ACCESS3res switch (nfsstat3 status) { + * case NFS3_OK: + * ACCESS3resok resok; + * default: + * ACCESS3resfail resfail; + * }; + */ +static int nfs3_xdr_dec_access3res(struct rpc_rqst *req, __be32 *p, + struct nfs3_accessres *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + error = decode_post_op_attr(&xdr, result->fattr); + if (unlikely(error)) + goto out; + if 
(status != NFS3_OK) + goto out_default; + error = decode_uint32(&xdr, &result->access); +out: + return error; +out_default: + return nfs_stat_to_errno(status); +} + /* * Decode READLINK reply */ @@ -1375,6 +1813,48 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) return 0; } +/* + * 3.3.5 READLINK3res + * + * struct READLINK3resok { + * post_op_attr symlink_attributes; + * nfspath3 data; + * }; + * + * struct READLINK3resfail { + * post_op_attr symlink_attributes; + * }; + * + * union READLINK3res switch (nfsstat3 status) { + * case NFS3_OK: + * READLINK3resok resok; + * default: + * READLINK3resfail resfail; + * }; + */ +static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req, __be32 *p, + struct nfs_fattr *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + error = decode_post_op_attr(&xdr, result); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_default; + error = decode_nfspath3(&xdr); +out: + return error; +out_default: + return nfs_stat_to_errno(status); +} + /* * Decode READ reply */ @@ -1428,6 +1908,90 @@ nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res) return count; } +/* + * 3.3.6 READ3res + * + * struct READ3resok { + * post_op_attr file_attributes; + * count3 count; + * bool eof; + * opaque data<>; + * }; + * + * struct READ3resfail { + * post_op_attr file_attributes; + * }; + * + * union READ3res switch (nfsstat3 status) { + * case NFS3_OK: + * READ3resok resok; + * default: + * READ3resfail resfail; + * }; + */ +static int decode_read3resok(struct xdr_stream *xdr, + struct nfs_readres *result) +{ + u32 eof, count, ocount, recvd; + size_t hdrlen; + __be32 *p; + + p = xdr_inline_decode(xdr, 4 + 4 + 4); + if (unlikely(p == NULL)) + goto out_overflow; + count = be32_to_cpup(p++); + eof = be32_to_cpup(p++); + ocount = be32_to_cpup(p++); + if (unlikely(ocount != count)) + goto out_mismatch; + hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base; + recvd = xdr->buf->len - hdrlen; + if (unlikely(count > recvd)) + goto out_cheating; + +out: + xdr_read_pages(xdr, count); + result->eof = eof; + result->count = count; + return count; +out_mismatch: + dprintk("NFS: READ count doesn't match length of opaque: " + "count %u != ocount %u\n", count, ocount); + return -EIO; +out_cheating: + dprintk("NFS: server cheating in read result: " + "count %u > recvd %u\n", count, recvd); + count = recvd; + eof = 0; + goto out; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, __be32 *p, + struct nfs_readres *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + error = decode_post_op_attr(&xdr, result->fattr); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_status; + error = decode_read3resok(&xdr, result); +out: + return error; +out_status: + return nfs_stat_to_errno(status); +} + /* * Decode WRITE response */ @@ -1450,6 +2014,78 @@ nfs3_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res) return res->count; } +/* + * 3.3.7 WRITE3res + * + * enum stable_how { + * UNSTABLE = 0, + * DATA_SYNC = 1, + * FILE_SYNC = 2 + * }; + * + * struct WRITE3resok { + * wcc_data file_wcc; + * count3 count; + * stable_how committed; + * 
writeverf3 verf; + * }; + * + * struct WRITE3resfail { + * wcc_data file_wcc; + * }; + * + * union WRITE3res switch (nfsstat3 status) { + * case NFS3_OK: + * WRITE3resok resok; + * default: + * WRITE3resfail resfail; + * }; + */ +static int decode_write3resok(struct xdr_stream *xdr, + struct nfs_writeres *result) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, 4 + 4 + NFS3_WRITEVERFSIZE); + if (unlikely(p == NULL)) + goto out_overflow; + result->count = be32_to_cpup(p++); + result->verf->committed = be32_to_cpup(p++); + if (unlikely(result->verf->committed > NFS_FILE_SYNC)) + goto out_badvalue; + memcpy(result->verf->verifier, p, NFS3_WRITEVERFSIZE); + return result->count; +out_badvalue: + dprintk("NFS: bad stable_how value: %u\n", result->verf->committed); + return -EIO; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, __be32 *p, + struct nfs_writeres *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + error = decode_wcc_data(&xdr, result->fattr); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_status; + error = decode_write3resok(&xdr, result); +out: + return error; +out_status: + return nfs_stat_to_errno(status); +} + /* * Decode a CREATE response */ @@ -1477,6 +2113,111 @@ nfs3_xdr_createres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res) return status; } +/* + * 3.3.8 CREATE3res + * + * struct CREATE3resok { + * post_op_fh3 obj; + * post_op_attr obj_attributes; + * wcc_data dir_wcc; + * }; + * + * struct CREATE3resfail { + * wcc_data dir_wcc; + * }; + * + * union CREATE3res switch (nfsstat3 status) { + * case NFS3_OK: + * CREATE3resok resok; + * default: + * CREATE3resfail resfail; + * }; + */ +static int decode_create3resok(struct xdr_stream *xdr, + struct nfs3_diropres *result) +{ + int error; + + error = decode_post_op_fh3(xdr, result->fh); + if (unlikely(error)) + goto out; + error = decode_post_op_attr(xdr, result->fattr); + if (unlikely(error)) + goto out; + /* The server isn't required to return a file handle. + * If it didn't, force the client to perform a LOOKUP + * to determine the correct file handle and attribute + * values for the new object. 
*/ + if (result->fh->size == 0) + result->fattr->valid = 0; + error = decode_wcc_data(xdr, result->dir_attr); +out: + return error; +} + +static int nfs3_xdr_dec_create3res(struct rpc_rqst *req, __be32 *p, + struct nfs3_diropres *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_default; + error = decode_create3resok(&xdr, result); +out: + return error; +out_default: + error = decode_wcc_data(&xdr, result->dir_attr); + if (unlikely(error)) + goto out; + return nfs_stat_to_errno(status); +} + +/* + * 3.3.12 REMOVE3res + * + * struct REMOVE3resok { + * wcc_data dir_wcc; + * }; + * + * struct REMOVE3resfail { + * wcc_data dir_wcc; + * }; + * + * union REMOVE3res switch (nfsstat3 status) { + * case NFS3_OK: + * REMOVE3resok resok; + * default: + * REMOVE3resfail resfail; + * }; + */ +static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req, __be32 *p, + struct nfs_removeres *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + error = decode_wcc_data(&xdr, result->dir_attr); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_status; +out: + return error; +out_status: + return nfs_stat_to_errno(status); +} + /* * Decode RENAME reply */ @@ -1492,6 +2233,51 @@ nfs3_xdr_renameres(struct rpc_rqst *req, __be32 *p, struct nfs_renameres *res) return status; } +/* + * 3.3.14 RENAME3res + * + * struct RENAME3resok { + * wcc_data fromdir_wcc; + * wcc_data todir_wcc; + * }; + * + * struct RENAME3resfail { + * wcc_data fromdir_wcc; + * wcc_data todir_wcc; + * }; + * + * union RENAME3res switch (nfsstat3 status) { + * case NFS3_OK: + * RENAME3resok resok; + * default: + * RENAME3resfail resfail; + * }; + */ +static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req, __be32 *p, + struct nfs_renameres *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + error = decode_wcc_data(&xdr, result->old_fattr); + if (unlikely(error)) + goto out; + error = decode_wcc_data(&xdr, result->new_fattr); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_status; +out: + return error; +out_status: + return nfs_stat_to_errno(status); +} + /* * Decode LINK reply */ @@ -1507,6 +2293,249 @@ nfs3_xdr_linkres(struct rpc_rqst *req, __be32 *p, struct nfs3_linkres *res) return status; } +/* + * 3.3.15 LINK3res + * + * struct LINK3resok { + * post_op_attr file_attributes; + * wcc_data linkdir_wcc; + * }; + * + * struct LINK3resfail { + * post_op_attr file_attributes; + * wcc_data linkdir_wcc; + * }; + * + * union LINK3res switch (nfsstat3 status) { + * case NFS3_OK: + * LINK3resok resok; + * default: + * LINK3resfail resfail; + * }; + */ +static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, __be32 *p, + struct nfs3_linkres *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + error = decode_post_op_attr(&xdr, result->fattr); + if (unlikely(error)) + goto out; + error = decode_wcc_data(&xdr, result->dir_attr); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto 
out_status; +out: + return error; +out_status: + return nfs_stat_to_errno(status); +} + +/** + * nfs3_decode_dirent - Decode a single NFSv3 directory entry stored in + * the local page cache + * @xdr: XDR stream where entry resides + * @entry: buffer to fill in with entry data + * @server: nfs_server data for this directory + * @plus: boolean indicating whether this should be a readdirplus entry + * + * Returns the position of the next item in the buffer, or an ERR_PTR. + * + * This function is not invoked during READDIR reply decoding, but + * rather whenever an application invokes the getdents(2) system call + * on a directory already in our cache. + * + * 3.3.16 entry3 + * + * struct entry3 { + * fileid3 fileid; + * filename3 name; + * cookie3 cookie; + * fhandle3 filehandle; + * post_op_attr3 attributes; + * entry3 *nextentry; + * }; + * + * 3.3.17 entryplus3 + * struct entryplus3 { + * fileid3 fileid; + * filename3 name; + * cookie3 cookie; + * post_op_attr name_attributes; + * post_op_fh3 name_handle; + * entryplus3 *nextentry; + * }; + */ +__be32 *nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, + struct nfs_server *server, int plus) +{ + struct nfs_entry old = *entry; + __be32 *p; + int error; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + if (*p == xdr_zero) { + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + if (*p == xdr_zero) + return ERR_PTR(-EAGAIN); + entry->eof = 1; + return ERR_PTR(-EBADCOOKIE); + } + + error = decode_fileid3(xdr, &entry->ino); + if (unlikely(error)) + return ERR_PTR(error); + + error = decode_inline_filename3(xdr, &entry->name, &entry->len); + if (unlikely(error)) + return ERR_PTR(error); + + entry->prev_cookie = entry->cookie; + error = decode_cookie3(xdr, &entry->cookie); + if (unlikely(error)) + return ERR_PTR(error); + + entry->d_type = DT_UNKNOWN; + + if (plus) { + entry->fattr->valid = 0; + error = decode_post_op_attr(xdr, entry->fattr); + if (unlikely(error)) + return ERR_PTR(error); + if (entry->fattr->valid & NFS_ATTR_FATTR_V3) + entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); + + /* In fact, a post_op_fh3: */ + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + if (*p != xdr_zero) { + error = decode_nfs_fh3(xdr, entry->fh); + if (unlikely(error)) { + if (error == -E2BIG) + goto out_truncated; + return ERR_PTR(error); + } + } else + zero_nfs_fh3(entry->fh); + } + + /* Peek at the next entry to see if we're at EOD */ + p = xdr_inline_peek(xdr, 4 + 4); + entry->eof = 0; + if (p != NULL) + entry->eof = (p[0] == xdr_zero) && (p[1] != xdr_zero); + return p; + +out_overflow: + print_overflow_msg(__func__, xdr); + return ERR_PTR(-EAGAIN); +out_truncated: + dprintk("NFS: directory entry contains invalid file handle\n"); + *entry = old; + return ERR_PTR(-EAGAIN); +} + +/* + * 3.3.16 READDIR3res + * + * struct dirlist3 { + * entry3 *entries; + * bool eof; + * }; + * + * struct READDIR3resok { + * post_op_attr dir_attributes; + * cookieverf3 cookieverf; + * dirlist3 reply; + * }; + * + * struct READDIR3resfail { + * post_op_attr dir_attributes; + * }; + * + * union READDIR3res switch (nfsstat3 status) { + * case NFS3_OK: + * READDIR3resok resok; + * default: + * READDIR3resfail resfail; + * }; + * + * Read the directory contents into the page cache, but otherwise + * don't touch them. The actual decoding is done by nfs3_decode_entry() + * during subsequent nfs_readdir() calls. 
+ */ +static int decode_dirlist3(struct xdr_stream *xdr) +{ + u32 recvd, pglen; + size_t hdrlen; + + pglen = xdr->buf->page_len; + hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base; + recvd = xdr->buf->len - hdrlen; + if (unlikely(pglen > recvd)) + goto out_cheating; +out: + xdr_read_pages(xdr, pglen); + return pglen; +out_cheating: + dprintk("NFS: server cheating in readdir result: " + "pglen %u > recvd %u\n", pglen, recvd); + pglen = recvd; + goto out; +} + +static int decode_readdir3resok(struct xdr_stream *xdr, + struct nfs3_readdirres *result) +{ + int error; + + error = decode_post_op_attr(xdr, result->dir_attr); + if (unlikely(error)) + goto out; + /* XXX: do we need to check if result->verf != NULL ? */ + error = decode_cookieverf3(xdr, result->verf); + if (unlikely(error)) + goto out; + error = decode_dirlist3(xdr); +out: + return error; +} + +static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req, __be32 *p, + struct nfs3_readdirres *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_default; + error = decode_readdir3resok(&xdr, result); +out: + return error; +out_default: + error = decode_post_op_attr(&xdr, result->dir_attr); + if (unlikely(error)) + goto out; + return nfs_stat_to_errno(status); +} + /* * Decode FSSTAT reply */ @@ -1532,6 +2561,75 @@ nfs3_xdr_fsstatres(struct rpc_rqst *req, __be32 *p, struct nfs_fsstat *res) return 0; } +/* + * 3.3.18 FSSTAT3res + * + * struct FSSTAT3resok { + * post_op_attr obj_attributes; + * size3 tbytes; + * size3 fbytes; + * size3 abytes; + * size3 tfiles; + * size3 ffiles; + * size3 afiles; + * uint32 invarsec; + * }; + * + * struct FSSTAT3resfail { + * post_op_attr obj_attributes; + * }; + * + * union FSSTAT3res switch (nfsstat3 status) { + * case NFS3_OK: + * FSSTAT3resok resok; + * default: + * FSSTAT3resfail resfail; + * }; + */ +static int decode_fsstat3resok(struct xdr_stream *xdr, + struct nfs_fsstat *result) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, 8 * 6 + 4); + if (unlikely(p == NULL)) + goto out_overflow; + p = xdr_decode_size3(p, &result->tbytes); + p = xdr_decode_size3(p, &result->fbytes); + p = xdr_decode_size3(p, &result->abytes); + p = xdr_decode_size3(p, &result->tfiles); + p = xdr_decode_size3(p, &result->ffiles); + xdr_decode_size3(p, &result->afiles); + /* ignore invarsec */ + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req, __be32 *p, + struct nfs_fsstat *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + error = decode_post_op_attr(&xdr, result->fattr); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_status; + error = decode_fsstat3resok(&xdr, result); +out: + return error; +out_status: + return nfs_stat_to_errno(status); +} + /* * Decode FSINFO reply */ @@ -1561,6 +2659,83 @@ nfs3_xdr_fsinfores(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *res) return 0; } +/* + * 3.3.19 FSINFO3res + * + * struct FSINFO3resok { + * post_op_attr obj_attributes; + * uint32 rtmax; + * uint32 rtpref; + * uint32 rtmult; + * uint32 wtmax; + * uint32 wtpref; + * uint32 wtmult; + * uint32 dtpref; + * size3 maxfilesize; + * nfstime3 time_delta; + * uint32 properties; + * }; + * + * struct 
FSINFO3resfail { + * post_op_attr obj_attributes; + * }; + * + * union FSINFO3res switch (nfsstat3 status) { + * case NFS3_OK: + * FSINFO3resok resok; + * default: + * FSINFO3resfail resfail; + * }; + */ +static int decode_fsinfo3resok(struct xdr_stream *xdr, + struct nfs_fsinfo *result) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, 4 * 7 + 8 + 8 + 4); + if (unlikely(p == NULL)) + goto out_overflow; + result->rtmax = be32_to_cpup(p++); + result->rtpref = be32_to_cpup(p++); + result->rtmult = be32_to_cpup(p++); + result->wtmax = be32_to_cpup(p++); + result->wtpref = be32_to_cpup(p++); + result->wtmult = be32_to_cpup(p++); + result->dtpref = be32_to_cpup(p++); + p = xdr_decode_size3(p, &result->maxfilesize); + xdr_decode_time3(p, &result->time_delta); + + /* ignore properties */ + result->lease_time = 0; + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req, __be32 *p, + struct nfs_fsinfo *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + error = decode_post_op_attr(&xdr, result->fattr); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_status; + error = decode_fsinfo3resok(&xdr, result); +out: + return error; +out_status: + return nfs_stat_to_errno(status); +} + /* * Decode PATHCONF reply */ @@ -1581,6 +2756,70 @@ nfs3_xdr_pathconfres(struct rpc_rqst *req, __be32 *p, struct nfs_pathconf *res) return 0; } +/* + * 3.3.20 PATHCONF3res + * + * struct PATHCONF3resok { + * post_op_attr obj_attributes; + * uint32 linkmax; + * uint32 name_max; + * bool no_trunc; + * bool chown_restricted; + * bool case_insensitive; + * bool case_preserving; + * }; + * + * struct PATHCONF3resfail { + * post_op_attr obj_attributes; + * }; + * + * union PATHCONF3res switch (nfsstat3 status) { + * case NFS3_OK: + * PATHCONF3resok resok; + * default: + * PATHCONF3resfail resfail; + * }; + */ +static int decode_pathconf3resok(struct xdr_stream *xdr, + struct nfs_pathconf *result) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, 4 * 6); + if (unlikely(p == NULL)) + goto out_overflow; + result->max_link = be32_to_cpup(p++); + result->max_namelen = be32_to_cpup(p); + /* ignore remaining fields */ + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req, __be32 *p, + struct nfs_pathconf *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + error = decode_post_op_attr(&xdr, result->fattr); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_status; + error = decode_pathconf3resok(&xdr, result); +out: + return error; +out_status: + return nfs_stat_to_errno(status); +} + /* * Decode COMMIT reply */ @@ -1599,6 +2838,48 @@ nfs3_xdr_commitres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res) return 0; } +/* + * 3.3.21 COMMIT3res + * + * struct COMMIT3resok { + * wcc_data file_wcc; + * writeverf3 verf; + * }; + * + * struct COMMIT3resfail { + * wcc_data file_wcc; + * }; + * + * union COMMIT3res switch (nfsstat3 status) { + * case NFS3_OK: + * COMMIT3resok resok; + * default: + * COMMIT3resfail resfail; + * }; + */ +static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req, __be32 *p, + struct nfs_writeres *result) +{ + struct 
xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + error = decode_wcc_data(&xdr, result->fattr); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_status; + error = decode_writeverf3(&xdr, result->verf->verifier); +out: + return error; +out_status: + return nfs_stat_to_errno(status); +} + #ifdef CONFIG_NFS_V3_ACL /* * Decode GETACL reply @@ -1632,6 +2913,70 @@ nfs3_xdr_getaclres(struct rpc_rqst *req, __be32 *p, return (err > 0) ? 0 : err; } +static inline int decode_getacl3resok(struct xdr_stream *xdr, + struct nfs3_getaclres *result) +{ + struct posix_acl **acl; + unsigned int *aclcnt; + size_t hdrlen; + int error; + + error = decode_post_op_attr(xdr, result->fattr); + if (unlikely(error)) + goto out; + error = decode_uint32(xdr, &result->mask); + if (unlikely(error)) + goto out; + error = -EINVAL; + if (result->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT)) + goto out; + + hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base; + + acl = NULL; + if (result->mask & NFS_ACL) + acl = &result->acl_access; + aclcnt = NULL; + if (result->mask & NFS_ACLCNT) + aclcnt = &result->acl_access_count; + error = nfsacl_decode(xdr->buf, hdrlen, aclcnt, acl); + if (unlikely(error <= 0)) + goto out; + + acl = NULL; + if (result->mask & NFS_DFACL) + acl = &result->acl_default; + aclcnt = NULL; + if (result->mask & NFS_DFACLCNT) + aclcnt = &result->acl_default_count; + error = nfsacl_decode(xdr->buf, hdrlen + error, aclcnt, acl); + if (unlikely(error <= 0)) + return error; + error = 0; +out: + return error; +} + +static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req, __be32 *p, + struct nfs3_getaclres *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_default; + error = decode_getacl3resok(&xdr, result); +out: + return error; +out_default: + return nfs_stat_to_errno(status); +} + /* * Decode setacl reply. */ @@ -1645,6 +2990,27 @@ nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) xdr_decode_post_op_attr(p, fattr); return 0; } + +static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req, __be32 *p, + struct nfs_fattr *result) +{ + struct xdr_stream xdr; + enum nfs_stat status; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_nfsstat3(&xdr, &status); + if (unlikely(error)) + goto out; + if (status != NFS3_OK) + goto out_default; + error = decode_post_op_attr(&xdr, result); +out: + return error; +out_default: + return nfs_stat_to_errno(status); +} + #endif /* CONFIG_NFS_V3_ACL */ #define PROC(proc, argtype, restype, timer) \ -- cgit v1.2.2 From f5fc3c50c99a7df2bf908dfe66f112d35178ee07 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:56:42 +0000 Subject: NFS: Switch in new NFSv3 decoder functions The naming scheme of the new decoder functions, which follows the NFSv4 XDR decoder functions, is slightly different than the scheme used for the old functions. Rename the functions as a separate step to keep the patches clean. Signed-off-by: Chuck Lever Tested-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs3xdr.c | 64 ++++++++++++++++++++++++++------------------------------ 1 file changed, 30 insertions(+), 34 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 0f07c6d55131..19c791101928 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -38,14 +38,10 @@ #define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2)) #define NFS3_fattr_sz (21) #define NFS3_cookieverf_sz (NFS3_COOKIEVERFSIZE>>2) -#define NFS3_wcc_attr_sz (6) +#define NFS3_wcc_attr_sz (6) #define NFS3_pre_op_attr_sz (1+NFS3_wcc_attr_sz) #define NFS3_post_op_attr_sz (1+NFS3_fattr_sz) -#define NFS3_wcc_data_sz (NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz) -#define NFS3_fsstat_sz -#define NFS3_fsinfo_sz -#define NFS3_pathconf_sz -#define NFS3_entry_sz (NFS3_filename_sz+3) +#define NFS3_wcc_data_sz (NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz) #define NFS3_diropargs_sz (NFS3_fh_sz+NFS3_filename_sz) #define NFS3_getattrargs_sz (NFS3_fh_sz) @@ -66,9 +62,9 @@ #define NFS3_readdirplusargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+4) #define NFS3_commitargs_sz (NFS3_fh_sz+3) -#define NFS3_attrstat_sz (1+NFS3_fattr_sz) -#define NFS3_wccstat_sz (1+NFS3_wcc_data_sz) -#define NFS3_removeres_sz (NFS3_wccstat_sz) +#define NFS3_getattrres_sz (1+NFS3_fattr_sz) +#define NFS3_setattrres_sz (1+NFS3_wcc_data_sz) +#define NFS3_removeres_sz (NFS3_setattrres_sz) #define NFS3_lookupres_sz (1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz)) #define NFS3_accessres_sz (1+NFS3_post_op_attr_sz+1) #define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1) @@ -3017,36 +3013,36 @@ out_default: [NFS3PROC_##proc] = { \ .p_proc = NFS3PROC_##proc, \ .p_encode = (kxdrproc_t)nfs3_xdr_enc_##argtype##3args, \ - .p_decode = (kxdrproc_t) nfs3_xdr_##restype, \ + .p_decode = (kxdrproc_t)nfs3_xdr_dec_##restype##3res, \ .p_arglen = NFS3_##argtype##args_sz, \ - .p_replen = NFS3_##restype##_sz, \ + .p_replen = NFS3_##restype##res_sz, \ .p_timer = timer, \ .p_statidx = NFS3PROC_##proc, \ .p_name = #proc, \ } struct rpc_procinfo nfs3_procedures[] = { - PROC(GETATTR, getattr, attrstat, 1), - PROC(SETATTR, setattr, wccstat, 0), - PROC(LOOKUP, lookup, lookupres, 2), - PROC(ACCESS, access, accessres, 1), - PROC(READLINK, readlink, readlinkres, 3), - PROC(READ, read, readres, 3), - PROC(WRITE, write, writeres, 4), - PROC(CREATE, create, createres, 0), - PROC(MKDIR, mkdir, createres, 0), - PROC(SYMLINK, symlink, createres, 0), - PROC(MKNOD, mknod, createres, 0), - PROC(REMOVE, remove, removeres, 0), - PROC(RMDIR, lookup, wccstat, 0), - PROC(RENAME, rename, renameres, 0), - PROC(LINK, link, linkres, 0), - PROC(READDIR, readdir, readdirres, 3), - PROC(READDIRPLUS, readdirplus, readdirres, 3), - PROC(FSSTAT, getattr, fsstatres, 0), - PROC(FSINFO, getattr, fsinfores, 0), - PROC(PATHCONF, getattr, pathconfres, 0), - PROC(COMMIT, commit, commitres, 5), + PROC(GETATTR, getattr, getattr, 1), + PROC(SETATTR, setattr, setattr, 0), + PROC(LOOKUP, lookup, lookup, 2), + PROC(ACCESS, access, access, 1), + PROC(READLINK, readlink, readlink, 3), + PROC(READ, read, read, 3), + PROC(WRITE, write, write, 4), + PROC(CREATE, create, create, 0), + PROC(MKDIR, mkdir, create, 0), + PROC(SYMLINK, symlink, create, 0), + PROC(MKNOD, mknod, create, 0), + PROC(REMOVE, remove, remove, 0), + PROC(RMDIR, lookup, setattr, 0), + PROC(RENAME, rename, rename, 0), + PROC(LINK, link, link, 0), + PROC(READDIR, readdir, readdir, 3), + PROC(READDIRPLUS, readdirplus, readdir, 3), + PROC(FSSTAT, getattr, fsstat, 0), + PROC(FSINFO, getattr, fsinfo, 0), + PROC(PATHCONF, 
getattr, pathconf, 0), + PROC(COMMIT, commit, commit, 5), }; struct rpc_version nfs_version3 = { @@ -3060,7 +3056,7 @@ static struct rpc_procinfo nfs3_acl_procedures[] = { [ACLPROC3_GETACL] = { .p_proc = ACLPROC3_GETACL, .p_encode = (kxdrproc_t)nfs3_xdr_enc_getacl3args, - .p_decode = (kxdrproc_t) nfs3_xdr_getaclres, + .p_decode = (kxdrproc_t)nfs3_xdr_dec_getacl3res, .p_arglen = ACL3_getaclargs_sz, .p_replen = ACL3_getaclres_sz, .p_timer = 1, @@ -3069,7 +3065,7 @@ static struct rpc_procinfo nfs3_acl_procedures[] = { [ACLPROC3_SETACL] = { .p_proc = ACLPROC3_SETACL, .p_encode = (kxdrproc_t)nfs3_xdr_enc_setacl3args, - .p_decode = (kxdrproc_t) nfs3_xdr_setaclres, + .p_decode = (kxdrproc_t)nfs3_xdr_dec_setacl3res, .p_arglen = ACL3_setaclargs_sz, .p_replen = ACL3_setaclres_sz, .p_timer = 0, -- cgit v1.2.2 From b2cdd9c9c95e0e389a8b75fe25f266fc5267bbb6 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:56:52 +0000 Subject: NFS: Remove unused old NFSv3 decoder functions Clean up. Remove unused legacy result decoder functions, and any now unused decoder helper functions. Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs3xdr.c | 512 +------------------------------------------------------ 1 file changed, 4 insertions(+), 508 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 19c791101928..586587f42fc9 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -129,38 +129,6 @@ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) /* * Common NFS XDR functions as inlines */ -static inline __be32 * -xdr_decode_fhandle(__be32 *p, struct nfs_fh *fh) -{ - if ((fh->size = ntohl(*p++)) <= NFS3_FHSIZE) { - memcpy(fh->data, p, fh->size); - return p + XDR_QUADLEN(fh->size); - } - return NULL; -} - -static inline __be32 * -xdr_decode_fhandle_stream(struct xdr_stream *xdr, struct nfs_fh *fh) -{ - __be32 *p; - p = xdr_inline_decode(xdr, 4); - if (unlikely(!p)) - goto out_overflow; - fh->size = ntohl(*p++); - - if (fh->size <= NFS3_FHSIZE) { - p = xdr_inline_decode(xdr, fh->size); - if (unlikely(!p)) - goto out_overflow; - memcpy(fh->data, p, fh->size); - return p + XDR_QUADLEN(fh->size); - } - return NULL; - -out_overflow: - print_overflow_msg(__func__, xdr); - return ERR_PTR(-EIO); -} /* * Encode/decode time. @@ -221,51 +189,6 @@ xdr_decode_wcc_attr(__be32 *p, struct nfs_fattr *fattr) return p; } -static inline __be32 * -xdr_decode_post_op_attr(__be32 *p, struct nfs_fattr *fattr) -{ - if (*p++) - p = xdr_decode_fattr(p, fattr); - return p; -} - -static inline __be32 * -xdr_decode_post_op_attr_stream(struct xdr_stream *xdr, struct nfs_fattr *fattr) -{ - __be32 *p; - - p = xdr_inline_decode(xdr, 4); - if (unlikely(!p)) - goto out_overflow; - if (ntohl(*p++)) { - p = xdr_inline_decode(xdr, 84); - if (unlikely(!p)) - goto out_overflow; - p = xdr_decode_fattr(p, fattr); - } - return p; -out_overflow: - print_overflow_msg(__func__, xdr); - return ERR_PTR(-EIO); -} - -static inline __be32 * -xdr_decode_pre_op_attr(__be32 *p, struct nfs_fattr *fattr) -{ - if (*p++) - return xdr_decode_wcc_attr(p, fattr); - return p; -} - - -static inline __be32 * -xdr_decode_wcc_data(__be32 *p, struct nfs_fattr *fattr) -{ - p = xdr_decode_pre_op_attr(p, fattr); - return xdr_decode_post_op_attr(p, fattr); -} - - /* * Encode/decode NFSv3 basic data types * @@ -1403,52 +1326,6 @@ static int nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req, __be32 *p, return 0; } -/* - * Decode the result of a readdir call. 
- * We just check for syntactical correctness. - */ -static int -nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res) -{ - struct xdr_buf *rcvbuf = &req->rq_rcv_buf; - struct kvec *iov = rcvbuf->head; - struct page **page; - size_t hdrlen; - u32 recvd, pglen; - int status; - - status = ntohl(*p++); - /* Decode post_op_attrs */ - p = xdr_decode_post_op_attr(p, res->dir_attr); - if (status) - return nfs_stat_to_errno(status); - /* Decode verifier cookie */ - if (res->verf) { - res->verf[0] = *p++; - res->verf[1] = *p++; - } else { - p += 2; - } - - hdrlen = (u8 *) p - (u8 *) iov->iov_base; - if (iov->iov_len < hdrlen) { - dprintk("NFS: READDIR reply header overflowed:" - "length %Zu > %Zu\n", hdrlen, iov->iov_len); - return -errno_NFSERR_IO; - } else if (iov->iov_len != hdrlen) { - dprintk("NFS: READDIR header is short. iovec will be shifted.\n"); - xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen); - } - - pglen = rcvbuf->page_len; - recvd = rcvbuf->len - hdrlen; - if (pglen > recvd) - pglen = recvd; - page = rcvbuf->pages; - - return pglen; -} - /* * 3.3.21 COMMIT3args * @@ -1526,22 +1403,11 @@ static int nfs3_xdr_enc_setacl3args(struct rpc_rqst *req, __be32 *p, #endif /* CONFIG_NFS_V3_ACL */ /* - * NFS XDR decode functions - */ - -/* - * Decode attrstat reply. + * NFSv3 XDR decode functions + * + * NFSv3 result types are defined in section 3.3 of RFC 1813: + * "NFS Version 3 Protocol Specification". */ -static int -nfs3_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) -{ - int status; - - if ((status = ntohl(*p++))) - return nfs_stat_to_errno(status); - xdr_decode_fattr(p, fattr); - return 0; -} /* * 3.3.1 GETATTR3res @@ -1577,21 +1443,6 @@ out_default: return nfs_stat_to_errno(status); } -/* - * Decode status+wcc_data reply - * SATTR, REMOVE, RMDIR - */ -static int -nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) -{ - int status; - - if ((status = ntohl(*p++))) - status = nfs_stat_to_errno(status); - xdr_decode_wcc_data(p, fattr); - return status; -} - /* * 3.3.2 SETATTR3res * @@ -1632,31 +1483,6 @@ out_status: return nfs_stat_to_errno(status); } -static int -nfs3_xdr_removeres(struct rpc_rqst *req, __be32 *p, struct nfs_removeres *res) -{ - return nfs3_xdr_wccstat(req, p, res->dir_attr); -} - -/* - * Decode LOOKUP reply - */ -static int -nfs3_xdr_lookupres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res) -{ - int status; - - if ((status = ntohl(*p++))) { - status = nfs_stat_to_errno(status); - } else { - if (!(p = xdr_decode_fhandle(p, res->fh))) - return -errno_NFSERR_IO; - p = xdr_decode_post_op_attr(p, res->fattr); - } - xdr_decode_post_op_attr(p, res->dir_attr); - return status; -} - /* * 3.3.3 LOOKUP3res * @@ -1706,21 +1532,6 @@ out_default: return nfs_stat_to_errno(status); } -/* - * Decode ACCESS reply - */ -static int -nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res) -{ - int status = ntohl(*p++); - - p = xdr_decode_post_op_attr(p, res->fattr); - if (status) - return nfs_stat_to_errno(status); - res->access = ntohl(*p++); - return 0; -} - /* * 3.3.4 ACCESS3res * @@ -1763,52 +1574,6 @@ out_default: return nfs_stat_to_errno(status); } -/* - * Decode READLINK reply - */ -static int -nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) -{ - struct xdr_buf *rcvbuf = &req->rq_rcv_buf; - struct kvec *iov = rcvbuf->head; - size_t hdrlen; - u32 len, recvd; - int status; - - status = ntohl(*p++); - p = xdr_decode_post_op_attr(p, fattr); - - if 
(status != 0) - return nfs_stat_to_errno(status); - - /* Convert length of symlink */ - len = ntohl(*p++); - if (len >= rcvbuf->page_len) { - dprintk("nfs: server returned giant symlink!\n"); - return -ENAMETOOLONG; - } - - hdrlen = (u8 *) p - (u8 *) iov->iov_base; - if (iov->iov_len < hdrlen) { - dprintk("NFS: READLINK reply header overflowed:" - "length %Zu > %Zu\n", hdrlen, iov->iov_len); - return -errno_NFSERR_IO; - } else if (iov->iov_len != hdrlen) { - dprintk("NFS: READLINK header is short. " - "iovec will be shifted.\n"); - xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen); - } - recvd = req->rq_rcv_buf.len - hdrlen; - if (recvd < len) { - dprintk("NFS: server cheating in readlink reply: " - "count %u > recvd %u\n", len, recvd); - return -EIO; - } - - xdr_terminate_string(rcvbuf, len); - return 0; -} - /* * 3.3.5 READLINK3res * @@ -1851,59 +1616,6 @@ out_default: return nfs_stat_to_errno(status); } -/* - * Decode READ reply - */ -static int -nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res) -{ - struct kvec *iov = req->rq_rcv_buf.head; - size_t hdrlen; - u32 count, ocount, recvd; - int status; - - status = ntohl(*p++); - p = xdr_decode_post_op_attr(p, res->fattr); - - if (status != 0) - return nfs_stat_to_errno(status); - - /* Decode reply count and EOF flag. NFSv3 is somewhat redundant - * in that it puts the count both in the res struct and in the - * opaque data count. */ - count = ntohl(*p++); - res->eof = ntohl(*p++); - ocount = ntohl(*p++); - - if (ocount != count) { - dprintk("NFS: READ count doesn't match RPC opaque count.\n"); - return -errno_NFSERR_IO; - } - - hdrlen = (u8 *) p - (u8 *) iov->iov_base; - if (iov->iov_len < hdrlen) { - dprintk("NFS: READ reply header overflowed:" - "length %Zu > %Zu\n", hdrlen, iov->iov_len); - return -errno_NFSERR_IO; - } else if (iov->iov_len != hdrlen) { - dprintk("NFS: READ header is short. 
iovec will be shifted.\n"); - xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen); - } - - recvd = req->rq_rcv_buf.len - hdrlen; - if (count > recvd) { - dprintk("NFS: server cheating in read reply: " - "count %u > recvd %u\n", count, recvd); - count = recvd; - res->eof = 0; - } - - if (count < res->count) - res->count = count; - - return count; -} - /* * 3.3.6 READ3res * @@ -1988,28 +1700,6 @@ out_status: return nfs_stat_to_errno(status); } -/* - * Decode WRITE response - */ -static int -nfs3_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res) -{ - int status; - - status = ntohl(*p++); - p = xdr_decode_wcc_data(p, res->fattr); - - if (status != 0) - return nfs_stat_to_errno(status); - - res->count = ntohl(*p++); - res->verf->committed = (enum nfs3_stable_how)ntohl(*p++); - res->verf->verifier[0] = *p++; - res->verf->verifier[1] = *p++; - - return res->count; -} - /* * 3.3.7 WRITE3res * @@ -2082,33 +1772,6 @@ out_status: return nfs_stat_to_errno(status); } -/* - * Decode a CREATE response - */ -static int -nfs3_xdr_createres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res) -{ - int status; - - status = ntohl(*p++); - if (status == 0) { - if (*p++) { - if (!(p = xdr_decode_fhandle(p, res->fh))) - return -errno_NFSERR_IO; - p = xdr_decode_post_op_attr(p, res->fattr); - } else { - memset(res->fh, 0, sizeof(*res->fh)); - /* Do decode post_op_attr but set it to NULL */ - p = xdr_decode_post_op_attr(p, res->fattr); - res->fattr->valid = 0; - } - } else { - status = nfs_stat_to_errno(status); - } - p = xdr_decode_wcc_data(p, res->dir_attr); - return status; -} - /* * 3.3.8 CREATE3res * @@ -2214,21 +1877,6 @@ out_status: return nfs_stat_to_errno(status); } -/* - * Decode RENAME reply - */ -static int -nfs3_xdr_renameres(struct rpc_rqst *req, __be32 *p, struct nfs_renameres *res) -{ - int status; - - if ((status = ntohl(*p++)) != 0) - status = nfs_stat_to_errno(status); - p = xdr_decode_wcc_data(p, res->old_fattr); - p = xdr_decode_wcc_data(p, res->new_fattr); - return status; -} - /* * 3.3.14 RENAME3res * @@ -2274,21 +1922,6 @@ out_status: return nfs_stat_to_errno(status); } -/* - * Decode LINK reply - */ -static int -nfs3_xdr_linkres(struct rpc_rqst *req, __be32 *p, struct nfs3_linkres *res) -{ - int status; - - if ((status = ntohl(*p++)) != 0) - status = nfs_stat_to_errno(status); - p = xdr_decode_post_op_attr(p, res->fattr); - p = xdr_decode_wcc_data(p, res->dir_attr); - return status; -} - /* * 3.3.15 LINK3res * @@ -2532,31 +2165,6 @@ out_default: return nfs_stat_to_errno(status); } -/* - * Decode FSSTAT reply - */ -static int -nfs3_xdr_fsstatres(struct rpc_rqst *req, __be32 *p, struct nfs_fsstat *res) -{ - int status; - - status = ntohl(*p++); - - p = xdr_decode_post_op_attr(p, res->fattr); - if (status != 0) - return nfs_stat_to_errno(status); - - p = xdr_decode_hyper(p, &res->tbytes); - p = xdr_decode_hyper(p, &res->fbytes); - p = xdr_decode_hyper(p, &res->abytes); - p = xdr_decode_hyper(p, &res->tfiles); - p = xdr_decode_hyper(p, &res->ffiles); - p = xdr_decode_hyper(p, &res->afiles); - - /* ignore invarsec */ - return 0; -} - /* * 3.3.18 FSSTAT3res * @@ -2626,35 +2234,6 @@ out_status: return nfs_stat_to_errno(status); } -/* - * Decode FSINFO reply - */ -static int -nfs3_xdr_fsinfores(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *res) -{ - int status; - - status = ntohl(*p++); - - p = xdr_decode_post_op_attr(p, res->fattr); - if (status != 0) - return nfs_stat_to_errno(status); - - res->rtmax = ntohl(*p++); - res->rtpref = ntohl(*p++); - res->rtmult 
= ntohl(*p++); - res->wtmax = ntohl(*p++); - res->wtpref = ntohl(*p++); - res->wtmult = ntohl(*p++); - res->dtpref = ntohl(*p++); - p = xdr_decode_hyper(p, &res->maxfilesize); - p = xdr_decode_time3(p, &res->time_delta); - - /* ignore properties */ - res->lease_time = 0; - return 0; -} - /* * 3.3.19 FSINFO3res * @@ -2732,26 +2311,6 @@ out_status: return nfs_stat_to_errno(status); } -/* - * Decode PATHCONF reply - */ -static int -nfs3_xdr_pathconfres(struct rpc_rqst *req, __be32 *p, struct nfs_pathconf *res) -{ - int status; - - status = ntohl(*p++); - - p = xdr_decode_post_op_attr(p, res->fattr); - if (status != 0) - return nfs_stat_to_errno(status); - res->max_link = ntohl(*p++); - res->max_namelen = ntohl(*p++); - - /* ignore remaining fields */ - return 0; -} - /* * 3.3.20 PATHCONF3res * @@ -2816,24 +2375,6 @@ out_status: return nfs_stat_to_errno(status); } -/* - * Decode COMMIT reply - */ -static int -nfs3_xdr_commitres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res) -{ - int status; - - status = ntohl(*p++); - p = xdr_decode_wcc_data(p, res->fattr); - if (status != 0) - return nfs_stat_to_errno(status); - - res->verf->verifier[0] = *p++; - res->verf->verifier[1] = *p++; - return 0; -} - /* * 3.3.21 COMMIT3res * @@ -2877,37 +2418,6 @@ out_status: } #ifdef CONFIG_NFS_V3_ACL -/* - * Decode GETACL reply - */ -static int -nfs3_xdr_getaclres(struct rpc_rqst *req, __be32 *p, - struct nfs3_getaclres *res) -{ - struct xdr_buf *buf = &req->rq_rcv_buf; - int status = ntohl(*p++); - struct posix_acl **acl; - unsigned int *aclcnt; - int err, base; - - if (status != 0) - return nfs_stat_to_errno(status); - p = xdr_decode_post_op_attr(p, res->fattr); - res->mask = ntohl(*p++); - if (res->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT)) - return -EINVAL; - base = (char *)p - (char *)req->rq_rcv_buf.head->iov_base; - - acl = (res->mask & NFS_ACL) ? &res->acl_access : NULL; - aclcnt = (res->mask & NFS_ACLCNT) ? &res->acl_access_count : NULL; - err = nfsacl_decode(buf, base, aclcnt, acl); - - acl = (res->mask & NFS_DFACL) ? &res->acl_default : NULL; - aclcnt = (res->mask & NFS_DFACLCNT) ? &res->acl_default_count : NULL; - if (err > 0) - err = nfsacl_decode(buf, base + err, aclcnt, acl); - return (err > 0) ? 0 : err; -} static inline int decode_getacl3resok(struct xdr_stream *xdr, struct nfs3_getaclres *result) @@ -2973,20 +2483,6 @@ out_default: return nfs_stat_to_errno(status); } -/* - * Decode setacl reply. - */ -static int -nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr) -{ - int status = ntohl(*p++); - - if (status) - return nfs_stat_to_errno(status); - xdr_decode_post_op_attr(p, fattr); - return 0; -} - static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *result) { -- cgit v1.2.2 From f6048709391336cf27fb5c1cfca8e792103e5a73 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:57:02 +0000 Subject: NFS: Move and update xdr_decode_foo() functions that we're keeping Clean up. Move the timestamp decoder to match the placement and naming conventions of the other helpers. Fold xdr_decode_fattr() into decode_fattr3(), which is now it's only user. Fold xdr_decode_wcc_attr() into decode_wcc_attr(), which is now it's only user. Signed-off-by: Chuck Lever Tested-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs3xdr.c | 136 +++++++++++++++++++++++++++---------------------------- 1 file changed, 67 insertions(+), 69 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 586587f42fc9..c97d00fe849a 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -126,69 +126,6 @@ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) } -/* - * Common NFS XDR functions as inlines - */ - -/* - * Encode/decode time. - */ -static inline __be32 * -xdr_decode_time3(__be32 *p, struct timespec *timep) -{ - timep->tv_sec = ntohl(*p++); - timep->tv_nsec = ntohl(*p++); - return p; -} - -static __be32 * -xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr) -{ - unsigned int type, major, minor; - umode_t fmode; - - type = ntohl(*p++); - if (type > NF3FIFO) - type = NF3NON; - fmode = nfs_type2fmt[type]; - fattr->mode = (ntohl(*p++) & ~S_IFMT) | fmode; - fattr->nlink = ntohl(*p++); - fattr->uid = ntohl(*p++); - fattr->gid = ntohl(*p++); - p = xdr_decode_hyper(p, &fattr->size); - p = xdr_decode_hyper(p, &fattr->du.nfs3.used); - - /* Turn remote device info into Linux-specific dev_t */ - major = ntohl(*p++); - minor = ntohl(*p++); - fattr->rdev = MKDEV(major, minor); - if (MAJOR(fattr->rdev) != major || MINOR(fattr->rdev) != minor) - fattr->rdev = 0; - - p = xdr_decode_hyper(p, &fattr->fsid.major); - fattr->fsid.minor = 0; - p = xdr_decode_hyper(p, &fattr->fileid); - p = xdr_decode_time3(p, &fattr->atime); - p = xdr_decode_time3(p, &fattr->mtime); - p = xdr_decode_time3(p, &fattr->ctime); - - /* Update the mode bits */ - fattr->valid |= NFS_ATTR_FATTR_V3; - return p; -} - -static inline __be32 * -xdr_decode_wcc_attr(__be32 *p, struct nfs_fattr *fattr) -{ - p = xdr_decode_hyper(p, &fattr->pre_size); - p = xdr_decode_time3(p, &fattr->pre_mtime); - p = xdr_decode_time3(p, &fattr->pre_ctime); - fattr->valid |= NFS_ATTR_FATTR_PRESIZE - | NFS_ATTR_FATTR_PREMTIME - | NFS_ATTR_FATTR_PRECTIME; - return p; -} - /* * Encode/decode NFSv3 basic data types * @@ -239,6 +176,11 @@ out_overflow: * * typedef uint64 fileid3; */ +static __be32 *xdr_decode_fileid3(__be32 *p, u64 *fileid) +{ + return xdr_decode_hyper(p, fileid); +} + static int decode_fileid3(struct xdr_stream *xdr, u64 *fileid) { return decode_uint64(xdr, fileid); @@ -452,6 +394,17 @@ static void encode_ftype3(struct xdr_stream *xdr, const u32 type) encode_uint32(xdr, type); } +static __be32 *xdr_decode_ftype3(__be32 *p, umode_t *mode) +{ + u32 type; + + type = be32_to_cpup(p++); + if (type > NF3FIFO) + type = NF3NON; + *mode = nfs_type2fmt[type]; + return p; +} + /* * specdata3 * @@ -469,6 +422,18 @@ static void encode_specdata3(struct xdr_stream *xdr, const dev_t rdev) *p = cpu_to_be32(MINOR(rdev)); } +static __be32 *xdr_decode_specdata3(__be32 *p, dev_t *rdev) +{ + unsigned int major, minor; + + major = be32_to_cpup(p++); + minor = be32_to_cpup(p++); + *rdev = MKDEV(major, minor); + if (MAJOR(*rdev) != major || MINOR(*rdev) != minor) + *rdev = 0; + return p; +} + /* * nfs_fh3 * @@ -530,6 +495,13 @@ static __be32 *xdr_encode_nfstime3(__be32 *p, const struct timespec *timep) return p; } +static __be32 *xdr_decode_nfstime3(__be32 *p, struct timespec *timep) +{ + timep->tv_sec = be32_to_cpup(p++); + timep->tv_nsec = be32_to_cpup(p++); + return p; +} + /* * sattr3 * @@ -678,12 +650,33 @@ static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr) */ static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr) { + umode_t fmode; __be32 *p; p 
= xdr_inline_decode(xdr, NFS3_fattr_sz << 2); if (unlikely(p == NULL)) goto out_overflow; - xdr_decode_fattr(p, fattr); + + p = xdr_decode_ftype3(p, &fmode); + + fattr->mode = (be32_to_cpup(p++) & ~S_IFMT) | fmode; + fattr->nlink = be32_to_cpup(p++); + fattr->uid = be32_to_cpup(p++); + fattr->gid = be32_to_cpup(p++); + + p = xdr_decode_size3(p, &fattr->size); + p = xdr_decode_size3(p, &fattr->du.nfs3.used); + p = xdr_decode_specdata3(p, &fattr->rdev); + + p = xdr_decode_hyper(p, &fattr->fsid.major); + fattr->fsid.minor = 0; + + p = xdr_decode_fileid3(p, &fattr->fileid); + p = xdr_decode_nfstime3(p, &fattr->atime); + p = xdr_decode_nfstime3(p, &fattr->mtime); + xdr_decode_nfstime3(p, &fattr->ctime); + + fattr->valid |= NFS_ATTR_FATTR_V3; return 0; out_overflow: print_overflow_msg(__func__, xdr); @@ -730,7 +723,15 @@ static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr) p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2); if (unlikely(p == NULL)) goto out_overflow; - xdr_decode_wcc_attr(p, fattr); + + fattr->valid |= NFS_ATTR_FATTR_PRESIZE + | NFS_ATTR_FATTR_PREMTIME + | NFS_ATTR_FATTR_PRECTIME; + + p = xdr_decode_size3(p, &fattr->pre_size); + p = xdr_decode_nfstime3(p, &fattr->pre_mtime); + xdr_decode_nfstime3(p, &fattr->pre_ctime); + return 0; out_overflow: print_overflow_msg(__func__, xdr); @@ -1009,10 +1010,7 @@ static void encode_write3args(struct xdr_stream *xdr, p = xdr_reserve_space(xdr, 8 + 4 + 4 + 4); p = xdr_encode_hyper(p, args->offset); *p++ = cpu_to_be32(args->count); - - BUG_ON(args->stable > NFS_FILE_SYNC); *p++ = cpu_to_be32(args->stable); - *p = cpu_to_be32(args->count); xdr_write_pages(xdr, args->pages, args->pgbase, args->count); } @@ -2278,7 +2276,7 @@ static int decode_fsinfo3resok(struct xdr_stream *xdr, result->wtmult = be32_to_cpup(p++); result->dtpref = be32_to_cpup(p++); p = xdr_decode_size3(p, &result->maxfilesize); - xdr_decode_time3(p, &result->time_delta); + xdr_decode_nfstime3(p, &result->time_delta); /* ignore properties */ result->lease_time = 0; -- cgit v1.2.2 From 3460f29a27344db8c7af62cafdb961286ef0b6cd Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:57:12 +0000 Subject: lockd: Introduce new-style XDR functions for NLMv4 We'd like to prevent local buffer overflows caused by malicious or broken servers. New xdr_stream style decoders can do that. For efficiency, we also want to be able to pass xdr_streams from call_encode() to all XDR encoding functions, rather than building an xdr_stream in every XDR encoding function in the kernel. Same idea as the NLM v3 XDR overhaul. Signed-off-by: Chuck Lever Tested-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- fs/lockd/Makefile | 2 +- fs/lockd/clnt4xdr.c | 621 ++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/lockd/xdr4.c | 255 --------------------- 3 files changed, 622 insertions(+), 256 deletions(-) create mode 100644 fs/lockd/clnt4xdr.c (limited to 'fs') diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile index d0488b3bd00b..ca58d64374ca 100644 --- a/fs/lockd/Makefile +++ b/fs/lockd/Makefile @@ -6,5 +6,5 @@ obj-$(CONFIG_LOCKD) += lockd.o lockd-objs-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \ svcshare.o svcproc.o svcsubs.o mon.o xdr.o grace.o -lockd-objs-$(CONFIG_LOCKD_V4) += xdr4.o svc4proc.o +lockd-objs-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o lockd-objs := $(lockd-objs-y) diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c new file mode 100644 index 000000000000..1a1c3e21ed2c --- /dev/null +++ b/fs/lockd/clnt4xdr.c @@ -0,0 +1,621 @@ +/* + * linux/fs/lockd/clnt4xdr.c + * + * XDR functions to encode/decode NLM version 4 RPC arguments and results. + * + * NLM client-side only. + * + * Copyright (C) 2010, Oracle. All rights reserved. + */ + +#include +#include +#include +#include +#include + +#define NLMDBG_FACILITY NLMDBG_XDR + +#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ) +# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!" +#endif + +#if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN) +# error "NLM host name cannot be larger than NLM's maximum string length!" +#endif + +/* + * Declare the space requirements for NLM arguments and replies as + * number of 32bit-words + */ +#define NLM4_void_sz (0) +#define NLM4_cookie_sz (1+(NLM_MAXCOOKIELEN>>2)) +#define NLM4_caller_sz (1+(NLMCLNT_OHSIZE>>2)) +#define NLM4_owner_sz (1+(NLMCLNT_OHSIZE>>2)) +#define NLM4_fhandle_sz (1+(NFS3_FHSIZE>>2)) +#define NLM4_lock_sz (5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz) +#define NLM4_holder_sz (6+NLM4_owner_sz) + +#define NLM4_testargs_sz (NLM4_cookie_sz+1+NLM4_lock_sz) +#define NLM4_lockargs_sz (NLM4_cookie_sz+4+NLM4_lock_sz) +#define NLM4_cancargs_sz (NLM4_cookie_sz+2+NLM4_lock_sz) +#define NLM4_unlockargs_sz (NLM4_cookie_sz+NLM4_lock_sz) + +#define NLM4_testres_sz (NLM4_cookie_sz+1+NLM4_holder_sz) +#define NLM4_res_sz (NLM4_cookie_sz+1) +#define NLM4_norep_sz (0) + + +static s64 loff_t_to_s64(loff_t offset) +{ + s64 res; + + if (offset >= NLM4_OFFSET_MAX) + res = NLM4_OFFSET_MAX; + else if (offset <= -NLM4_OFFSET_MAX) + res = -NLM4_OFFSET_MAX; + else + res = offset; + return res; +} + +static void nlm4_compute_offsets(const struct nlm_lock *lock, + u64 *l_offset, u64 *l_len) +{ + const struct file_lock *fl = &lock->fl; + + BUG_ON(fl->fl_start > NLM4_OFFSET_MAX); + BUG_ON(fl->fl_end > NLM4_OFFSET_MAX && + fl->fl_end != OFFSET_MAX); + + *l_offset = loff_t_to_s64(fl->fl_start); + if (fl->fl_end == OFFSET_MAX) + *l_len = 0; + else + *l_len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1); +} + +/* + * Handle decode buffer overflows out-of-line. + */ +static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) +{ + dprintk("lockd: %s prematurely hit the end of our receive buffer. " + "Remaining buffer length is %tu words.\n", + func, xdr->end - xdr->p); +} + + +/* + * Encode/decode NLMv4 basic data types + * + * Basic NLMv4 data types are defined in Appendix II, section 6.1.4 + * of RFC 1813: "NFS Version 3 Protocol Specification" and in Chapter + * 10 of X/Open's "Protocols for Interworking: XNFS, Version 3W". + * + * Not all basic data types have their own encoding and decoding + * functions. 
For run-time efficiency, some data types are encoded + * or decoded inline. + */ + +static void encode_bool(struct xdr_stream *xdr, const int value) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, 4); + *p = value ? xdr_one : xdr_zero; +} + +static void encode_int32(struct xdr_stream *xdr, const s32 value) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, 4); + *p = cpu_to_be32(value); +} + +/* + * typedef opaque netobj + */ +static void encode_netobj(struct xdr_stream *xdr, + const u8 *data, const unsigned int length) +{ + __be32 *p; + + BUG_ON(length > XDR_MAX_NETOBJ); + p = xdr_reserve_space(xdr, 4 + length); + xdr_encode_opaque(p, data, length); +} + +static int decode_netobj(struct xdr_stream *xdr, + struct xdr_netobj *obj) +{ + u32 length; + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + length = be32_to_cpup(p++); + if (unlikely(length > XDR_MAX_NETOBJ)) + goto out_size; + obj->len = length; + obj->data = (u8 *)p; + return 0; +out_size: + dprintk("NFS: returned netobj was too long: %u\n", length); + return -EIO; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * netobj cookie; + */ +static void encode_cookie(struct xdr_stream *xdr, + const struct nlm_cookie *cookie) +{ + BUG_ON(cookie->len > NLM_MAXCOOKIELEN); + encode_netobj(xdr, (u8 *)&cookie->data, cookie->len); +} + +static int decode_cookie(struct xdr_stream *xdr, + struct nlm_cookie *cookie) +{ + u32 length; + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + length = be32_to_cpup(p++); + /* apparently HPUX can return empty cookies */ + if (length == 0) + goto out_hpux; + if (length > NLM_MAXCOOKIELEN) + goto out_size; + p = xdr_inline_decode(xdr, length); + if (unlikely(p == NULL)) + goto out_overflow; + cookie->len = length; + memcpy(cookie->data, p, length); + return 0; +out_hpux: + cookie->len = 4; + memset(cookie->data, 0, 4); + return 0; +out_size: + dprintk("NFS: returned cookie was too long: %u\n", length); + return -EIO; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * netobj fh; + */ +static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh) +{ + BUG_ON(fh->size > NFS3_FHSIZE); + encode_netobj(xdr, (u8 *)&fh->data, fh->size); +} + +/* + * enum nlm4_stats { + * NLM4_GRANTED = 0, + * NLM4_DENIED = 1, + * NLM4_DENIED_NOLOCKS = 2, + * NLM4_BLOCKED = 3, + * NLM4_DENIED_GRACE_PERIOD = 4, + * NLM4_DEADLCK = 5, + * NLM4_ROFS = 6, + * NLM4_STALE_FH = 7, + * NLM4_FBIG = 8, + * NLM4_FAILED = 9 + * }; + * + * struct nlm4_stat { + * nlm4_stats stat; + * }; + * + * NB: we don't swap bytes for the NLM status values. The upper + * layers deal directly with the status value in network byte + * order. 
+ */ +static void encode_nlm4_stat(struct xdr_stream *xdr, + const __be32 stat) +{ + __be32 *p; + + BUG_ON(be32_to_cpu(stat) > NLM_FAILED); + p = xdr_reserve_space(xdr, 4); + *p = stat; +} + +static int decode_nlm4_stat(struct xdr_stream *xdr, __be32 *stat) +{ + __be32 *p; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(p == NULL)) + goto out_overflow; + if (unlikely(*p > nlm4_failed)) + goto out_bad_xdr; + *stat = *p; + return 0; +out_bad_xdr: + dprintk("%s: server returned invalid nlm4_stats value: %u\n", + __func__, be32_to_cpup(p)); + return -EIO; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * struct nlm4_holder { + * bool exclusive; + * int32 svid; + * netobj oh; + * uint64 l_offset; + * uint64 l_len; + * }; + */ +static void encode_nlm4_holder(struct xdr_stream *xdr, + const struct nlm_res *result) +{ + const struct nlm_lock *lock = &result->lock; + u64 l_offset, l_len; + __be32 *p; + + encode_bool(xdr, lock->fl.fl_type == F_RDLCK); + encode_int32(xdr, lock->svid); + encode_netobj(xdr, lock->oh.data, lock->oh.len); + + p = xdr_reserve_space(xdr, 4 + 4); + nlm4_compute_offsets(lock, &l_offset, &l_len); + p = xdr_encode_hyper(p, l_offset); + xdr_encode_hyper(p, l_len); +} + +static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result) +{ + struct nlm_lock *lock = &result->lock; + struct file_lock *fl = &lock->fl; + u64 l_offset, l_len; + u32 exclusive; + int error; + __be32 *p; + s32 end; + + memset(lock, 0, sizeof(*lock)); + locks_init_lock(fl); + + p = xdr_inline_decode(xdr, 4 + 4); + if (unlikely(p == NULL)) + goto out_overflow; + exclusive = be32_to_cpup(p++); + lock->svid = be32_to_cpup(p); + fl->fl_pid = (pid_t)lock->svid; + + error = decode_netobj(xdr, &lock->oh); + if (unlikely(error)) + goto out; + + p = xdr_inline_decode(xdr, 8 + 8); + if (unlikely(p == NULL)) + goto out_overflow; + + fl->fl_flags = FL_POSIX; + fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK; + p = xdr_decode_hyper(p, &l_offset); + xdr_decode_hyper(p, &l_len); + end = l_offset + l_len - 1; + + fl->fl_start = (loff_t)l_offset; + if (l_len == 0 || end < 0) + fl->fl_end = OFFSET_MAX; + else + fl->fl_end = (loff_t)end; + error = 0; +out: + return error; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +/* + * string caller_name; + */ +static void encode_caller_name(struct xdr_stream *xdr, const char *name) +{ + /* NB: client-side does not set lock->len */ + u32 length = strlen(name); + __be32 *p; + + BUG_ON(length > NLM_MAXSTRLEN); + p = xdr_reserve_space(xdr, 4 + length); + xdr_encode_opaque(p, name, length); +} + +/* + * struct nlm4_lock { + * string caller_name; + * netobj fh; + * netobj oh; + * int32 svid; + * uint64 l_offset; + * uint64 l_len; + * }; + */ +static void encode_nlm4_lock(struct xdr_stream *xdr, + const struct nlm_lock *lock) +{ + u64 l_offset, l_len; + __be32 *p; + + encode_caller_name(xdr, lock->caller); + encode_fh(xdr, &lock->fh); + encode_netobj(xdr, lock->oh.data, lock->oh.len); + + p = xdr_reserve_space(xdr, 4 + 8 + 8); + *p++ = cpu_to_be32(lock->svid); + + nlm4_compute_offsets(lock, &l_offset, &l_len); + p = xdr_encode_hyper(p, l_offset); + xdr_encode_hyper(p, l_len); +} + + +/* + * NLMv4 XDR encode functions + * + * NLMv4 argument types are defined in Appendix II of RFC 1813: + * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's + * "Protocols for Interworking: XNFS, Version 3W". 
+ */ + +/* + * struct nlm4_testargs { + * netobj cookie; + * bool exclusive; + * struct nlm4_lock alock; + * }; + */ +static int nlm4_xdr_enc_testargs(struct rpc_rqst *req, __be32 *p, + const struct nlm_args *args) +{ + const struct nlm_lock *lock = &args->lock; + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_cookie(&xdr, &args->cookie); + encode_bool(&xdr, lock->fl.fl_type == F_WRLCK); + encode_nlm4_lock(&xdr, lock); + return 0; +} + +/* + * struct nlm4_lockargs { + * netobj cookie; + * bool block; + * bool exclusive; + * struct nlm4_lock alock; + * bool reclaim; + * int state; + * }; + */ +static int nlm4_xdr_enc_lockargs(struct rpc_rqst *req, __be32 *p, + const struct nlm_args *args) +{ + const struct nlm_lock *lock = &args->lock; + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_cookie(&xdr, &args->cookie); + encode_bool(&xdr, args->block); + encode_bool(&xdr, lock->fl.fl_type == F_WRLCK); + encode_nlm4_lock(&xdr, lock); + encode_bool(&xdr, args->reclaim); + encode_int32(&xdr, args->state); + return 0; +} + +/* + * struct nlm4_cancargs { + * netobj cookie; + * bool block; + * bool exclusive; + * struct nlm4_lock alock; + * }; + */ +static int nlm4_xdr_enc_cancargs(struct rpc_rqst *req, __be32 *p, + const struct nlm_args *args) +{ + const struct nlm_lock *lock = &args->lock; + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_cookie(&xdr, &args->cookie); + encode_bool(&xdr, args->block); + encode_bool(&xdr, lock->fl.fl_type == F_WRLCK); + encode_nlm4_lock(&xdr, lock); + return 0; +} + +/* + * struct nlm4_unlockargs { + * netobj cookie; + * struct nlm4_lock alock; + * }; + */ +static int nlm4_xdr_enc_unlockargs(struct rpc_rqst *req, __be32 *p, + const struct nlm_args *args) +{ + const struct nlm_lock *lock = &args->lock; + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_cookie(&xdr, &args->cookie); + encode_nlm4_lock(&xdr, lock); + return 0; +} + +/* + * struct nlm4_res { + * netobj cookie; + * nlm4_stat stat; + * }; + */ +static int nlm4_xdr_enc_res(struct rpc_rqst *req, __be32 *p, + const struct nlm_res *result) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_cookie(&xdr, &result->cookie); + encode_nlm4_stat(&xdr, result->status); + return 0; +} + +/* + * union nlm4_testrply switch (nlm4_stats stat) { + * case NLM4_DENIED: + * struct nlm4_holder holder; + * default: + * void; + * }; + * + * struct nlm4_testres { + * netobj cookie; + * nlm4_testrply test_stat; + * }; + */ +static int nlm4_xdr_enc_testres(struct rpc_rqst *req, __be32 *p, + const struct nlm_res *result) +{ + struct xdr_stream xdr; + + xdr_init_encode(&xdr, &req->rq_snd_buf, p); + encode_cookie(&xdr, &result->cookie); + encode_nlm4_stat(&xdr, result->status); + if (result->status == nlm_lck_denied) + encode_nlm4_holder(&xdr, result); + return 0; +} + + +/* + * NLMv4 XDR decode functions + * + * NLMv4 argument types are defined in Appendix II of RFC 1813: + * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's + * "Protocols for Interworking: XNFS, Version 3W". 
+ */ + +/* + * union nlm4_testrply switch (nlm4_stats stat) { + * case NLM4_DENIED: + * struct nlm4_holder holder; + * default: + * void; + * }; + * + * struct nlm4_testres { + * netobj cookie; + * nlm4_testrply test_stat; + * }; + */ +static int decode_nlm4_testrply(struct xdr_stream *xdr, + struct nlm_res *result) +{ + int error; + + error = decode_nlm4_stat(xdr, &result->status); + if (unlikely(error)) + goto out; + if (result->status == nlm_lck_denied) + error = decode_nlm4_holder(xdr, result); +out: + return error; +} + +static int nlm4_xdr_dec_testres(struct rpc_rqst *req, __be32 *p, + struct nlm_res *result) +{ + struct xdr_stream xdr; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_cookie(&xdr, &result->cookie); + if (unlikely(error)) + goto out; + error = decode_nlm4_testrply(&xdr, result); +out: + return error; +} + +/* + * struct nlm4_res { + * netobj cookie; + * nlm4_stat stat; + * }; + */ +static int nlm4_xdr_dec_res(struct rpc_rqst *req, __be32 *p, + struct nlm_res *result) +{ + struct xdr_stream xdr; + int error; + + xdr_init_decode(&xdr, &req->rq_rcv_buf, p); + error = decode_cookie(&xdr, &result->cookie); + if (unlikely(error)) + goto out; + error = decode_nlm4_stat(&xdr, &result->status); +out: + return error; +} + + +/* + * For NLM, a void procedure really returns nothing + */ +#define nlm4_xdr_dec_norep NULL + +#define PROC(proc, argtype, restype) \ +[NLMPROC_##proc] = { \ + .p_proc = NLMPROC_##proc, \ + .p_encode = (kxdrproc_t)nlm4_xdr_enc_##argtype, \ + .p_decode = (kxdrproc_t)nlm4_xdr_dec_##restype, \ + .p_arglen = NLM4_##argtype##_sz, \ + .p_replen = NLM4_##restype##_sz, \ + .p_statidx = NLMPROC_##proc, \ + .p_name = #proc, \ + } + +static struct rpc_procinfo nlm4_procedures[] = { + PROC(TEST, testargs, testres), + PROC(LOCK, lockargs, res), + PROC(CANCEL, cancargs, res), + PROC(UNLOCK, unlockargs, res), + PROC(GRANTED, testargs, res), + PROC(TEST_MSG, testargs, norep), + PROC(LOCK_MSG, lockargs, norep), + PROC(CANCEL_MSG, cancargs, norep), + PROC(UNLOCK_MSG, unlockargs, norep), + PROC(GRANTED_MSG, testargs, norep), + PROC(TEST_RES, testres, norep), + PROC(LOCK_RES, res, norep), + PROC(CANCEL_RES, res, norep), + PROC(UNLOCK_RES, res, norep), + PROC(GRANTED_RES, res, norep), +}; + +struct rpc_version nlm_version4 = { + .number = 4, + .nrprocs = ARRAY_SIZE(nlm4_procedures), + .procs = nlm4_procedures, +}; diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c index ad9dbbc9145d..dfa4789cd460 100644 --- a/fs/lockd/xdr4.c +++ b/fs/lockd/xdr4.c @@ -93,15 +93,6 @@ nlm4_decode_fh(__be32 *p, struct nfs_fh *f) return p + XDR_QUADLEN(f->size); } -static __be32 * -nlm4_encode_fh(__be32 *p, struct nfs_fh *f) -{ - *p++ = htonl(f->size); - if (f->size) p[XDR_QUADLEN(f->size)-1] = 0; /* don't leak anything */ - memcpy(p, f->data, f->size); - return p + XDR_QUADLEN(f->size); -} - /* * Encode and decode owner handle */ @@ -111,12 +102,6 @@ nlm4_decode_oh(__be32 *p, struct xdr_netobj *oh) return xdr_decode_netobj(p, oh); } -static __be32 * -nlm4_encode_oh(__be32 *p, struct xdr_netobj *oh) -{ - return xdr_encode_netobj(p, oh); -} - static __be32 * nlm4_decode_lock(__be32 *p, struct nlm_lock *lock) { @@ -149,38 +134,6 @@ nlm4_decode_lock(__be32 *p, struct nlm_lock *lock) return p; } -/* - * Encode a lock as part of an NLM call - */ -static __be32 * -nlm4_encode_lock(__be32 *p, struct nlm_lock *lock) -{ - struct file_lock *fl = &lock->fl; - __s64 start, len; - - if (!(p = xdr_encode_string(p, lock->caller)) - || !(p = nlm4_encode_fh(p, &lock->fh)) - || !(p = 
nlm4_encode_oh(p, &lock->oh))) - return NULL; - - if (fl->fl_start > NLM4_OFFSET_MAX - || (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX)) - return NULL; - - *p++ = htonl(lock->svid); - - start = loff_t_to_s64(fl->fl_start); - if (fl->fl_end == OFFSET_MAX) - len = 0; - else - len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1); - - p = xdr_encode_hyper(p, start); - p = xdr_encode_hyper(p, len); - - return p; -} - /* * Encode result of a TEST/TEST_MSG call */ @@ -379,211 +332,3 @@ nlm4svc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_ressize_check(rqstp, p); } - -/* - * Now, the client side XDR functions - */ -#ifdef NLMCLNT_SUPPORT_SHARES -static int -nlm4clt_decode_void(struct rpc_rqst *req, __be32 *p, void *ptr) -{ - return 0; -} -#endif - -static int -nlm4clt_encode_testargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp) -{ - struct nlm_lock *lock = &argp->lock; - - if (!(p = nlm4_encode_cookie(p, &argp->cookie))) - return -EIO; - *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero; - if (!(p = nlm4_encode_lock(p, lock))) - return -EIO; - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -static int -nlm4clt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) -{ - if (!(p = nlm4_decode_cookie(p, &resp->cookie))) - return -EIO; - resp->status = *p++; - if (resp->status == nlm_lck_denied) { - struct file_lock *fl = &resp->lock.fl; - u32 excl; - __u64 start, len; - __s64 end; - - memset(&resp->lock, 0, sizeof(resp->lock)); - locks_init_lock(fl); - excl = ntohl(*p++); - resp->lock.svid = ntohl(*p++); - fl->fl_pid = (pid_t)resp->lock.svid; - if (!(p = nlm4_decode_oh(p, &resp->lock.oh))) - return -EIO; - - fl->fl_flags = FL_POSIX; - fl->fl_type = excl? F_WRLCK : F_RDLCK; - p = xdr_decode_hyper(p, &start); - p = xdr_decode_hyper(p, &len); - end = start + len - 1; - - fl->fl_start = s64_to_loff_t(start); - if (len == 0 || end < 0) - fl->fl_end = OFFSET_MAX; - else - fl->fl_end = s64_to_loff_t(end); - } - return 0; -} - - -static int -nlm4clt_encode_lockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp) -{ - struct nlm_lock *lock = &argp->lock; - - if (!(p = nlm4_encode_cookie(p, &argp->cookie))) - return -EIO; - *p++ = argp->block? xdr_one : xdr_zero; - *p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero; - if (!(p = nlm4_encode_lock(p, lock))) - return -EIO; - *p++ = argp->reclaim? xdr_one : xdr_zero; - *p++ = htonl(argp->state); - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -static int -nlm4clt_encode_cancargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp) -{ - struct nlm_lock *lock = &argp->lock; - - if (!(p = nlm4_encode_cookie(p, &argp->cookie))) - return -EIO; - *p++ = argp->block? xdr_one : xdr_zero; - *p++ = (lock->fl.fl_type == F_WRLCK)? 
xdr_one : xdr_zero; - if (!(p = nlm4_encode_lock(p, lock))) - return -EIO; - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -static int -nlm4clt_encode_unlockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp) -{ - struct nlm_lock *lock = &argp->lock; - - if (!(p = nlm4_encode_cookie(p, &argp->cookie))) - return -EIO; - if (!(p = nlm4_encode_lock(p, lock))) - return -EIO; - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -static int -nlm4clt_encode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) -{ - if (!(p = nlm4_encode_cookie(p, &resp->cookie))) - return -EIO; - *p++ = resp->status; - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -static int -nlm4clt_encode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) -{ - if (!(p = nlm4_encode_testres(p, resp))) - return -EIO; - req->rq_slen = xdr_adjust_iovec(req->rq_svec, p); - return 0; -} - -static int -nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp) -{ - if (!(p = nlm4_decode_cookie(p, &resp->cookie))) - return -EIO; - resp->status = *p++; - return 0; -} - -#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ) -# error "NLM host name cannot be larger than XDR_MAX_NETOBJ!" -#endif - -#if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN) -# error "NLM host name cannot be larger than NLM's maximum string length!" -#endif - -/* - * Buffer requirements for NLM - */ -#define NLM4_void_sz 0 -#define NLM4_cookie_sz 1+XDR_QUADLEN(NLM_MAXCOOKIELEN) -#define NLM4_caller_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE) -#define NLM4_owner_sz 1+XDR_QUADLEN(NLMCLNT_OHSIZE) -#define NLM4_fhandle_sz 1+XDR_QUADLEN(NFS3_FHSIZE) -#define NLM4_lock_sz 5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz -#define NLM4_holder_sz 6+NLM4_owner_sz - -#define NLM4_testargs_sz NLM4_cookie_sz+1+NLM4_lock_sz -#define NLM4_lockargs_sz NLM4_cookie_sz+4+NLM4_lock_sz -#define NLM4_cancargs_sz NLM4_cookie_sz+2+NLM4_lock_sz -#define NLM4_unlockargs_sz NLM4_cookie_sz+NLM4_lock_sz - -#define NLM4_testres_sz NLM4_cookie_sz+1+NLM4_holder_sz -#define NLM4_res_sz NLM4_cookie_sz+1 -#define NLM4_norep_sz 0 - -/* - * For NLM, a void procedure really returns nothing - */ -#define nlm4clt_decode_norep NULL - -#define PROC(proc, argtype, restype) \ -[NLMPROC_##proc] = { \ - .p_proc = NLMPROC_##proc, \ - .p_encode = (kxdrproc_t) nlm4clt_encode_##argtype, \ - .p_decode = (kxdrproc_t) nlm4clt_decode_##restype, \ - .p_arglen = NLM4_##argtype##_sz, \ - .p_replen = NLM4_##restype##_sz, \ - .p_statidx = NLMPROC_##proc, \ - .p_name = #proc, \ - } - -static struct rpc_procinfo nlm4_procedures[] = { - PROC(TEST, testargs, testres), - PROC(LOCK, lockargs, res), - PROC(CANCEL, cancargs, res), - PROC(UNLOCK, unlockargs, res), - PROC(GRANTED, testargs, res), - PROC(TEST_MSG, testargs, norep), - PROC(LOCK_MSG, lockargs, norep), - PROC(CANCEL_MSG, cancargs, norep), - PROC(UNLOCK_MSG, unlockargs, norep), - PROC(GRANTED_MSG, testargs, norep), - PROC(TEST_RES, testres, norep), - PROC(LOCK_RES, res, norep), - PROC(CANCEL_RES, res, norep), - PROC(UNLOCK_RES, res, norep), - PROC(GRANTED_RES, res, norep), -#ifdef NLMCLNT_SUPPORT_SHARES - PROC(SHARE, shareargs, shareres), - PROC(UNSHARE, shareargs, shareres), - PROC(NM_LOCK, lockargs, res), - PROC(FREE_ALL, notify, void), -#endif -}; - -struct rpc_version nlm_version4 = { - .number = 4, - .nrprocs = 24, - .procs = nlm4_procedures, -}; -- cgit v1.2.2 From a033db487eec09afde00a3562842982a8053c887 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:57:22 +0000 Subject: NFSD: Update XDR 
encoders in NFSv4 callback client Clean up. Remove old-style NFSv4 XDR macros in favor of the style now used in fs/nfs/nfs4xdr.c. These were forgotten during the recent nfs4xdr.c rewrite. Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfsd/nfs4callback.c | 255 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 178 insertions(+), 77 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 143da2eecd7b..d8148cc461e7 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -50,11 +50,6 @@ enum { NFSPROC4_CLNT_CB_SEQUENCE, }; -enum nfs_cb_opnum4 { - OP_CB_RECALL = 4, - OP_CB_SEQUENCE = 11, -}; - #define NFS4_MAXTAGLEN 20 #define NFS4_enc_cb_null_sz 0 @@ -79,30 +74,6 @@ enum nfs_cb_opnum4 { cb_sequence_dec_sz + \ op_dec_sz) -/* -* Generic encode routines from fs/nfs/nfs4xdr.c -*/ -static inline __be32 * -xdr_writemem(__be32 *p, const void *ptr, int nbytes) -{ - int tmp = XDR_QUADLEN(nbytes); - if (!tmp) - return p; - p[tmp-1] = 0; - memcpy(p, ptr, nbytes); - return p + tmp; -} - -#define WRITE32(n) *p++ = htonl(n) -#define WRITEMEM(ptr,nbytes) do { \ - p = xdr_writemem(p, ptr, nbytes); \ -} while (0) -#define RESERVE_SPACE(nbytes) do { \ - p = xdr_reserve_space(xdr, nbytes); \ - if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \ - BUG_ON(!p); \ -} while (0) - /* * Generic decode routines from fs/nfs/nfs4xdr.c */ @@ -197,102 +168,232 @@ nfs_cb_stat_to_errno(int stat) return stat; } +static __be32 *xdr_encode_empty_array(__be32 *p) +{ + *p++ = xdr_zero; + return p; +} + +/* + * Encode/decode NFSv4 CB basic data types + * + * Basic NFSv4 callback data types are defined in section 15 of RFC + * 3530: "Network File System (NFS) version 4 Protocol" and section + * 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version + * 1 Protocol" + */ + +/* + * nfs_cb_opnum4 + * + * enum nfs_cb_opnum4 { + * OP_CB_GETATTR = 3, + * ... 
+ * }; + */ +enum nfs_cb_opnum4 { + OP_CB_GETATTR = 3, + OP_CB_RECALL = 4, + OP_CB_LAYOUTRECALL = 5, + OP_CB_NOTIFY = 6, + OP_CB_PUSH_DELEG = 7, + OP_CB_RECALL_ANY = 8, + OP_CB_RECALLABLE_OBJ_AVAIL = 9, + OP_CB_RECALL_SLOT = 10, + OP_CB_SEQUENCE = 11, + OP_CB_WANTS_CANCELLED = 12, + OP_CB_NOTIFY_LOCK = 13, + OP_CB_NOTIFY_DEVICEID = 14, + OP_CB_ILLEGAL = 10044 +}; + +static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, 4); + *p = cpu_to_be32(op); +} + +/* + * nfs_fh4 + * + * typedef opaque nfs_fh4; + */ +static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh) +{ + u32 length = fh->fh_size; + __be32 *p; + + BUG_ON(length > NFS4_FHSIZE); + p = xdr_reserve_space(xdr, 4 + length); + xdr_encode_opaque(p, &fh->fh_base, length); +} + /* - * XDR encode + * stateid4 + * + * struct stateid4 { + * uint32_t seqid; + * opaque other[12]; + * }; */ +static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE); + *p++ = cpu_to_be32(sid->si_generation); + xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE); +} -static void -encode_stateid(struct xdr_stream *xdr, stateid_t *sid) +/* + * sessionid4 + * + * typedef opaque sessionid4[NFS4_SESSIONID_SIZE]; + */ +static void encode_sessionid4(struct xdr_stream *xdr, + const struct nfsd4_session *session) { __be32 *p; - RESERVE_SPACE(sizeof(stateid_t)); - WRITE32(sid->si_generation); - WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t)); + p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN); + xdr_encode_opaque_fixed(p, session->se_sessionid.data, + NFS4_MAX_SESSIONID_LEN); } -static void -encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr) +/* + * CB_COMPOUND4args + * + * struct CB_COMPOUND4args { + * utf8str_cs tag; + * uint32_t minorversion; + * uint32_t callback_ident; + * nfs_cb_argop4 argarray<>; + * }; +*/ +static void encode_cb_compound4args(struct xdr_stream *xdr, + struct nfs4_cb_compound_hdr *hdr) { __be32 * p; - RESERVE_SPACE(16); - WRITE32(0); /* tag length is always 0 */ - WRITE32(hdr->minorversion); - WRITE32(hdr->ident); + p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4); + p = xdr_encode_empty_array(p); /* empty tag */ + *p++ = cpu_to_be32(hdr->minorversion); + *p++ = cpu_to_be32(hdr->ident); + hdr->nops_p = p; - WRITE32(hdr->nops); + *p = cpu_to_be32(hdr->nops); /* argarray element count */ } +/* + * Update argarray element count + */ static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr) { - *hdr->nops_p = htonl(hdr->nops); + BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS); + *hdr->nops_p = cpu_to_be32(hdr->nops); } -static void -encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp, - struct nfs4_cb_compound_hdr *hdr) +/* + * CB_RECALL4args + * + * struct CB_RECALL4args { + * stateid4 stateid; + * bool truncate; + * nfs_fh4 fh; + * }; + */ +static void encode_cb_recall4args(struct xdr_stream *xdr, + const struct nfs4_delegation *dp, + struct nfs4_cb_compound_hdr *hdr) { __be32 *p; - int len = dp->dl_fh.fh_size; - - RESERVE_SPACE(4); - WRITE32(OP_CB_RECALL); - encode_stateid(xdr, &dp->dl_stateid); - RESERVE_SPACE(8 + (XDR_QUADLEN(len) << 2)); - WRITE32(0); /* truncate optimization not implemented */ - WRITE32(len); - WRITEMEM(&dp->dl_fh.fh_base, len); + + encode_nfs_cb_opnum4(xdr, OP_CB_RECALL); + encode_stateid4(xdr, &dp->dl_stateid); + + p = xdr_reserve_space(xdr, 4); + *p++ = xdr_zero; /* truncate */ + + 
encode_nfs_fh4(xdr, &dp->dl_fh); + hdr->nops++; } -static void -encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb, - struct nfs4_cb_compound_hdr *hdr) +/* + * CB_SEQUENCE4args + * + * struct CB_SEQUENCE4args { + * sessionid4 csa_sessionid; + * sequenceid4 csa_sequenceid; + * slotid4 csa_slotid; + * slotid4 csa_highest_slotid; + * bool csa_cachethis; + * referring_call_list4 csa_referring_call_lists<>; + * }; + */ +static void encode_cb_sequence4args(struct xdr_stream *xdr, + const struct nfsd4_callback *cb, + struct nfs4_cb_compound_hdr *hdr) { + struct nfsd4_session *session = cb->cb_clp->cl_cb_session; __be32 *p; - struct nfsd4_session *ses = cb->cb_clp->cl_cb_session; if (hdr->minorversion == 0) return; - RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20); + encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE); + encode_sessionid4(xdr, session); + + p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4); + *p++ = cpu_to_be32(session->se_cb_seq_nr); /* csa_sequenceid */ + *p++ = xdr_zero; /* csa_slotid */ + *p++ = xdr_zero; /* csa_highest_slotid */ + *p++ = xdr_zero; /* csa_cachethis */ + xdr_encode_empty_array(p); /* csa_referring_call_lists */ - WRITE32(OP_CB_SEQUENCE); - WRITEMEM(ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN); - WRITE32(ses->se_cb_seq_nr); - WRITE32(0); /* slotid, always 0 */ - WRITE32(0); /* highest slotid always 0 */ - WRITE32(0); /* cachethis always 0 */ - WRITE32(0); /* FIXME: support referring_call_lists */ hdr->nops++; } -static int -nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p) +/* + * NFSv4.0 and NFSv4.1 XDR encode functions + * + * NFSv4.0 callback argument types are defined in section 15 of RFC + * 3530: "Network File System (NFS) version 4 Protocol" and section 20 + * of RFC 5661: "Network File System (NFS) Version 4 Minor Version 1 + * Protocol". + */ + +/* + * NB: Without this zero space reservation, callbacks over krb5p fail + */ +static int nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p, void *__unused) { struct xdr_stream xdrs, *xdr = &xdrs; xdr_init_encode(&xdrs, &req->rq_snd_buf, p); - RESERVE_SPACE(0); + xdr_reserve_space(xdr, 0); return 0; } -static int -nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, - struct nfsd4_callback *cb) +/* + * 20.2. Operation 4: CB_RECALL - Recall a Delegation + */ +static int nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, + const struct nfsd4_callback *cb) { struct xdr_stream xdr; - struct nfs4_delegation *args = cb->cb_op; + const struct nfs4_delegation *args = cb->cb_op; struct nfs4_cb_compound_hdr hdr = { .ident = cb->cb_clp->cl_cb_ident, .minorversion = cb->cb_minorversion, }; xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cb_compound_hdr(&xdr, &hdr); - encode_cb_sequence(&xdr, cb, &hdr); - encode_cb_recall(&xdr, args, &hdr); + encode_cb_compound4args(&xdr, &hdr); + encode_cb_sequence4args(&xdr, cb, &hdr); + encode_cb_recall4args(&xdr, args, &hdr); encode_cb_nops(&hdr); return 0; } -- cgit v1.2.2 From 85a56480191ca9f08fc775c129b9eb5c8c1f2c05 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:57:32 +0000 Subject: NFSD: Update XDR decoders in NFSv4 callback client Clean up. Remove old-style NFSv4 XDR macros in favor of the style now used in fs/nfs/nfs4xdr.c. These were forgotten during the recent nfs4xdr.c rewrite. Additional whitespace cleanup adds to the size of this patch. Signed-off-by: Chuck Lever Tested-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfsd/nfs4callback.c | 415 ++++++++++++++++++++++++++++--------------------- 1 file changed, 239 insertions(+), 176 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index d8148cc461e7..c3c6a903144c 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -74,37 +74,6 @@ enum { cb_sequence_dec_sz + \ op_dec_sz) -/* - * Generic decode routines from fs/nfs/nfs4xdr.c - */ -#define DECODE_TAIL \ - status = 0; \ -out: \ - return status; \ -xdr_error: \ - dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \ - status = -EIO; \ - goto out - -#define READ32(x) (x) = ntohl(*p++) -#define READ64(x) do { \ - (x) = (u64)ntohl(*p++) << 32; \ - (x) |= ntohl(*p++); \ -} while (0) -#define READTIME(x) do { \ - p++; \ - (x.tv_sec) = ntohl(*p++); \ - (x.tv_nsec) = ntohl(*p++); \ -} while (0) -#define READ_BUF(nbytes) do { \ - p = xdr_inline_decode(xdr, nbytes); \ - if (!p) { \ - dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \ - __func__, __LINE__); \ - return -EIO; \ - } \ -} while (0) - struct nfs4_cb_compound_hdr { /* args */ u32 ident; /* minorversion 0 only */ @@ -115,57 +84,14 @@ struct nfs4_cb_compound_hdr { int status; }; -static struct { -int stat; -int errno; -} nfs_cb_errtbl[] = { - { NFS4_OK, 0 }, - { NFS4ERR_PERM, EPERM }, - { NFS4ERR_NOENT, ENOENT }, - { NFS4ERR_IO, EIO }, - { NFS4ERR_NXIO, ENXIO }, - { NFS4ERR_ACCESS, EACCES }, - { NFS4ERR_EXIST, EEXIST }, - { NFS4ERR_XDEV, EXDEV }, - { NFS4ERR_NOTDIR, ENOTDIR }, - { NFS4ERR_ISDIR, EISDIR }, - { NFS4ERR_INVAL, EINVAL }, - { NFS4ERR_FBIG, EFBIG }, - { NFS4ERR_NOSPC, ENOSPC }, - { NFS4ERR_ROFS, EROFS }, - { NFS4ERR_MLINK, EMLINK }, - { NFS4ERR_NAMETOOLONG, ENAMETOOLONG }, - { NFS4ERR_NOTEMPTY, ENOTEMPTY }, - { NFS4ERR_DQUOT, EDQUOT }, - { NFS4ERR_STALE, ESTALE }, - { NFS4ERR_BADHANDLE, EBADHANDLE }, - { NFS4ERR_BAD_COOKIE, EBADCOOKIE }, - { NFS4ERR_NOTSUPP, ENOTSUPP }, - { NFS4ERR_TOOSMALL, ETOOSMALL }, - { NFS4ERR_SERVERFAULT, ESERVERFAULT }, - { NFS4ERR_BADTYPE, EBADTYPE }, - { NFS4ERR_LOCKED, EAGAIN }, - { NFS4ERR_RESOURCE, EREMOTEIO }, - { NFS4ERR_SYMLINK, ELOOP }, - { NFS4ERR_OP_ILLEGAL, EOPNOTSUPP }, - { NFS4ERR_DEADLOCK, EDEADLK }, - { -1, EIO } -}; - -static int -nfs_cb_stat_to_errno(int stat) +/* + * Handle decode buffer overflows out-of-line. + */ +static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) { - int i; - for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) { - if (nfs_cb_errtbl[i].stat == stat) - return nfs_cb_errtbl[i].errno; - } - /* If we cannot translate the error, the recovery routines should - * handle it. - * Note: remaining NFSv4 error codes have values > 10000, so should - * not conflict with native Linux error codes. - */ - return stat; + dprintk("NFS: %s prematurely hit the end of our receive buffer. 
" + "Remaining buffer length is %tu words.\n", + func, xdr->end - xdr->p); } static __be32 *xdr_encode_empty_array(__be32 *p) @@ -262,6 +188,89 @@ static void encode_sessionid4(struct xdr_stream *xdr, NFS4_MAX_SESSIONID_LEN); } +/* + * nfsstat4 + */ +static const struct { + int stat; + int errno; +} nfs_cb_errtbl[] = { + { NFS4_OK, 0 }, + { NFS4ERR_PERM, -EPERM }, + { NFS4ERR_NOENT, -ENOENT }, + { NFS4ERR_IO, -EIO }, + { NFS4ERR_NXIO, -ENXIO }, + { NFS4ERR_ACCESS, -EACCES }, + { NFS4ERR_EXIST, -EEXIST }, + { NFS4ERR_XDEV, -EXDEV }, + { NFS4ERR_NOTDIR, -ENOTDIR }, + { NFS4ERR_ISDIR, -EISDIR }, + { NFS4ERR_INVAL, -EINVAL }, + { NFS4ERR_FBIG, -EFBIG }, + { NFS4ERR_NOSPC, -ENOSPC }, + { NFS4ERR_ROFS, -EROFS }, + { NFS4ERR_MLINK, -EMLINK }, + { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG }, + { NFS4ERR_NOTEMPTY, -ENOTEMPTY }, + { NFS4ERR_DQUOT, -EDQUOT }, + { NFS4ERR_STALE, -ESTALE }, + { NFS4ERR_BADHANDLE, -EBADHANDLE }, + { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, + { NFS4ERR_NOTSUPP, -ENOTSUPP }, + { NFS4ERR_TOOSMALL, -ETOOSMALL }, + { NFS4ERR_SERVERFAULT, -ESERVERFAULT }, + { NFS4ERR_BADTYPE, -EBADTYPE }, + { NFS4ERR_LOCKED, -EAGAIN }, + { NFS4ERR_RESOURCE, -EREMOTEIO }, + { NFS4ERR_SYMLINK, -ELOOP }, + { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP }, + { NFS4ERR_DEADLOCK, -EDEADLK }, + { -1, -EIO } +}; + +/* + * If we cannot translate the error, the recovery routines should + * handle it. + * + * Note: remaining NFSv4 error codes have values > 10000, so should + * not conflict with native Linux error codes. + */ +static int nfs_cb_stat_to_errno(int status) +{ + int i; + + for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) { + if (nfs_cb_errtbl[i].stat == status) + return nfs_cb_errtbl[i].errno; + } + + dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status); + return -status; +} + +static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected, + enum nfsstat4 *status) +{ + __be32 *p; + u32 op; + + p = xdr_inline_decode(xdr, 4 + 4); + if (unlikely(p == NULL)) + goto out_overflow; + op = be32_to_cpup(p++); + if (unlikely(op != expected)) + goto out_unexpected; + *status = be32_to_cpup(p); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +out_unexpected: + dprintk("NFSD: Callback server returned operation %d but " + "we issued a request for %d\n", op, expected); + return -EIO; +} + /* * CB_COMPOUND4args * @@ -295,6 +304,37 @@ static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr) *hdr->nops_p = cpu_to_be32(hdr->nops); } +/* + * CB_COMPOUND4res + * + * struct CB_COMPOUND4res { + * nfsstat4 status; + * utf8str_cs tag; + * nfs_cb_resop4 resarray<>; + * }; + */ +static int decode_cb_compound4res(struct xdr_stream *xdr, + struct nfs4_cb_compound_hdr *hdr) +{ + u32 length; + __be32 *p; + + p = xdr_inline_decode(xdr, 4 + 4); + if (unlikely(p == NULL)) + goto out_overflow; + hdr->status = be32_to_cpup(p++); + /* Ignore the tag */ + length = be32_to_cpup(p++); + p = xdr_inline_decode(xdr, length + 4); + if (unlikely(p == NULL)) + goto out_overflow; + hdr->nops = be32_to_cpup(p); + return 0; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + /* * CB_RECALL4args * @@ -356,6 +396,97 @@ static void encode_cb_sequence4args(struct xdr_stream *xdr, hdr->nops++; } +/* + * CB_SEQUENCE4resok + * + * struct CB_SEQUENCE4resok { + * sessionid4 csr_sessionid; + * sequenceid4 csr_sequenceid; + * slotid4 csr_slotid; + * slotid4 csr_highest_slotid; + * slotid4 csr_target_highest_slotid; + * }; + * + * union CB_SEQUENCE4res switch (nfsstat4 csr_status) { + * case 
NFS4_OK: + * CB_SEQUENCE4resok csr_resok4; + * default: + * void; + * }; + * + * Our current back channel implmentation supports a single backchannel + * with a single slot. + */ +static int decode_cb_sequence4resok(struct xdr_stream *xdr, + struct nfsd4_callback *cb) +{ + struct nfsd4_session *session = cb->cb_clp->cl_cb_session; + struct nfs4_sessionid id; + int status; + __be32 *p; + u32 dummy; + + status = -ESERVERFAULT; + + /* + * If the server returns different values for sessionID, slotID or + * sequence number, the server is looney tunes. + */ + p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4); + if (unlikely(p == NULL)) + goto out_overflow; + memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN); + if (memcmp(id.data, session->se_sessionid.data, + NFS4_MAX_SESSIONID_LEN) != 0) { + dprintk("NFS: %s Invalid session id\n", __func__); + goto out; + } + p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN); + + dummy = be32_to_cpup(p++); + if (dummy != session->se_cb_seq_nr) { + dprintk("NFS: %s Invalid sequence number\n", __func__); + goto out; + } + + dummy = be32_to_cpup(p++); + if (dummy != 0) { + dprintk("NFS: %s Invalid slotid\n", __func__); + goto out; + } + + /* + * FIXME: process highest slotid and target highest slotid + */ + status = 0; +out: + return status; +out_overflow: + print_overflow_msg(__func__, xdr); + return -EIO; +} + +static int decode_cb_sequence4res(struct xdr_stream *xdr, + struct nfsd4_callback *cb) +{ + enum nfsstat4 nfserr; + int status; + + if (cb->cb_minorversion == 0) + return 0; + + status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &nfserr); + if (unlikely(status)) + goto out; + if (unlikely(nfserr != NFS4_OK)) + goto out_default; + status = decode_cb_sequence4resok(xdr, cb); +out: + return status; +out_default: + return nfs_cb_stat_to_errno(status); +} + /* * NFSv4.0 and NFSv4.1 XDR encode functions * @@ -399,119 +530,51 @@ static int nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, } -static int -decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr){ - __be32 *p; - u32 taglen; - - READ_BUF(8); - READ32(hdr->status); - /* We've got no use for the tag; ignore it: */ - READ32(taglen); - READ_BUF(taglen + 4); - p += XDR_QUADLEN(taglen); - READ32(hdr->nops); - return 0; -} - -static int -decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected) -{ - __be32 *p; - u32 op; - int32_t nfserr; - - READ_BUF(8); - READ32(op); - if (op != expected) { - dprintk("NFSD: decode_cb_op_hdr: Callback server returned " - " operation %d but we issued a request for %d\n", - op, expected); - return -EIO; - } - READ32(nfserr); - if (nfserr != NFS_OK) - return -nfs_cb_stat_to_errno(nfserr); - return 0; -} - /* - * Our current back channel implmentation supports a single backchannel - * with a single slot. + * NFSv4.0 and NFSv4.1 XDR decode functions + * + * NFSv4.0 callback result types are defined in section 15 of RFC + * 3530: "Network File System (NFS) version 4 Protocol" and section 20 + * of RFC 5661: "Network File System (NFS) Version 4 Minor Version 1 + * Protocol". 
*/ -static int -decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb, - struct rpc_rqst *rqstp) -{ - struct nfsd4_session *ses = cb->cb_clp->cl_cb_session; - struct nfs4_sessionid id; - int status; - u32 dummy; - __be32 *p; - - if (cb->cb_minorversion == 0) - return 0; - - status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE); - if (status) - return status; - - /* - * If the server returns different values for sessionID, slotID or - * sequence number, the server is looney tunes. - */ - status = -ESERVERFAULT; - - READ_BUF(NFS4_MAX_SESSIONID_LEN + 16); - memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN); - p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN); - if (memcmp(id.data, ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) { - dprintk("%s Invalid session id\n", __func__); - goto out; - } - READ32(dummy); - if (dummy != ses->se_cb_seq_nr) { - dprintk("%s Invalid sequence number\n", __func__); - goto out; - } - READ32(dummy); /* slotid must be 0 */ - if (dummy != 0) { - dprintk("%s Invalid slotid\n", __func__); - goto out; - } - /* FIXME: process highest slotid and target highest slotid */ - status = 0; -out: - return status; -} - -static int -nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p) +static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p, void *__unused) { return 0; } -static int -nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p, - struct nfsd4_callback *cb) +/* + * 20.2. Operation 4: CB_RECALL - Recall a Delegation + */ +static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p, + struct nfsd4_callback *cb) { struct xdr_stream xdr; struct nfs4_cb_compound_hdr hdr; + enum nfsstat4 nfserr; int status; xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_cb_compound_hdr(&xdr, &hdr); - if (status) + status = decode_cb_compound4res(&xdr, &hdr); + if (unlikely(status)) goto out; - if (cb) { - status = decode_cb_sequence(&xdr, cb, rqstp); - if (status) + + if (cb != NULL) { + status = decode_cb_sequence4res(&xdr, cb); + if (unlikely(status)) goto out; } - status = decode_cb_op_hdr(&xdr, OP_CB_RECALL); + + status = decode_cb_op_status(&xdr, OP_CB_RECALL, &nfserr); + if (unlikely(status)) + goto out; + if (unlikely(nfserr != NFS4_OK)) + goto out_default; out: return status; +out_default: + return nfs_cb_stat_to_errno(status); } /* -- cgit v1.2.2 From 7d93bd71cb3e2629cc88bc59f393bd4df4162b94 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:57:42 +0000 Subject: NFS: Repair whitespace damage in NFS PROC macro Clean up. When I was making other changes in this area, checkpatch.pl complained about the use of leading blanks in the PROC macros in the xdr files. Signed-off-by: Chuck Lever Tested-by: J.
Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs2xdr.c | 30 ++++++++--------- fs/nfs/nfs4xdr.c | 90 +++++++++++++++++++++++++------------------------- fs/nfsd/nfs4callback.c | 52 ++++++++++++++--------------- 3 files changed, 86 insertions(+), 86 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 70df08a84ead..0343175fe6c0 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -1193,21 +1193,21 @@ int nfs_stat_to_errno(enum nfs_stat status) .p_name = #proc, \ } struct rpc_procinfo nfs_procedures[] = { - PROC(GETATTR, fhandle, attrstat, 1), - PROC(SETATTR, sattrargs, attrstat, 0), - PROC(LOOKUP, diropargs, diropres, 2), - PROC(READLINK, readlinkargs, readlinkres, 3), - PROC(READ, readargs, readres, 3), - PROC(WRITE, writeargs, writeres, 4), - PROC(CREATE, createargs, diropres, 0), - PROC(REMOVE, removeargs, stat, 0), - PROC(RENAME, renameargs, stat, 0), - PROC(LINK, linkargs, stat, 0), - PROC(SYMLINK, symlinkargs, stat, 0), - PROC(MKDIR, createargs, diropres, 0), - PROC(RMDIR, diropargs, stat, 0), - PROC(READDIR, readdirargs, readdirres, 3), - PROC(STATFS, fhandle, statfsres, 0), + PROC(GETATTR, fhandle, attrstat, 1), + PROC(SETATTR, sattrargs, attrstat, 0), + PROC(LOOKUP, diropargs, diropres, 2), + PROC(READLINK, readlinkargs, readlinkres, 3), + PROC(READ, readargs, readres, 3), + PROC(WRITE, writeargs, writeres, 4), + PROC(CREATE, createargs, diropres, 0), + PROC(REMOVE, removeargs, stat, 0), + PROC(RENAME, renameargs, stat, 0), + PROC(LINK, linkargs, stat, 0), + PROC(SYMLINK, symlinkargs, stat, 0), + PROC(MKDIR, createargs, diropres, 0), + PROC(RMDIR, diropargs, stat, 0), + PROC(READDIR, readdirargs, readdirres, 3), + PROC(STATFS, fhandle, statfsres, 0), }; struct rpc_version nfs_version2 = { diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 9f1826b012e6..a48a43c75111 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -6301,8 +6301,8 @@ nfs4_stat_to_errno(int stat) #define PROC(proc, argtype, restype) \ [NFSPROC4_CLNT_##proc] = { \ .p_proc = NFSPROC4_COMPOUND, \ - .p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \ - .p_decode = (kxdrproc_t) nfs4_xdr_##restype, \ + .p_encode = (kxdrproc_t)nfs4_xdr_##argtype, \ + .p_decode = (kxdrproc_t)nfs4_xdr_##restype, \ .p_arglen = NFS4_##argtype##_sz, \ .p_replen = NFS4_##restype##_sz, \ .p_statidx = NFSPROC4_CLNT_##proc, \ @@ -6310,50 +6310,50 @@ nfs4_stat_to_errno(int stat) } struct rpc_procinfo nfs4_procedures[] = { - PROC(READ, enc_read, dec_read), - PROC(WRITE, enc_write, dec_write), - PROC(COMMIT, enc_commit, dec_commit), - PROC(OPEN, enc_open, dec_open), - PROC(OPEN_CONFIRM, enc_open_confirm, dec_open_confirm), - PROC(OPEN_NOATTR, enc_open_noattr, dec_open_noattr), - PROC(OPEN_DOWNGRADE, enc_open_downgrade, dec_open_downgrade), - PROC(CLOSE, enc_close, dec_close), - PROC(SETATTR, enc_setattr, dec_setattr), - PROC(FSINFO, enc_fsinfo, dec_fsinfo), - PROC(RENEW, enc_renew, dec_renew), - PROC(SETCLIENTID, enc_setclientid, dec_setclientid), - PROC(SETCLIENTID_CONFIRM, enc_setclientid_confirm, dec_setclientid_confirm), - PROC(LOCK, enc_lock, dec_lock), - PROC(LOCKT, enc_lockt, dec_lockt), - PROC(LOCKU, enc_locku, dec_locku), - PROC(ACCESS, enc_access, dec_access), - PROC(GETATTR, enc_getattr, dec_getattr), - PROC(LOOKUP, enc_lookup, dec_lookup), - PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root), - PROC(REMOVE, enc_remove, dec_remove), - PROC(RENAME, enc_rename, dec_rename), - PROC(LINK, enc_link, dec_link), - PROC(SYMLINK, enc_symlink, dec_symlink), - PROC(CREATE, enc_create, dec_create), - 
PROC(PATHCONF, enc_pathconf, dec_pathconf), - PROC(STATFS, enc_statfs, dec_statfs), - PROC(READLINK, enc_readlink, dec_readlink), - PROC(READDIR, enc_readdir, dec_readdir), - PROC(SERVER_CAPS, enc_server_caps, dec_server_caps), - PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn), - PROC(GETACL, enc_getacl, dec_getacl), - PROC(SETACL, enc_setacl, dec_setacl), - PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations), - PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner), + PROC(READ, enc_read, dec_read), + PROC(WRITE, enc_write, dec_write), + PROC(COMMIT, enc_commit, dec_commit), + PROC(OPEN, enc_open, dec_open), + PROC(OPEN_CONFIRM, enc_open_confirm, dec_open_confirm), + PROC(OPEN_NOATTR, enc_open_noattr, dec_open_noattr), + PROC(OPEN_DOWNGRADE, enc_open_downgrade, dec_open_downgrade), + PROC(CLOSE, enc_close, dec_close), + PROC(SETATTR, enc_setattr, dec_setattr), + PROC(FSINFO, enc_fsinfo, dec_fsinfo), + PROC(RENEW, enc_renew, dec_renew), + PROC(SETCLIENTID, enc_setclientid, dec_setclientid), + PROC(SETCLIENTID_CONFIRM, enc_setclientid_confirm, dec_setclientid_confirm), + PROC(LOCK, enc_lock, dec_lock), + PROC(LOCKT, enc_lockt, dec_lockt), + PROC(LOCKU, enc_locku, dec_locku), + PROC(ACCESS, enc_access, dec_access), + PROC(GETATTR, enc_getattr, dec_getattr), + PROC(LOOKUP, enc_lookup, dec_lookup), + PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root), + PROC(REMOVE, enc_remove, dec_remove), + PROC(RENAME, enc_rename, dec_rename), + PROC(LINK, enc_link, dec_link), + PROC(SYMLINK, enc_symlink, dec_symlink), + PROC(CREATE, enc_create, dec_create), + PROC(PATHCONF, enc_pathconf, dec_pathconf), + PROC(STATFS, enc_statfs, dec_statfs), + PROC(READLINK, enc_readlink, dec_readlink), + PROC(READDIR, enc_readdir, dec_readdir), + PROC(SERVER_CAPS, enc_server_caps, dec_server_caps), + PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn), + PROC(GETACL, enc_getacl, dec_getacl), + PROC(SETACL, enc_setacl, dec_setacl), + PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations), + PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner), #if defined(CONFIG_NFS_V4_1) - PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id), - PROC(CREATE_SESSION, enc_create_session, dec_create_session), - PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session), - PROC(SEQUENCE, enc_sequence, dec_sequence), - PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time), - PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete), - PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo), - PROC(LAYOUTGET, enc_layoutget, dec_layoutget), + PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id), + PROC(CREATE_SESSION, enc_create_session, dec_create_session), + PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session), + PROC(SEQUENCE, enc_sequence, dec_sequence), + PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time), + PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete), + PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo), + PROC(LAYOUTGET, enc_layoutget, dec_layoutget), #endif /* CONFIG_NFS_V4_1 */ }; diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index c3c6a903144c..6529534d7aae 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -580,23 +580,23 @@ out_default: /* * RPC procedure tables */ -#define PROC(proc, call, argtype, restype) \ -[NFSPROC4_CLNT_##proc] = { \ - .p_proc = NFSPROC4_CB_##call, \ - .p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \ - .p_decode = (kxdrproc_t) nfs4_xdr_##restype, \ - .p_arglen = 
NFS4_##argtype##_sz, \ - .p_replen = NFS4_##restype##_sz, \ - .p_statidx = NFSPROC4_CB_##call, \ - .p_name = #proc, \ -} - -static struct rpc_procinfo nfs4_cb_procedures[] = { - PROC(CB_NULL, NULL, enc_cb_null, dec_cb_null), - PROC(CB_RECALL, COMPOUND, enc_cb_recall, dec_cb_recall), +#define PROC(proc, call, argtype, restype) \ +[NFSPROC4_CLNT_##proc] = { \ + .p_proc = NFSPROC4_CB_##call, \ + .p_encode = (kxdrproc_t)nfs4_xdr_enc_##argtype, \ + .p_decode = (kxdrproc_t)nfs4_xdr_dec_##restype, \ + .p_arglen = NFS4_enc_##argtype##_sz, \ + .p_replen = NFS4_dec_##restype##_sz, \ + .p_statidx = NFSPROC4_CB_##call, \ + .p_name = #proc, \ +} + +static struct rpc_procinfo nfs4_cb_procedures[] = { + PROC(CB_NULL, NULL, cb_null, cb_null), + PROC(CB_RECALL, COMPOUND, cb_recall, cb_recall), }; -static struct rpc_version nfs_cb_version4 = { +static struct rpc_version nfs_cb_version4 = { /* * Note on the callback rpc program version number: despite language in rfc * 5661 section 18.36.3 requiring servers to use 4 in this field, the @@ -604,29 +604,29 @@ static struct rpc_version nfs_cb_version4 = { * in practice that appears to be what implementations use. The section * 18.36.3 language is expected to be fixed in an erratum. */ - .number = 1, - .nrprocs = ARRAY_SIZE(nfs4_cb_procedures), - .procs = nfs4_cb_procedures + .number = 1, + .nrprocs = ARRAY_SIZE(nfs4_cb_procedures), + .procs = nfs4_cb_procedures }; -static struct rpc_version * nfs_cb_version[] = { +static struct rpc_version *nfs_cb_version[] = { &nfs_cb_version4, }; static struct rpc_program cb_program; static struct rpc_stat cb_stats = { - .program = &cb_program + .program = &cb_program }; #define NFS4_CALLBACK 0x40000000 static struct rpc_program cb_program = { - .name = "nfs4_cb", - .number = NFS4_CALLBACK, - .nrvers = ARRAY_SIZE(nfs_cb_version), - .version = nfs_cb_version, - .stats = &cb_stats, - .pipe_dir_name = "/nfsd4_cb", + .name = "nfs4_cb", + .number = NFS4_CALLBACK, + .nrvers = ARRAY_SIZE(nfs_cb_version), + .version = nfs_cb_version, + .stats = &cb_stats, + .pipe_dir_name = "/nfsd4_cb", }; static int max_cb_time(void) -- cgit v1.2.2 From d8367c504e39528a057a5d7a267b6724f7fdb4b8 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:57:52 +0000 Subject: lockd: Move nlmdbg_cookie2a() to svclock.c Clean up. nlmdbg_cookie2a() is used only in svclock.c. Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/lockd/svclock.c | 30 ++++++++++++++++++++++++++++++ fs/lockd/xdr.c | 29 ----------------------------- 2 files changed, 30 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index ef5659b211e9..9266c4600208 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -46,6 +46,7 @@ static void nlmsvc_remove_block(struct nlm_block *block); static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock); static void nlmsvc_freegrantargs(struct nlm_rqst *call); static const struct rpc_call_ops nlmsvc_grant_ops; +static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie); /* * The list of blocked locks to retry @@ -934,3 +935,32 @@ nlmsvc_retry_blocked(void) return timeout; } + +#ifdef RPC_DEBUG +static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie) +{ + /* + * We can get away with a static buffer because we're only + * called with BKL held. 
+ */ + static char buf[2*NLM_MAXCOOKIELEN+1]; + unsigned int i, len = sizeof(buf); + char *p = buf; + + len--; /* allow for trailing \0 */ + if (len < 3) + return "???"; + for (i = 0 ; i < cookie->len ; i++) { + if (len < 2) { + strcpy(p-3, "..."); + break; + } + sprintf(p, "%02x", cookie->data[i]); + p += 2; + len -= 2; + } + *p = '\0'; + + return buf; +} +#endif diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c index 0eb694dc497b..964666c68a86 100644 --- a/fs/lockd/xdr.c +++ b/fs/lockd/xdr.c @@ -341,32 +341,3 @@ nlmsvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_ressize_check(rqstp, p); } - -#ifdef RPC_DEBUG -const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie) -{ - /* - * We can get away with a static buffer because we're only - * called with BKL held. - */ - static char buf[2*NLM_MAXCOOKIELEN+1]; - unsigned int i, len = sizeof(buf); - char *p = buf; - - len--; /* allow for trailing \0 */ - if (len < 3) - return "???"; - for (i = 0 ; i < cookie->len ; i++) { - if (len < 2) { - strcpy(p-3, "..."); - break; - } - sprintf(p, "%02x", cookie->data[i]); - p += 2; - len -= 2; - } - *p = '\0'; - - return buf; -} -#endif -- cgit v1.2.2 From 8111f373600cd43b3198b48b9238e3ad2fd9908d Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:58:01 +0000 Subject: NFS: Fix hdrlen calculation in NFSv4's decode_read() When computing the length of the header, be sure to include the four octets consumed by "count". Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index a48a43c75111..868815c55450 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4475,7 +4475,7 @@ static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_ goto out_overflow; eof = be32_to_cpup(p++); count = be32_to_cpup(p); - hdrlen = (u8 *) p - (u8 *) iov->iov_base; + hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base; recvd = req->rq_rcv_buf.len - hdrlen; if (count > recvd) { dprintk("NFS: server cheating in read reply: " -- cgit v1.2.2 From 573c4e1ef53a6b891b73cc2257e1604da754a2e4 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:58:11 +0000 Subject: NFS: Simplify ->decode_dirent() calling sequence Clean up. The pointer returned by ->decode_dirent() is no longer used as a pointer. The only call site (xdr_decode() in fs/nfs/dir.c) simply extracts the errno value encoded in the pointer. Replace the returned pointer with a standard integer errno return value. Also, pass the "server" argument as part of the nfs_entry instead of as a separate parameter. It's faster to derive "server" in nfs_readdir_xdr_to_array() since we already have the directory's inode handy. "server" ought to be invariant for a set of entries in the same directory, right? The legacy versions of decode_dirent() don't use "server" anyway, so it's wasted work for them to derive and pass "server" for each entry. Signed-off-by: Chuck Lever Tested-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 15 ++++++++------- fs/nfs/internal.h | 9 ++++++--- fs/nfs/nfs2xdr.c | 18 +++++++++--------- fs/nfs/nfs3xdr.c | 28 ++++++++++++++-------------- fs/nfs/nfs4_fs.h | 1 - fs/nfs/nfs4xdr.c | 29 ++++++++++++++++++++++------- 6 files changed, 59 insertions(+), 41 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 996dd8989a91..3e2123fe79f5 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -172,7 +172,7 @@ struct nfs_cache_array { struct nfs_cache_array_entry array[0]; }; -typedef __be32 * (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); +typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, int); typedef struct { struct file *file; struct page *page; @@ -378,14 +378,14 @@ error: return error; } -/* Fill in an entry based on the xdr code stored in desc->page */ -static -int xdr_decode(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, struct xdr_stream *stream) +static int xdr_decode(nfs_readdir_descriptor_t *desc, + struct nfs_entry *entry, struct xdr_stream *xdr) { - __be32 *p = desc->decode(stream, entry, NFS_SERVER(desc->file->f_path.dentry->d_inode), desc->plus); - if (IS_ERR(p)) - return PTR_ERR(p); + int error; + error = desc->decode(xdr, entry, desc->plus); + if (error) + return error; entry->fattr->time_start = desc->timestamp; entry->fattr->gencount = desc->gencount; return 0; @@ -566,6 +566,7 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, entry.eof = 0; entry.fh = nfs_alloc_fhandle(); entry.fattr = nfs_alloc_fattr(); + entry.server = NFS_SERVER(inode); if (entry.fh == NULL || entry.fattr == NULL) goto out; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 6c6a9955bae9..435eae3666bd 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -187,15 +187,18 @@ extern void nfs_destroy_directcache(void); /* nfs2xdr.c */ extern int nfs_stat_to_errno(enum nfs_stat); extern struct rpc_procinfo nfs_procedures[]; -extern __be32 *nfs2_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); +extern int nfs2_decode_dirent(struct xdr_stream *, + struct nfs_entry *, int); /* nfs3xdr.c */ extern struct rpc_procinfo nfs3_procedures[]; -extern __be32 *nfs3_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); +extern int nfs3_decode_dirent(struct xdr_stream *, + struct nfs_entry *, int); /* nfs4xdr.c */ #ifdef CONFIG_NFS_V4 -extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); +extern int nfs4_decode_dirent(struct xdr_stream *, + struct nfs_entry *, int); #endif #ifdef CONFIG_NFS_V4_1 extern const u32 nfs41_maxread_overhead; diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 0343175fe6c0..a9b848edbd2e 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -936,10 +936,10 @@ static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, __be32 *p, * the local page cache. * @xdr: XDR stream where entry resides * @entry: buffer to fill in with entry data - * @server: nfs_server data for this directory * @plus: boolean indicating whether this should be a readdirplus entry * - * Returns the position of the next item in the buffer, or an ERR_PTR. + * Returns zero if successful, otherwise a negative errno value is + * returned. 
* * This function is not invoked during READDIR reply decoding, but * rather whenever an application invokes the getdents(2) system call @@ -954,8 +954,8 @@ static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, __be32 *p, * entry *nextentry; * }; */ -__be32 *nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, - struct nfs_server *server, int plus) +int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, + int plus) { __be32 *p; int error; @@ -968,9 +968,9 @@ __be32 *nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, if (unlikely(p == NULL)) goto out_overflow; if (*p++ == xdr_zero) - return ERR_PTR(-EAGAIN); + return -EAGAIN; entry->eof = 1; - return ERR_PTR(-EBADCOOKIE); + return -EBADCOOKIE; } p = xdr_inline_decode(xdr, 4); @@ -980,7 +980,7 @@ __be32 *nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, error = decode_filename_inline(xdr, &entry->name, &entry->len); if (unlikely(error)) - return ERR_PTR(error); + return error; /* * The type (size and byte order) of nfscookie isn't defined in @@ -999,11 +999,11 @@ __be32 *nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, entry->eof = 0; if (p != NULL) entry->eof = (p[0] == xdr_zero) && (p[1] != xdr_zero); - return p; + return 0; out_overflow: print_overflow_msg(__func__, xdr); - return ERR_PTR(-EAGAIN); + return -EAGAIN; } /* diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index c97d00fe849a..15c93ccd90c5 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -1970,10 +1970,10 @@ out_status: * the local page cache * @xdr: XDR stream where entry resides * @entry: buffer to fill in with entry data - * @server: nfs_server data for this directory * @plus: boolean indicating whether this should be a readdirplus entry * - * Returns the position of the next item in the buffer, or an ERR_PTR. + * Returns zero if successful, otherwise a negative errno value is + * returned. 
* * This function is not invoked during READDIR reply decoding, but * rather whenever an application invokes the getdents(2) system call @@ -2000,8 +2000,8 @@ out_status: * entryplus3 *nextentry; * }; */ -__be32 *nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, - struct nfs_server *server, int plus) +int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, + int plus) { struct nfs_entry old = *entry; __be32 *p; @@ -2015,23 +2015,23 @@ __be32 *nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, if (unlikely(p == NULL)) goto out_overflow; if (*p == xdr_zero) - return ERR_PTR(-EAGAIN); + return -EAGAIN; entry->eof = 1; - return ERR_PTR(-EBADCOOKIE); + return -EBADCOOKIE; } error = decode_fileid3(xdr, &entry->ino); if (unlikely(error)) - return ERR_PTR(error); + return error; error = decode_inline_filename3(xdr, &entry->name, &entry->len); if (unlikely(error)) - return ERR_PTR(error); + return error; entry->prev_cookie = entry->cookie; error = decode_cookie3(xdr, &entry->cookie); if (unlikely(error)) - return ERR_PTR(error); + return error; entry->d_type = DT_UNKNOWN; @@ -2039,7 +2039,7 @@ __be32 *nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, entry->fattr->valid = 0; error = decode_post_op_attr(xdr, entry->fattr); if (unlikely(error)) - return ERR_PTR(error); + return error; if (entry->fattr->valid & NFS_ATTR_FATTR_V3) entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); @@ -2052,7 +2052,7 @@ __be32 *nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, if (unlikely(error)) { if (error == -E2BIG) goto out_truncated; - return ERR_PTR(error); + return error; } } else zero_nfs_fh3(entry->fh); @@ -2063,15 +2063,15 @@ __be32 *nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, entry->eof = 0; if (p != NULL) entry->eof = (p[0] == xdr_zero) && (p[1] != xdr_zero); - return p; + return 0; out_overflow: print_overflow_msg(__func__, xdr); - return ERR_PTR(-EAGAIN); + return -EAGAIN; out_truncated: dprintk("NFS: directory entry contains invalid file handle\n"); *entry = old; - return ERR_PTR(-EAGAIN); + return -EAGAIN; } /* diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 9fa496387fdf..7a6eecffcaeb 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -331,7 +331,6 @@ extern void nfs_free_seqid(struct nfs_seqid *seqid); extern const nfs4_stateid zero_stateid; /* nfs4xdr.c */ -extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); extern struct rpc_procinfo nfs4_procedures[]; struct nfs4_mount_data; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 868815c55450..be9f00ab0d18 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -6159,8 +6159,22 @@ out: } #endif /* CONFIG_NFS_V4_1 */ -__be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, - struct nfs_server *server, int plus) +/** + * nfs4_decode_dirent - Decode a single NFSv4 directory entry stored in + * the local page cache. + * @xdr: XDR stream where entry resides + * @entry: buffer to fill in with entry data + * @plus: boolean indicating whether this should be a readdirplus entry + * + * Returns zero if successful, otherwise a negative errno value is + * returned. + * + * This function is not invoked during READDIR reply decoding, but + * rather whenever an application invokes the getdents(2) system call + * on a directory already in our cache. 
+ */ +int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, + int plus) { uint32_t bitmap[2] = {0}; uint32_t len; @@ -6172,9 +6186,9 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, if (unlikely(!p)) goto out_overflow; if (!ntohl(*p++)) - return ERR_PTR(-EAGAIN); + return -EAGAIN; entry->eof = 1; - return ERR_PTR(-EBADCOOKIE); + return -EBADCOOKIE; } p = xdr_inline_decode(xdr, 12); @@ -6203,7 +6217,8 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, if (decode_attr_length(xdr, &len, &p) < 0) goto out_overflow; - if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh, server, 1) < 0) + if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh, + entry->server, 1) < 0) goto out_overflow; if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID) entry->ino = entry->fattr->fileid; @@ -6221,11 +6236,11 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, else entry->eof = 0; - return p; + return 0; out_overflow: print_overflow_msg(__func__, xdr); - return ERR_PTR(-EAGAIN); + return -EAGAIN; } /* -- cgit v1.2.2 From ead00597882c4ee3c534d6880cc3bcb4d412cc4b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:58:21 +0000 Subject: NFS: Squelch compiler warning in decode_getdeviceinfo() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Clean up. .../linux/nfs-2.6/fs/nfs/nfs4xdr.c: In function ‘decode_getdeviceinfo’: .../linux/nfs-2.6/fs/nfs/nfs4xdr.c:5008: warning: comparison between signed and unsigned integer expressions Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index be9f00ab0d18..a15fe99fea86 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -5000,7 +5000,7 @@ static int decode_getdeviceinfo(struct xdr_stream *xdr, goto out_overflow; len = be32_to_cpup(p); if (len) { - int i; + uint32_t i; p = xdr_inline_decode(xdr, 4 * len); if (unlikely(!p)) -- cgit v1.2.2 From 49b170047f4a9fe1483132e14a11bdf493bdb8af Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:58:30 +0000 Subject: NSM: Avoid return code checking in NSM XDR encoder functions Clean up. The trend in the other XDR encoder functions is to BUG() when encoding problems occur, since a problem here is always due to a local coding error. Then, instead of a status, zero is unconditionally returned. Update the NSM XDR encoders to behave this way. To finish the update, use the new-style be32_to_cpup() and cpu_to_be32() macros, and compute the buffer sizes using raw integers instead of sizeof(). This matches the conventions used in other XDR functions Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/lockd/mon.c | 68 +++++++++++++++++++++------------------------------------- 1 file changed, 25 insertions(+), 43 deletions(-) (limited to 'fs') diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c index e0c918949644..d812818d0258 100644 --- a/fs/lockd/mon.c +++ b/fs/lockd/mon.c @@ -401,26 +401,22 @@ void nsm_release(struct nsm_handle *nsm) * Status Monitor wire protocol. 
*/ -static int encode_nsm_string(struct xdr_stream *xdr, const char *string) +static void encode_nsm_string(struct xdr_stream *xdr, const char *string) { const u32 len = strlen(string); __be32 *p; - if (unlikely(len > SM_MAXSTRLEN)) - return -EIO; - p = xdr_reserve_space(xdr, sizeof(u32) + len); - if (unlikely(p == NULL)) - return -EIO; + BUG_ON(len > SM_MAXSTRLEN); + p = xdr_reserve_space(xdr, 4 + len); xdr_encode_opaque(p, string, len); - return 0; } /* * "mon_name" specifies the host to be monitored. */ -static int encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp) +static void encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp) { - return encode_nsm_string(xdr, argp->mon_name); + encode_nsm_string(xdr, argp->mon_name); } /* @@ -429,35 +425,25 @@ static int encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp) * (via the NLMPROC_SM_NOTIFY call) that the state of host "mon_name" * has changed. */ -static int encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp) +static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp) { - int status; __be32 *p; - status = encode_nsm_string(xdr, utsname()->nodename); - if (unlikely(status != 0)) - return status; - p = xdr_reserve_space(xdr, 3 * sizeof(u32)); - if (unlikely(p == NULL)) - return -EIO; - *p++ = htonl(argp->prog); - *p++ = htonl(argp->vers); - *p++ = htonl(argp->proc); - return 0; + encode_nsm_string(xdr, utsname()->nodename); + p = xdr_reserve_space(xdr, 4 + 4 + 4); + *p++ = cpu_to_be32(argp->prog); + *p++ = cpu_to_be32(argp->vers); + *p = cpu_to_be32(argp->proc); } /* * The "mon_id" argument specifies the non-private arguments * of an NSMPROC_MON or NSMPROC_UNMON call. */ -static int encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp) +static void encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp) { - int status; - - status = encode_mon_name(xdr, argp); - if (unlikely(status != 0)) - return status; - return encode_my_id(xdr, argp); + encode_mon_name(xdr, argp); + encode_my_id(xdr, argp); } /* @@ -465,28 +451,23 @@ static int encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp) * by the NSMPROC_MON call. This information will be supplied in the * NLMPROC_SM_NOTIFY call. 
*/ -static int encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp) +static void encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp) { __be32 *p; p = xdr_reserve_space(xdr, SM_PRIV_SIZE); - if (unlikely(p == NULL)) - return -EIO; xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE); - return 0; } static int xdr_enc_mon(struct rpc_rqst *req, __be32 *p, const struct nsm_args *argp) { struct xdr_stream xdr; - int status; xdr_init_encode(&xdr, &req->rq_snd_buf, p); - status = encode_mon_id(&xdr, argp); - if (unlikely(status)) - return status; - return encode_priv(&xdr, argp); + encode_mon_id(&xdr, argp); + encode_priv(&xdr, argp); + return 0; } static int xdr_enc_unmon(struct rpc_rqst *req, __be32 *p, @@ -495,7 +476,8 @@ static int xdr_enc_unmon(struct rpc_rqst *req, __be32 *p, struct xdr_stream xdr; xdr_init_encode(&xdr, &req->rq_snd_buf, p); - return encode_mon_id(&xdr, argp); + encode_mon_id(&xdr, argp); + return 0; } static int xdr_dec_stat_res(struct rpc_rqst *rqstp, __be32 *p, @@ -504,11 +486,11 @@ static int xdr_dec_stat_res(struct rpc_rqst *rqstp, __be32 *p, struct xdr_stream xdr; xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - p = xdr_inline_decode(&xdr, 2 * sizeof(u32)); + p = xdr_inline_decode(&xdr, 4 + 4); if (unlikely(p == NULL)) return -EIO; - resp->status = ntohl(*p++); - resp->state = ntohl(*p); + resp->status = be32_to_cpup(p++); + resp->state = be32_to_cpup(p); dprintk("lockd: xdr_dec_stat_res status %d state %d\n", resp->status, resp->state); @@ -521,10 +503,10 @@ static int xdr_dec_stat(struct rpc_rqst *rqstp, __be32 *p, struct xdr_stream xdr; xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - p = xdr_inline_decode(&xdr, sizeof(u32)); + p = xdr_inline_decode(&xdr, 4); if (unlikely(p == NULL)) return -EIO; - resp->state = ntohl(*p); + resp->state = be32_to_cpup(p); dprintk("lockd: xdr_dec_stat state %d\n", resp->state); return 0; -- cgit v1.2.2 From 98eb2b4f9323bcf2a46476576d3155758cb0a473 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:58:40 +0000 Subject: NFS: Avoid return code checking in mount XDR encoder functions Clean up. The trend in the other XDR encoder functions is to BUG() when encoding problems occur, since a problem here is always due to a local coding error. Then, instead of a status, zero is unconditionally returned. Update the mount client XDR encoders to behave this way. To finish the update, use the new-style be32_to_cpup() and cpu_to_be32() macros, and compute the buffer sizes using raw integers instead of sizeof(). This matches the conventions used in other XDR functions. Signed-off-by: Chuck Lever Tested-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/mount_clnt.c | 35 +++++++++++++++-------------------- 1 file changed, 15 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index 4f981f1f6689..c82547e49ba1 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -280,20 +280,14 @@ out_call_err: * XDR encode/decode functions for MOUNT */ -static int encode_mntdirpath(struct xdr_stream *xdr, const char *pathname) +static void encode_mntdirpath(struct xdr_stream *xdr, const char *pathname) { const u32 pathname_len = strlen(pathname); __be32 *p; - if (unlikely(pathname_len > MNTPATHLEN)) - return -EIO; - - p = xdr_reserve_space(xdr, sizeof(u32) + pathname_len); - if (unlikely(p == NULL)) - return -EIO; + BUG_ON(pathname_len > MNTPATHLEN); + p = xdr_reserve_space(xdr, 4 + pathname_len); xdr_encode_opaque(p, pathname, pathname_len); - - return 0; } static int mnt_enc_dirpath(struct rpc_rqst *req, __be32 *p, @@ -302,7 +296,8 @@ static int mnt_enc_dirpath(struct rpc_rqst *req, __be32 *p, struct xdr_stream xdr; xdr_init_encode(&xdr, &req->rq_snd_buf, p); - return encode_mntdirpath(&xdr, dirpath); + encode_mntdirpath(&xdr, dirpath); + return 0; } /* @@ -320,10 +315,10 @@ static int decode_status(struct xdr_stream *xdr, struct mountres *res) u32 status; __be32 *p; - p = xdr_inline_decode(xdr, sizeof(status)); + p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) return -EIO; - status = ntohl(*p); + status = be32_to_cpup(p); for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) { if (mnt_errtbl[i].status == status) { @@ -371,10 +366,10 @@ static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res) u32 status; __be32 *p; - p = xdr_inline_decode(xdr, sizeof(status)); + p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) return -EIO; - status = ntohl(*p); + status = be32_to_cpup(p); for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) { if (mnt3_errtbl[i].status == status) { @@ -394,11 +389,11 @@ static int decode_fhandle3(struct xdr_stream *xdr, struct mountres *res) u32 size; __be32 *p; - p = xdr_inline_decode(xdr, sizeof(size)); + p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) return -EIO; - size = ntohl(*p++); + size = be32_to_cpup(p); if (size > NFS3_FHSIZE || size == 0) return -EIO; @@ -421,15 +416,15 @@ static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res) if (*count == 0) return 0; - p = xdr_inline_decode(xdr, sizeof(entries)); + p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) return -EIO; - entries = ntohl(*p); + entries = be32_to_cpup(p); dprintk("NFS: received %u auth flavors\n", entries); if (entries > NFS_MAX_SECFLAVORS) entries = NFS_MAX_SECFLAVORS; - p = xdr_inline_decode(xdr, sizeof(u32) * entries); + p = xdr_inline_decode(xdr, 4 * entries); if (unlikely(p == NULL)) return -EIO; @@ -437,7 +432,7 @@ static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res) entries = *count; for (i = 0; i < entries; i++) { - flavors[i] = ntohl(*p++); + flavors[i] = be32_to_cpup(p++); dprintk("NFS: auth flavor[%u]: %d\n", i, flavors[i]); } *count = i; -- cgit v1.2.2 From b43cd8c153f6902100ed50c1f7e11a470c73a73f Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:58:49 +0000 Subject: NFS: Remove unused UMNT response data structure Clean up. The UMNT request has a NULL response. There's no need to set up a mountres structure for it. 
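For illustration, the resulting call setup looks roughly like this (a sketch only, not part of the patch: the rpc_proc assignment and error handling are elided, and the names follow the hunk below):

	/*
	 * Sketch, assuming the fields shown in the hunk below: the UMNT
	 * procedure has a void reply, so the rpc_message carries only the
	 * dirpath argument and no .rpc_resp pointer.
	 */
	struct rpc_message msg = {
		.rpc_argp = info->dirpath,	/* exported path to unmount */
		/* no .rpc_resp: MOUNTPROC_UMNT returns nothing to decode */
	};
	status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
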
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/mount_clnt.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index c82547e49ba1..97c3ec793305 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -236,10 +236,8 @@ void nfs_umount(const struct nfs_mount_request *info) .authflavor = RPC_AUTH_UNIX, .flags = RPC_CLNT_CREATE_NOPING, }; - struct mountres result; struct rpc_message msg = { .rpc_argp = info->dirpath, - .rpc_resp = &result, }; struct rpc_clnt *clnt; int status; -- cgit v1.2.2 From 9f06c719f474be7003763284a990bed6377bb0d4 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:59:18 +0000 Subject: SUNRPC: New xdr_streams XDR encoder API Now that all client-side XDR encoder routines use xdr_streams, there should be no need to support the legacy calling sequence [rpc_rqst *, __be32 *, RPC arg *] anywhere. We can construct an xdr_stream in the generic RPC code, instead of in each encoder function. Also, all the client-side encoder functions return 0 now, making a return value superfluous. Take this opportunity to convert them to return void instead. This is a refactoring change. It should not cause different behavior. Signed-off-by: Chuck Lever Tested-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/lockd/clnt4xdr.c | 92 +++---- fs/lockd/clntxdr.c | 92 +++---- fs/lockd/mon.c | 26 +- fs/nfs/mount_clnt.c | 18 +- fs/nfs/nfs2xdr.c | 147 +++++------ fs/nfs/nfs3xdr.c | 241 +++++++----------- fs/nfs/nfs4xdr.c | 663 ++++++++++++++++++++++--------------------------- fs/nfsd/nfs4callback.c | 22 +- 8 files changed, 542 insertions(+), 759 deletions(-) (limited to 'fs') diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c index 1a1c3e21ed2c..974f1d9cd323 100644 --- a/fs/lockd/clnt4xdr.c +++ b/fs/lockd/clnt4xdr.c @@ -385,17 +385,15 @@ static void encode_nlm4_lock(struct xdr_stream *xdr, * struct nlm4_lock alock; * }; */ -static int nlm4_xdr_enc_testargs(struct rpc_rqst *req, __be32 *p, - const struct nlm_args *args) +static void nlm4_xdr_enc_testargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; - struct xdr_stream xdr; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cookie(&xdr, &args->cookie); - encode_bool(&xdr, lock->fl.fl_type == F_WRLCK); - encode_nlm4_lock(&xdr, lock); - return 0; + encode_cookie(xdr, &args->cookie); + encode_bool(xdr, lock->fl.fl_type == F_WRLCK); + encode_nlm4_lock(xdr, lock); } /* @@ -408,20 +406,18 @@ static int nlm4_xdr_enc_testargs(struct rpc_rqst *req, __be32 *p, * int state; * }; */ -static int nlm4_xdr_enc_lockargs(struct rpc_rqst *req, __be32 *p, - const struct nlm_args *args) +static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; - struct xdr_stream xdr; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cookie(&xdr, &args->cookie); - encode_bool(&xdr, args->block); - encode_bool(&xdr, lock->fl.fl_type == F_WRLCK); - encode_nlm4_lock(&xdr, lock); - encode_bool(&xdr, args->reclaim); - encode_int32(&xdr, args->state); - return 0; + encode_cookie(xdr, &args->cookie); + encode_bool(xdr, args->block); + encode_bool(xdr, lock->fl.fl_type == F_WRLCK); + encode_nlm4_lock(xdr, lock); + encode_bool(xdr, args->reclaim); + encode_int32(xdr, args->state); } /* @@ -432,18 +428,16 @@ static int nlm4_xdr_enc_lockargs(struct rpc_rqst *req, __be32 *p, * struct nlm4_lock alock; * 
}; */ -static int nlm4_xdr_enc_cancargs(struct rpc_rqst *req, __be32 *p, - const struct nlm_args *args) +static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; - struct xdr_stream xdr; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cookie(&xdr, &args->cookie); - encode_bool(&xdr, args->block); - encode_bool(&xdr, lock->fl.fl_type == F_WRLCK); - encode_nlm4_lock(&xdr, lock); - return 0; + encode_cookie(xdr, &args->cookie); + encode_bool(xdr, args->block); + encode_bool(xdr, lock->fl.fl_type == F_WRLCK); + encode_nlm4_lock(xdr, lock); } /* @@ -452,16 +446,14 @@ static int nlm4_xdr_enc_cancargs(struct rpc_rqst *req, __be32 *p, * struct nlm4_lock alock; * }; */ -static int nlm4_xdr_enc_unlockargs(struct rpc_rqst *req, __be32 *p, - const struct nlm_args *args) +static void nlm4_xdr_enc_unlockargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; - struct xdr_stream xdr; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cookie(&xdr, &args->cookie); - encode_nlm4_lock(&xdr, lock); - return 0; + encode_cookie(xdr, &args->cookie); + encode_nlm4_lock(xdr, lock); } /* @@ -470,15 +462,12 @@ static int nlm4_xdr_enc_unlockargs(struct rpc_rqst *req, __be32 *p, * nlm4_stat stat; * }; */ -static int nlm4_xdr_enc_res(struct rpc_rqst *req, __be32 *p, - const struct nlm_res *result) +static void nlm4_xdr_enc_res(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nlm_res *result) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cookie(&xdr, &result->cookie); - encode_nlm4_stat(&xdr, result->status); - return 0; + encode_cookie(xdr, &result->cookie); + encode_nlm4_stat(xdr, result->status); } /* @@ -494,17 +483,14 @@ static int nlm4_xdr_enc_res(struct rpc_rqst *req, __be32 *p, * nlm4_testrply test_stat; * }; */ -static int nlm4_xdr_enc_testres(struct rpc_rqst *req, __be32 *p, - const struct nlm_res *result) +static void nlm4_xdr_enc_testres(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nlm_res *result) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cookie(&xdr, &result->cookie); - encode_nlm4_stat(&xdr, result->status); + encode_cookie(xdr, &result->cookie); + encode_nlm4_stat(xdr, result->status); if (result->status == nlm_lck_denied) - encode_nlm4_holder(&xdr, result); - return 0; + encode_nlm4_holder(xdr, result); } @@ -588,7 +574,7 @@ out: #define PROC(proc, argtype, restype) \ [NLMPROC_##proc] = { \ .p_proc = NLMPROC_##proc, \ - .p_encode = (kxdrproc_t)nlm4_xdr_enc_##argtype, \ + .p_encode = (kxdreproc_t)nlm4_xdr_enc_##argtype, \ .p_decode = (kxdrproc_t)nlm4_xdr_dec_##restype, \ .p_arglen = NLM4_##argtype##_sz, \ .p_replen = NLM4_##restype##_sz, \ diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c index 0472f2aff509..c6fda8fb1c5b 100644 --- a/fs/lockd/clntxdr.c +++ b/fs/lockd/clntxdr.c @@ -378,17 +378,15 @@ static void encode_nlm_lock(struct xdr_stream *xdr, * struct nlm_lock alock; * }; */ -static int nlm_xdr_enc_testargs(struct rpc_rqst *req, __be32 *p, - const struct nlm_args *args) +static void nlm_xdr_enc_testargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; - struct xdr_stream xdr; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cookie(&xdr, &args->cookie); - encode_bool(&xdr, lock->fl.fl_type == F_WRLCK); - 
encode_nlm_lock(&xdr, lock); - return 0; + encode_cookie(xdr, &args->cookie); + encode_bool(xdr, lock->fl.fl_type == F_WRLCK); + encode_nlm_lock(xdr, lock); } /* @@ -401,20 +399,18 @@ static int nlm_xdr_enc_testargs(struct rpc_rqst *req, __be32 *p, * int state; * }; */ -static int nlm_xdr_enc_lockargs(struct rpc_rqst *req, __be32 *p, - const struct nlm_args *args) +static void nlm_xdr_enc_lockargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; - struct xdr_stream xdr; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cookie(&xdr, &args->cookie); - encode_bool(&xdr, args->block); - encode_bool(&xdr, lock->fl.fl_type == F_WRLCK); - encode_nlm_lock(&xdr, lock); - encode_bool(&xdr, args->reclaim); - encode_int32(&xdr, args->state); - return 0; + encode_cookie(xdr, &args->cookie); + encode_bool(xdr, args->block); + encode_bool(xdr, lock->fl.fl_type == F_WRLCK); + encode_nlm_lock(xdr, lock); + encode_bool(xdr, args->reclaim); + encode_int32(xdr, args->state); } /* @@ -425,18 +421,16 @@ static int nlm_xdr_enc_lockargs(struct rpc_rqst *req, __be32 *p, * struct nlm_lock alock; * }; */ -static int nlm_xdr_enc_cancargs(struct rpc_rqst *req, __be32 *p, - const struct nlm_args *args) +static void nlm_xdr_enc_cancargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; - struct xdr_stream xdr; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cookie(&xdr, &args->cookie); - encode_bool(&xdr, args->block); - encode_bool(&xdr, lock->fl.fl_type == F_WRLCK); - encode_nlm_lock(&xdr, lock); - return 0; + encode_cookie(xdr, &args->cookie); + encode_bool(xdr, args->block); + encode_bool(xdr, lock->fl.fl_type == F_WRLCK); + encode_nlm_lock(xdr, lock); } /* @@ -445,16 +439,14 @@ static int nlm_xdr_enc_cancargs(struct rpc_rqst *req, __be32 *p, * struct nlm_lock alock; * }; */ -static int nlm_xdr_enc_unlockargs(struct rpc_rqst *req, __be32 *p, - const struct nlm_args *args) +static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; - struct xdr_stream xdr; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cookie(&xdr, &args->cookie); - encode_nlm_lock(&xdr, lock); - return 0; + encode_cookie(xdr, &args->cookie); + encode_nlm_lock(xdr, lock); } /* @@ -463,15 +455,12 @@ static int nlm_xdr_enc_unlockargs(struct rpc_rqst *req, __be32 *p, * nlm_stat stat; * }; */ -static int nlm_xdr_enc_res(struct rpc_rqst *req, __be32 *p, - const struct nlm_res *result) +static void nlm_xdr_enc_res(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nlm_res *result) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cookie(&xdr, &result->cookie); - encode_nlm_stat(&xdr, result->status); - return 0; + encode_cookie(xdr, &result->cookie); + encode_nlm_stat(xdr, result->status); } /* @@ -494,16 +483,13 @@ static void encode_nlm_testrply(struct xdr_stream *xdr, encode_nlm_holder(xdr, result); } -static int nlm_xdr_enc_testres(struct rpc_rqst *req, __be32 *p, - const struct nlm_res *result) +static void nlm_xdr_enc_testres(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nlm_res *result) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cookie(&xdr, &result->cookie); - encode_nlm_stat(&xdr, result->status); - encode_nlm_testrply(&xdr, result); - return 0; + encode_cookie(xdr, 
&result->cookie); + encode_nlm_stat(xdr, result->status); + encode_nlm_testrply(xdr, result); } @@ -586,7 +572,7 @@ out: #define PROC(proc, argtype, restype) \ [NLMPROC_##proc] = { \ .p_proc = NLMPROC_##proc, \ - .p_encode = (kxdrproc_t)nlm_xdr_enc_##argtype, \ + .p_encode = (kxdreproc_t)nlm_xdr_enc_##argtype, \ .p_decode = (kxdrproc_t)nlm_xdr_dec_##restype, \ .p_arglen = NLM_##argtype##_sz, \ .p_replen = NLM_##restype##_sz, \ diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c index d812818d0258..baa77bc9d825 100644 --- a/fs/lockd/mon.c +++ b/fs/lockd/mon.c @@ -459,25 +459,17 @@ static void encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp) xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE); } -static int xdr_enc_mon(struct rpc_rqst *req, __be32 *p, - const struct nsm_args *argp) +static void nsm_xdr_enc_mon(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nsm_args *argp) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_mon_id(&xdr, argp); - encode_priv(&xdr, argp); - return 0; + encode_mon_id(xdr, argp); + encode_priv(xdr, argp); } -static int xdr_enc_unmon(struct rpc_rqst *req, __be32 *p, - const struct nsm_args *argp) +static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nsm_args *argp) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_mon_id(&xdr, argp); - return 0; + encode_mon_id(xdr, argp); } static int xdr_dec_stat_res(struct rpc_rqst *rqstp, __be32 *p, @@ -524,7 +516,7 @@ static int xdr_dec_stat(struct rpc_rqst *rqstp, __be32 *p, static struct rpc_procinfo nsm_procedures[] = { [NSMPROC_MON] = { .p_proc = NSMPROC_MON, - .p_encode = (kxdrproc_t)xdr_enc_mon, + .p_encode = (kxdreproc_t)nsm_xdr_enc_mon, .p_decode = (kxdrproc_t)xdr_dec_stat_res, .p_arglen = SM_mon_sz, .p_replen = SM_monres_sz, @@ -533,7 +525,7 @@ static struct rpc_procinfo nsm_procedures[] = { }, [NSMPROC_UNMON] = { .p_proc = NSMPROC_UNMON, - .p_encode = (kxdrproc_t)xdr_enc_unmon, + .p_encode = (kxdreproc_t)nsm_xdr_enc_unmon, .p_decode = (kxdrproc_t)xdr_dec_stat, .p_arglen = SM_mon_id_sz, .p_replen = SM_unmonres_sz, diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index 97c3ec793305..979ebd7af3cb 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -288,14 +288,10 @@ static void encode_mntdirpath(struct xdr_stream *xdr, const char *pathname) xdr_encode_opaque(p, pathname, pathname_len); } -static int mnt_enc_dirpath(struct rpc_rqst *req, __be32 *p, - const char *dirpath) +static void mnt_xdr_enc_dirpath(struct rpc_rqst *req, struct xdr_stream *xdr, + const char *dirpath) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_mntdirpath(&xdr, dirpath); - return 0; + encode_mntdirpath(xdr, dirpath); } /* @@ -460,7 +456,7 @@ static int mnt_dec_mountres3(struct rpc_rqst *req, __be32 *p, static struct rpc_procinfo mnt_procedures[] = { [MOUNTPROC_MNT] = { .p_proc = MOUNTPROC_MNT, - .p_encode = (kxdrproc_t)mnt_enc_dirpath, + .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath, .p_decode = (kxdrproc_t)mnt_dec_mountres, .p_arglen = MNT_enc_dirpath_sz, .p_replen = MNT_dec_mountres_sz, @@ -469,7 +465,7 @@ static struct rpc_procinfo mnt_procedures[] = { }, [MOUNTPROC_UMNT] = { .p_proc = MOUNTPROC_UMNT, - .p_encode = (kxdrproc_t)mnt_enc_dirpath, + .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath, .p_arglen = MNT_enc_dirpath_sz, .p_statidx = MOUNTPROC_UMNT, .p_name = "UMOUNT", @@ -479,7 +475,7 @@ static struct rpc_procinfo mnt_procedures[] = { static struct 
rpc_procinfo mnt3_procedures[] = { [MOUNTPROC3_MNT] = { .p_proc = MOUNTPROC3_MNT, - .p_encode = (kxdrproc_t)mnt_enc_dirpath, + .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath, .p_decode = (kxdrproc_t)mnt_dec_mountres3, .p_arglen = MNT_enc_dirpath_sz, .p_replen = MNT_dec_mountres3_sz, @@ -488,7 +484,7 @@ static struct rpc_procinfo mnt3_procedures[] = { }, [MOUNTPROC3_UMNT] = { .p_proc = MOUNTPROC3_UMNT, - .p_encode = (kxdrproc_t)mnt_enc_dirpath, + .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath, .p_arglen = MNT_enc_dirpath_sz, .p_statidx = MOUNTPROC3_UMNT, .p_name = "UMOUNT", diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index a9b848edbd2e..8f3acbec761f 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -558,14 +558,11 @@ out_default: * "NFS: Network File System Protocol Specification". */ -static int nfs2_xdr_enc_fhandle(struct rpc_rqst *req, __be32 *p, - const struct nfs_fh *fh) +static void nfs2_xdr_enc_fhandle(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_fh *fh) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_fhandle(&xdr, fh); - return 0; + encode_fhandle(xdr, fh); } /* @@ -576,37 +573,28 @@ static int nfs2_xdr_enc_fhandle(struct rpc_rqst *req, __be32 *p, * sattr attributes; * }; */ -static int nfs2_xdr_enc_sattrargs(struct rpc_rqst *req, __be32 *p, - const struct nfs_sattrargs *args) +static void nfs2_xdr_enc_sattrargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_sattrargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_fhandle(&xdr, args->fh); - encode_sattr(&xdr, args->sattr); - return 0; + encode_fhandle(xdr, args->fh); + encode_sattr(xdr, args->sattr); } -static int nfs2_xdr_enc_diropargs(struct rpc_rqst *req, __be32 *p, - const struct nfs_diropargs *args) +static void nfs2_xdr_enc_diropargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_diropargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_diropargs(&xdr, args->fh, args->name, args->len); - return 0; + encode_diropargs(xdr, args->fh, args->name, args->len); } -static int nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req, __be32 *p, - const struct nfs_readlinkargs *args) +static void nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_readlinkargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_fhandle(&xdr, args->fh); + encode_fhandle(xdr, args->fh); prepare_reply_buffer(req, args->pages, args->pgbase, args->pglen, NFS_readlinkres_sz); - return 0; } /* @@ -634,17 +622,14 @@ static void encode_readargs(struct xdr_stream *xdr, *p = cpu_to_be32(count); } -static int nfs2_xdr_enc_readargs(struct rpc_rqst *req, __be32 *p, - const struct nfs_readargs *args) +static void nfs2_xdr_enc_readargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_readargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_readargs(&xdr, args); + encode_readargs(xdr, args); prepare_reply_buffer(req, args->pages, args->pgbase, args->count, NFS_readres_sz); req->rq_rcv_buf.flags |= XDRBUF_READ; - return 0; } /* @@ -677,15 +662,12 @@ static void encode_writeargs(struct xdr_stream *xdr, xdr_write_pages(xdr, args->pages, args->pgbase, count); } -static int nfs2_xdr_enc_writeargs(struct rpc_rqst *req, __be32 *p, - const struct nfs_writeargs *args) +static void nfs2_xdr_enc_writeargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const 
struct nfs_writeargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_writeargs(&xdr, args); - xdr.buf->flags |= XDRBUF_WRITE; - return 0; + encode_writeargs(xdr, args); + xdr->buf->flags |= XDRBUF_WRITE; } /* @@ -696,25 +678,19 @@ static int nfs2_xdr_enc_writeargs(struct rpc_rqst *req, __be32 *p, * sattr attributes; * }; */ -static int nfs2_xdr_enc_createargs(struct rpc_rqst *req, __be32 *p, - const struct nfs_createargs *args) +static void nfs2_xdr_enc_createargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_createargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_diropargs(&xdr, args->fh, args->name, args->len); - encode_sattr(&xdr, args->sattr); - return 0; + encode_diropargs(xdr, args->fh, args->name, args->len); + encode_sattr(xdr, args->sattr); } -static int nfs2_xdr_enc_removeargs(struct rpc_rqst *req, __be32 *p, - const struct nfs_removeargs *args) +static void nfs2_xdr_enc_removeargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_removeargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_diropargs(&xdr, args->fh, args->name.name, args->name.len); - return 0; + encode_diropargs(xdr, args->fh, args->name.name, args->name.len); } /* @@ -725,17 +701,15 @@ static int nfs2_xdr_enc_removeargs(struct rpc_rqst *req, __be32 *p, * diropargs to; * }; */ -static int nfs2_xdr_enc_renameargs(struct rpc_rqst *req, __be32 *p, - const struct nfs_renameargs *args) +static void nfs2_xdr_enc_renameargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_renameargs *args) { const struct qstr *old = args->old_name; const struct qstr *new = args->new_name; - struct xdr_stream xdr; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_diropargs(&xdr, args->old_dir, old->name, old->len); - encode_diropargs(&xdr, args->new_dir, new->name, new->len); - return 0; + encode_diropargs(xdr, args->old_dir, old->name, old->len); + encode_diropargs(xdr, args->new_dir, new->name, new->len); } /* @@ -746,15 +720,12 @@ static int nfs2_xdr_enc_renameargs(struct rpc_rqst *req, __be32 *p, * diropargs to; * }; */ -static int nfs2_xdr_enc_linkargs(struct rpc_rqst *req, __be32 *p, - const struct nfs_linkargs *args) +static void nfs2_xdr_enc_linkargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_linkargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_fhandle(&xdr, args->fromfh); - encode_diropargs(&xdr, args->tofh, args->toname, args->tolen); - return 0; + encode_fhandle(xdr, args->fromfh); + encode_diropargs(xdr, args->tofh, args->toname, args->tolen); } /* @@ -766,16 +737,13 @@ static int nfs2_xdr_enc_linkargs(struct rpc_rqst *req, __be32 *p, * sattr attributes; * }; */ -static int nfs2_xdr_enc_symlinkargs(struct rpc_rqst *req, __be32 *p, - const struct nfs_symlinkargs *args) +static void nfs2_xdr_enc_symlinkargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_symlinkargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_diropargs(&xdr, args->fromfh, args->fromname, args->fromlen); - encode_path(&xdr, args->pages, args->pathlen); - encode_sattr(&xdr, args->sattr); - return 0; + encode_diropargs(xdr, args->fromfh, args->fromname, args->fromlen); + encode_path(xdr, args->pages, args->pathlen); + encode_sattr(xdr, args->sattr); } /* @@ -799,16 +767,13 @@ static void encode_readdirargs(struct xdr_stream *xdr, *p = 
cpu_to_be32(args->count); } -static int nfs2_xdr_enc_readdirargs(struct rpc_rqst *req, __be32 *p, - const struct nfs_readdirargs *args) +static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_readdirargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_readdirargs(&xdr, args); + encode_readdirargs(xdr, args); prepare_reply_buffer(req, args->pages, 0, args->count, NFS_readdirres_sz); - return 0; } /* @@ -1184,7 +1149,7 @@ int nfs_stat_to_errno(enum nfs_stat status) #define PROC(proc, argtype, restype, timer) \ [NFSPROC_##proc] = { \ .p_proc = NFSPROC_##proc, \ - .p_encode = (kxdrproc_t)nfs2_xdr_enc_##argtype, \ + .p_encode = (kxdreproc_t)nfs2_xdr_enc_##argtype, \ .p_decode = (kxdrproc_t)nfs2_xdr_dec_##restype, \ .p_arglen = NFS_##argtype##_sz, \ .p_replen = NFS_##restype##_sz, \ diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 15c93ccd90c5..ae1b1a43f05e 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -835,14 +835,11 @@ static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh, * nfs_fh3 object; * }; */ -static int nfs3_xdr_enc_getattr3args(struct rpc_rqst *req, __be32 *p, - const struct nfs_fh *fh) +static void nfs3_xdr_enc_getattr3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_fh *fh) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_nfs_fh3(&xdr, fh); - return 0; + encode_nfs_fh3(xdr, fh); } /* @@ -876,16 +873,13 @@ static void encode_sattrguard3(struct xdr_stream *xdr, } } -static int nfs3_xdr_enc_setattr3args(struct rpc_rqst *req, __be32 *p, - const struct nfs3_sattrargs *args) +static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs3_sattrargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_nfs_fh3(&xdr, args->fh); - encode_sattr3(&xdr, args->sattr); - encode_sattrguard3(&xdr, args); - return 0; + encode_nfs_fh3(xdr, args->fh); + encode_sattr3(xdr, args->sattr); + encode_sattrguard3(xdr, args); } /* @@ -895,14 +889,11 @@ static int nfs3_xdr_enc_setattr3args(struct rpc_rqst *req, __be32 *p, * diropargs3 what; * }; */ -static int nfs3_xdr_enc_lookup3args(struct rpc_rqst *req, __be32 *p, - const struct nfs3_diropargs *args) +static void nfs3_xdr_enc_lookup3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs3_diropargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_diropargs3(&xdr, args->fh, args->name, args->len); - return 0; + encode_diropargs3(xdr, args->fh, args->name, args->len); } /* @@ -920,14 +911,11 @@ static void encode_access3args(struct xdr_stream *xdr, encode_uint32(xdr, args->access); } -static int nfs3_xdr_enc_access3args(struct rpc_rqst *req, __be32 *p, - const struct nfs3_accessargs *args) +static void nfs3_xdr_enc_access3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs3_accessargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_access3args(&xdr, args); - return 0; + encode_access3args(xdr, args); } /* @@ -937,16 +925,13 @@ static int nfs3_xdr_enc_access3args(struct rpc_rqst *req, __be32 *p, * nfs_fh3 symlink; * }; */ -static int nfs3_xdr_enc_readlink3args(struct rpc_rqst *req, __be32 *p, - const struct nfs3_readlinkargs *args) +static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs3_readlinkargs *args) { - struct xdr_stream 
xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_nfs_fh3(&xdr, args->fh); + encode_nfs_fh3(xdr, args->fh); prepare_reply_buffer(req, args->pages, args->pgbase, args->pglen, NFS3_readlinkres_sz); - return 0; } /* @@ -970,17 +955,14 @@ static void encode_read3args(struct xdr_stream *xdr, *p = cpu_to_be32(args->count); } -static int nfs3_xdr_enc_read3args(struct rpc_rqst *req, __be32 *p, - const struct nfs_readargs *args) +static void nfs3_xdr_enc_read3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_readargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_read3args(&xdr, args); + encode_read3args(xdr, args); prepare_reply_buffer(req, args->pages, args->pgbase, args->count, NFS3_readres_sz); req->rq_rcv_buf.flags |= XDRBUF_READ; - return 0; } /* @@ -1015,15 +997,12 @@ static void encode_write3args(struct xdr_stream *xdr, xdr_write_pages(xdr, args->pages, args->pgbase, args->count); } -static int nfs3_xdr_enc_write3args(struct rpc_rqst *req, __be32 *p, - const struct nfs_writeargs *args) +static void nfs3_xdr_enc_write3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_writeargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_write3args(&xdr, args); - xdr.buf->flags |= XDRBUF_WRITE; - return 0; + encode_write3args(xdr, args); + xdr->buf->flags |= XDRBUF_WRITE; } /* @@ -1065,15 +1044,12 @@ static void encode_createhow3(struct xdr_stream *xdr, } } -static int nfs3_xdr_enc_create3args(struct rpc_rqst *req, __be32 *p, - const struct nfs3_createargs *args) +static void nfs3_xdr_enc_create3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs3_createargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_diropargs3(&xdr, args->fh, args->name, args->len); - encode_createhow3(&xdr, args); - return 0; + encode_diropargs3(xdr, args->fh, args->name, args->len); + encode_createhow3(xdr, args); } /* @@ -1084,15 +1060,12 @@ static int nfs3_xdr_enc_create3args(struct rpc_rqst *req, __be32 *p, * sattr3 attributes; * }; */ -static int nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req, __be32 *p, - const struct nfs3_mkdirargs *args) +static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs3_mkdirargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_diropargs3(&xdr, args->fh, args->name, args->len); - encode_sattr3(&xdr, args->sattr); - return 0; + encode_diropargs3(xdr, args->fh, args->name, args->len); + encode_sattr3(xdr, args->sattr); } /* @@ -1115,15 +1088,12 @@ static void encode_symlinkdata3(struct xdr_stream *xdr, encode_nfspath3(xdr, args->pages, args->pathlen); } -static int nfs3_xdr_enc_symlink3args(struct rpc_rqst *req, __be32 *p, - const struct nfs3_symlinkargs *args) +static void nfs3_xdr_enc_symlink3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs3_symlinkargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_diropargs3(&xdr, args->fromfh, args->fromname, args->fromlen); - encode_symlinkdata3(&xdr, args); - return 0; + encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen); + encode_symlinkdata3(xdr, args); } /* @@ -1178,15 +1148,12 @@ static void encode_mknoddata3(struct xdr_stream *xdr, } } -static int nfs3_xdr_enc_mknod3args(struct rpc_rqst *req, __be32 *p, - const struct nfs3_mknodargs *args) +static void nfs3_xdr_enc_mknod3args(struct 
rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs3_mknodargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_diropargs3(&xdr, args->fh, args->name, args->len); - encode_mknoddata3(&xdr, args); - return 0; + encode_diropargs3(xdr, args->fh, args->name, args->len); + encode_mknoddata3(xdr, args); } /* @@ -1196,14 +1163,11 @@ static int nfs3_xdr_enc_mknod3args(struct rpc_rqst *req, __be32 *p, * diropargs3 object; * }; */ -static int nfs3_xdr_enc_remove3args(struct rpc_rqst *req, __be32 *p, - const struct nfs_removeargs *args) +static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_removeargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_diropargs3(&xdr, args->fh, args->name.name, args->name.len); - return 0; + encode_diropargs3(xdr, args->fh, args->name.name, args->name.len); } /* @@ -1214,17 +1178,15 @@ static int nfs3_xdr_enc_remove3args(struct rpc_rqst *req, __be32 *p, * diropargs3 to; * }; */ -static int nfs3_xdr_enc_rename3args(struct rpc_rqst *req, __be32 *p, - const struct nfs_renameargs *args) +static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_renameargs *args) { const struct qstr *old = args->old_name; const struct qstr *new = args->new_name; - struct xdr_stream xdr; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_diropargs3(&xdr, args->old_dir, old->name, old->len); - encode_diropargs3(&xdr, args->new_dir, new->name, new->len); - return 0; + encode_diropargs3(xdr, args->old_dir, old->name, old->len); + encode_diropargs3(xdr, args->new_dir, new->name, new->len); } /* @@ -1235,15 +1197,12 @@ static int nfs3_xdr_enc_rename3args(struct rpc_rqst *req, __be32 *p, * diropargs3 link; * }; */ -static int nfs3_xdr_enc_link3args(struct rpc_rqst *req, __be32 *p, - const struct nfs3_linkargs *args) +static void nfs3_xdr_enc_link3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs3_linkargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_nfs_fh3(&xdr, args->fromfh); - encode_diropargs3(&xdr, args->tofh, args->toname, args->tolen); - return 0; + encode_nfs_fh3(xdr, args->fromfh); + encode_diropargs3(xdr, args->tofh, args->toname, args->tolen); } /* @@ -1269,16 +1228,13 @@ static void encode_readdir3args(struct xdr_stream *xdr, *p = cpu_to_be32(args->count); } -static int nfs3_xdr_enc_readdir3args(struct rpc_rqst *req, __be32 *p, - const struct nfs3_readdirargs *args) +static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs3_readdirargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_readdir3args(&xdr, args); + encode_readdir3args(xdr, args); prepare_reply_buffer(req, args->pages, 0, args->count, NFS3_readdirres_sz); - return 0; } /* @@ -1312,16 +1268,13 @@ static void encode_readdirplus3args(struct xdr_stream *xdr, *p = cpu_to_be32(args->count); } -static int nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req, __be32 *p, - const struct nfs3_readdirargs *args) +static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs3_readdirargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_readdirplus3args(&xdr, args); + encode_readdirplus3args(xdr, args); prepare_reply_buffer(req, args->pages, 0, args->count, NFS3_readdirres_sz); - return 0; } /* @@ -1345,57 
+1298,49 @@ static void encode_commit3args(struct xdr_stream *xdr, *p = cpu_to_be32(args->count); } -static int nfs3_xdr_enc_commit3args(struct rpc_rqst *req, __be32 *p, - const struct nfs_writeargs *args) +static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs_writeargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_commit3args(&xdr, args); - return 0; + encode_commit3args(xdr, args); } #ifdef CONFIG_NFS_V3_ACL -static int nfs3_xdr_enc_getacl3args(struct rpc_rqst *req, __be32 *p, - const struct nfs3_getaclargs *args) +static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs3_getaclargs *args) { - struct xdr_stream xdr; - - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_nfs_fh3(&xdr, args->fh); - encode_uint32(&xdr, args->mask); + encode_nfs_fh3(xdr, args->fh); + encode_uint32(xdr, args->mask); if (args->mask & (NFS_ACL | NFS_DFACL)) prepare_reply_buffer(req, args->pages, 0, NFSACL_MAXPAGES << PAGE_SHIFT, ACL3_getaclres_sz); - return 0; } -static int nfs3_xdr_enc_setacl3args(struct rpc_rqst *req, __be32 *p, - const struct nfs3_setaclargs *args) +static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs3_setaclargs *args) { - struct xdr_stream xdr; unsigned int base; int error; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_nfs_fh3(&xdr, NFS_FH(args->inode)); - encode_uint32(&xdr, args->mask); + encode_nfs_fh3(xdr, NFS_FH(args->inode)); + encode_uint32(xdr, args->mask); if (args->npages != 0) - xdr_write_pages(&xdr, args->pages, 0, args->len); + xdr_write_pages(xdr, args->pages, 0, args->len); base = req->rq_slen; - error = nfsacl_encode(xdr.buf, base, args->inode, + error = nfsacl_encode(xdr->buf, base, args->inode, (args->mask & NFS_ACL) ? args->acl_access : NULL, 1, 0); BUG_ON(error < 0); - error = nfsacl_encode(xdr.buf, base + error, args->inode, + error = nfsacl_encode(xdr->buf, base + error, args->inode, (args->mask & NFS_DFACL) ? 
args->acl_default : NULL, 1, NFS_ACL_DEFAULT); BUG_ON(error < 0); - return 0; } #endif /* CONFIG_NFS_V3_ACL */ @@ -2506,7 +2451,7 @@ out_default: #define PROC(proc, argtype, restype, timer) \ [NFS3PROC_##proc] = { \ .p_proc = NFS3PROC_##proc, \ - .p_encode = (kxdrproc_t)nfs3_xdr_enc_##argtype##3args, \ + .p_encode = (kxdreproc_t)nfs3_xdr_enc_##argtype##3args, \ .p_decode = (kxdrproc_t)nfs3_xdr_dec_##restype##3res, \ .p_arglen = NFS3_##argtype##args_sz, \ .p_replen = NFS3_##restype##res_sz, \ @@ -2549,7 +2494,7 @@ struct rpc_version nfs_version3 = { static struct rpc_procinfo nfs3_acl_procedures[] = { [ACLPROC3_GETACL] = { .p_proc = ACLPROC3_GETACL, - .p_encode = (kxdrproc_t)nfs3_xdr_enc_getacl3args, + .p_encode = (kxdreproc_t)nfs3_xdr_enc_getacl3args, .p_decode = (kxdrproc_t)nfs3_xdr_dec_getacl3res, .p_arglen = ACL3_getaclargs_sz, .p_replen = ACL3_getaclres_sz, @@ -2558,7 +2503,7 @@ static struct rpc_procinfo nfs3_acl_procedures[] = { }, [ACLPROC3_SETACL] = { .p_proc = ACLPROC3_SETACL, - .p_encode = (kxdrproc_t)nfs3_xdr_enc_setacl3args, + .p_encode = (kxdreproc_t)nfs3_xdr_enc_setacl3args, .p_decode = (kxdrproc_t)nfs3_xdr_dec_setacl3res, .p_arglen = ACL3_setaclargs_sz, .p_replen = ACL3_setaclres_sz, diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index a15fe99fea86..6ec38b3e4a3d 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1510,7 +1510,7 @@ encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr) hdr->replen += decode_restorefh_maxsz; } -static int +static void encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compound_hdr *hdr) { __be32 *p; @@ -1521,14 +1521,12 @@ encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compoun p = reserve_space(xdr, 2*4); *p++ = cpu_to_be32(1); *p = cpu_to_be32(FATTR4_WORD0_ACL); - if (arg->acl_len % 4) - return -EINVAL; + BUG_ON(arg->acl_len % 4); p = reserve_space(xdr, 4); *p = cpu_to_be32(arg->acl_len); xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len); hdr->nops++; hdr->replen += decode_setacl_maxsz; - return 0; } static void @@ -1833,393 +1831,362 @@ static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args) /* * Encode an ACCESS request */ -static int nfs4_xdr_enc_access(struct rpc_rqst *req, __be32 *p, const struct nfs4_accessargs *args) +static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nfs4_accessargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_access(&xdr, args->access, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_access(xdr, args->access, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode LOOKUP request */ -static int nfs4_xdr_enc_lookup(struct rpc_rqst *req, __be32 *p, const struct nfs4_lookup_arg *args) +static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nfs4_lookup_arg *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - 
encode_putfh(&xdr, args->dir_fh, &hdr); - encode_lookup(&xdr, args->name, &hdr); - encode_getfh(&xdr, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->dir_fh, &hdr); + encode_lookup(xdr, args->name, &hdr); + encode_getfh(xdr, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode LOOKUP_ROOT request */ -static int nfs4_xdr_enc_lookup_root(struct rpc_rqst *req, __be32 *p, const struct nfs4_lookup_root_arg *args) +static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs4_lookup_root_arg *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putrootfh(&xdr, &hdr); - encode_getfh(&xdr, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putrootfh(xdr, &hdr); + encode_getfh(xdr, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode REMOVE request */ -static int nfs4_xdr_enc_remove(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args) +static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nfs_removeargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_remove(&xdr, &args->name, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_remove(xdr, &args->name, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode RENAME request */ -static int nfs4_xdr_enc_rename(struct rpc_rqst *req, __be32 *p, const struct nfs_renameargs *args) +static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nfs_renameargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->old_dir, &hdr); - encode_savefh(&xdr, &hdr); - encode_putfh(&xdr, args->new_dir, &hdr); - encode_rename(&xdr, args->old_name, args->new_name, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); - encode_restorefh(&xdr, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->old_dir, &hdr); + encode_savefh(xdr, &hdr); + encode_putfh(xdr, args->new_dir, &hdr); + encode_rename(xdr, args->old_name, args->new_name, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); + encode_restorefh(xdr, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode LINK request */ -static int nfs4_xdr_enc_link(struct rpc_rqst *req, __be32 *p, const struct nfs4_link_arg *args) +static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr, + 
const struct nfs4_link_arg *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_savefh(&xdr, &hdr); - encode_putfh(&xdr, args->dir_fh, &hdr); - encode_link(&xdr, args->name, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); - encode_restorefh(&xdr, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_savefh(xdr, &hdr); + encode_putfh(xdr, args->dir_fh, &hdr); + encode_link(xdr, args->name, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); + encode_restorefh(xdr, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode CREATE request */ -static int nfs4_xdr_enc_create(struct rpc_rqst *req, __be32 *p, const struct nfs4_create_arg *args) +static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nfs4_create_arg *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->dir_fh, &hdr); - encode_savefh(&xdr, &hdr); - encode_create(&xdr, args, &hdr); - encode_getfh(&xdr, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); - encode_restorefh(&xdr, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->dir_fh, &hdr); + encode_savefh(xdr, &hdr); + encode_create(xdr, args, &hdr); + encode_getfh(xdr, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); + encode_restorefh(xdr, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode SYMLINK request */ -static int nfs4_xdr_enc_symlink(struct rpc_rqst *req, __be32 *p, const struct nfs4_create_arg *args) +static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nfs4_create_arg *args) { - return nfs4_xdr_enc_create(req, p, args); + nfs4_xdr_enc_create(req, xdr, args); } /* * Encode GETATTR request */ -static int nfs4_xdr_enc_getattr(struct rpc_rqst *req, __be32 *p, const struct nfs4_getattr_arg *args) +static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nfs4_getattr_arg *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode a CLOSE request */ -static int nfs4_xdr_enc_close(struct rpc_rqst *req, __be32 *p, struct nfs_closeargs *args) +static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs_closeargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, 
&req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_close(&xdr, args, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_close(xdr, args, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode an OPEN request */ -static int nfs4_xdr_enc_open(struct rpc_rqst *req, __be32 *p, struct nfs_openargs *args) +static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs_openargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_savefh(&xdr, &hdr); - encode_open(&xdr, args, &hdr); - encode_getfh(&xdr, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); - encode_restorefh(&xdr, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_savefh(xdr, &hdr); + encode_open(xdr, args, &hdr); + encode_getfh(xdr, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); + encode_restorefh(xdr, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode an OPEN_CONFIRM request */ -static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_open_confirmargs *args) +static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs_open_confirmargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .nops = 0, }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_open_confirm(&xdr, args, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_open_confirm(xdr, args, &hdr); encode_nops(&hdr); - return 0; } /* * Encode an OPEN request with no attributes. 
*/ -static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, __be32 *p, struct nfs_openargs *args) +static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs_openargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_open(&xdr, args, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_open(xdr, args, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode an OPEN_DOWNGRADE request */ -static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, __be32 *p, struct nfs_closeargs *args) +static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs_closeargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_open_downgrade(&xdr, args, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_open_downgrade(xdr, args, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode a LOCK request */ -static int nfs4_xdr_enc_lock(struct rpc_rqst *req, __be32 *p, struct nfs_lock_args *args) +static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs_lock_args *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_lock(&xdr, args, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_lock(xdr, args, &hdr); encode_nops(&hdr); - return 0; } /* * Encode a LOCKT request */ -static int nfs4_xdr_enc_lockt(struct rpc_rqst *req, __be32 *p, struct nfs_lockt_args *args) +static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs_lockt_args *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_lockt(&xdr, args, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_lockt(xdr, args, &hdr); encode_nops(&hdr); - return 0; } /* * Encode a LOCKU request */ -static int nfs4_xdr_enc_locku(struct rpc_rqst *req, __be32 *p, struct nfs_locku_args *args) +static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs_locku_args *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, 
&req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_locku(&xdr, args, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_locku(xdr, args, &hdr); encode_nops(&hdr); - return 0; } -static int nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req, __be32 *p, struct nfs_release_lockowner_args *args) +static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs_release_lockowner_args *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = 0, }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_release_lockowner(&xdr, &args->lock_owner, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_release_lockowner(xdr, &args->lock_owner, &hdr); encode_nops(&hdr); - return 0; } /* * Encode a READLINK request */ -static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct nfs4_readlink *args) +static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nfs4_readlink *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_readlink(&xdr, args, req, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_readlink(xdr, args, req, &hdr); xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages, args->pgbase, args->pglen); encode_nops(&hdr); - return 0; } /* * Encode a READDIR request */ -static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nfs4_readdir_arg *args) +static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nfs4_readdir_arg *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_readdir(&xdr, args, req, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_readdir(xdr, args, req, &hdr); xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages, args->pgbase, args->count); @@ -2227,428 +2194,387 @@ static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nf __func__, hdr.replen << 2, args->pages, args->pgbase, args->count); encode_nops(&hdr); - return 0; } /* * Encode a READ request */ -static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args) +static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs_readargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_read(&xdr, args, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, 
&hdr); + encode_read(xdr, args, &hdr); xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages, args->pgbase, args->count); req->rq_rcv_buf.flags |= XDRBUF_READ; encode_nops(&hdr); - return 0; } /* * Encode an SETATTR request */ -static int nfs4_xdr_enc_setattr(struct rpc_rqst *req, __be32 *p, struct nfs_setattrargs *args) +static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs_setattrargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_setattr(&xdr, args, args->server, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_setattr(xdr, args, args->server, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode a GETACL request */ -static int -nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p, - struct nfs_getaclargs *args) +static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs_getaclargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; uint32_t replen; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1; - encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr); + encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr); xdr_inline_pages(&req->rq_rcv_buf, replen << 2, args->acl_pages, args->acl_pgbase, args->acl_len); encode_nops(&hdr); - return 0; } /* * Encode a WRITE request */ -static int nfs4_xdr_enc_write(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args) +static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs_writeargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_write(&xdr, args, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_write(xdr, args, &hdr); req->rq_snd_buf.flags |= XDRBUF_WRITE; - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * a COMMIT request */ -static int nfs4_xdr_enc_commit(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args) +static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs_writeargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_commit(&xdr, args, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, 
&hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_commit(xdr, args, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * FSINFO request */ -static int nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs4_fsinfo_arg *args) +static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs4_fsinfo_arg *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_fsinfo(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_fsinfo(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * a PATHCONF request */ -static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, __be32 *p, const struct nfs4_pathconf_arg *args) +static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nfs4_pathconf_arg *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_getattr_one(&xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0], + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_getattr_one(xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0], &hdr); encode_nops(&hdr); - return 0; } /* * a STATFS request */ -static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, __be32 *p, const struct nfs4_statfs_arg *args) +static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nfs4_statfs_arg *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - encode_getattr_two(&xdr, args->bitmask[0] & nfs4_statfs_bitmap[0], + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_getattr_two(xdr, args->bitmask[0] & nfs4_statfs_bitmap[0], args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr); encode_nops(&hdr); - return 0; } /* * GETATTR_BITMAP request */ -static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p, - struct nfs4_server_caps_arg *args) +static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs4_server_caps_arg *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fhandle, &hdr); - encode_getattr_one(&xdr, FATTR4_WORD0_SUPPORTED_ATTRS| + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fhandle, &hdr); + encode_getattr_one(xdr, FATTR4_WORD0_SUPPORTED_ATTRS| FATTR4_WORD0_LINK_SUPPORT| FATTR4_WORD0_SYMLINK_SUPPORT| FATTR4_WORD0_ACLSUPPORT, 
&hdr); encode_nops(&hdr); - return 0; } /* * a RENEW request */ -static int nfs4_xdr_enc_renew(struct rpc_rqst *req, __be32 *p, struct nfs_client *clp) +static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs_client *clp) { - struct xdr_stream xdr; struct compound_hdr hdr = { .nops = 0, }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_renew(&xdr, clp, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_renew(xdr, clp, &hdr); encode_nops(&hdr); - return 0; } /* * a SETCLIENTID request */ -static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid *sc) +static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs4_setclientid *sc) { - struct xdr_stream xdr; struct compound_hdr hdr = { .nops = 0, }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_setclientid(&xdr, sc, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_setclientid(xdr, sc, &hdr); encode_nops(&hdr); - return 0; } /* * a SETCLIENTID_CONFIRM request */ -static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid_res *arg) +static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs4_setclientid_res *arg) { - struct xdr_stream xdr; struct compound_hdr hdr = { .nops = 0, }; const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_setclientid_confirm(&xdr, arg, &hdr); - encode_putrootfh(&xdr, &hdr); - encode_fsinfo(&xdr, lease_bitmap, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_setclientid_confirm(xdr, arg, &hdr); + encode_putrootfh(xdr, &hdr); + encode_fsinfo(xdr, lease_bitmap, &hdr); encode_nops(&hdr); - return 0; } /* * DELEGRETURN request */ -static int nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, __be32 *p, const struct nfs4_delegreturnargs *args) +static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, + struct xdr_stream *xdr, + const struct nfs4_delegreturnargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fhandle, &hdr); - encode_delegreturn(&xdr, args->stateid, &hdr); - encode_getfattr(&xdr, args->bitmask, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fhandle, &hdr); + encode_delegreturn(xdr, args->stateid, &hdr); + encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); - return 0; } /* * Encode FS_LOCATIONS request */ -static int nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs4_fs_locations_arg *args) +static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs4_fs_locations_arg *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; uint32_t replen; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->dir_fh, &hdr); - encode_lookup(&xdr, args->name, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->dir_fh, &hdr); 
+ encode_lookup(xdr, args->name, &hdr); replen = hdr.replen; /* get the attribute into args->page */ - encode_fs_locations(&xdr, args->bitmask, &hdr); + encode_fs_locations(xdr, args->bitmask, &hdr); xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page, 0, PAGE_SIZE); encode_nops(&hdr); - return 0; } #if defined(CONFIG_NFS_V4_1) /* * EXCHANGE_ID request */ -static int nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, uint32_t *p, - struct nfs41_exchange_id_args *args) +static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs41_exchange_id_args *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = args->client->cl_mvops->minor_version, }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_exchange_id(&xdr, args, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_exchange_id(xdr, args, &hdr); encode_nops(&hdr); - return 0; } /* * a CREATE_SESSION request */ -static int nfs4_xdr_enc_create_session(struct rpc_rqst *req, uint32_t *p, - struct nfs41_create_session_args *args) +static void nfs4_xdr_enc_create_session(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs41_create_session_args *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = args->client->cl_mvops->minor_version, }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_create_session(&xdr, args, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_create_session(xdr, args, &hdr); encode_nops(&hdr); - return 0; } /* * a DESTROY_SESSION request */ -static int nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, uint32_t *p, - struct nfs4_session *session) +static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs4_session *session) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = session->clp->cl_mvops->minor_version, }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_destroy_session(&xdr, session, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_destroy_session(xdr, session, &hdr); encode_nops(&hdr); - return 0; } /* * a SEQUENCE request */ -static int nfs4_xdr_enc_sequence(struct rpc_rqst *req, uint32_t *p, - struct nfs4_sequence_args *args) +static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs4_sequence_args *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, args, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, args, &hdr); encode_nops(&hdr); - return 0; } /* * a GET_LEASE_TIME request */ -static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p, - struct nfs4_get_lease_time_args *args) +static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs4_get_lease_time_args *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->la_seq_args), }; const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->la_seq_args, &hdr); - encode_putrootfh(&xdr, &hdr); - encode_fsinfo(&xdr, lease_bitmap, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->la_seq_args, &hdr); 
+ encode_putrootfh(xdr, &hdr); + encode_fsinfo(xdr, lease_bitmap, &hdr); encode_nops(&hdr); - return 0; } /* * a RECLAIM_COMPLETE request */ -static int nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, uint32_t *p, - struct nfs41_reclaim_complete_args *args) +static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs41_reclaim_complete_args *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args) }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_reclaim_complete(&xdr, args, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_reclaim_complete(xdr, args, &hdr); encode_nops(&hdr); - return 0; } /* * Encode GETDEVICEINFO request */ -static int nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, uint32_t *p, - struct nfs4_getdeviceinfo_args *args) +static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs4_getdeviceinfo_args *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_getdeviceinfo(&xdr, args, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_getdeviceinfo(xdr, args, &hdr); /* set up reply kvec. Subtract notification bitmap max size (2) * so that notification bitmap is put in xdr_buf tail */ @@ -2657,27 +2583,24 @@ static int nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, uint32_t *p, args->pdev->pglen); encode_nops(&hdr); - return 0; } /* * Encode LAYOUTGET request */ -static int nfs4_xdr_enc_layoutget(struct rpc_rqst *req, uint32_t *p, - struct nfs4_layoutget_args *args) +static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs4_layoutget_args *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, NFS_FH(args->inode), &hdr); - encode_layoutget(&xdr, args, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, NFS_FH(args->inode), &hdr); + encode_layoutget(xdr, args, &hdr); encode_nops(&hdr); - return 0; } #endif /* CONFIG_NFS_V4_1 */ @@ -5368,22 +5291,18 @@ out: /* * Encode an SETACL request */ -static int -nfs4_xdr_enc_setacl(struct rpc_rqst *req, __be32 *p, struct nfs_setaclargs *args) +static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr, + struct nfs_setaclargs *args) { - struct xdr_stream xdr; struct compound_hdr hdr = { .minorversion = nfs4_xdr_minorversion(&args->seq_args), }; - int status; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_compound_hdr(&xdr, req, &hdr); - encode_sequence(&xdr, &args->seq_args, &hdr); - encode_putfh(&xdr, args->fh, &hdr); - status = encode_setacl(&xdr, args, &hdr); + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_setacl(xdr, args, &hdr); encode_nops(&hdr); - return status; } /* @@ -6316,7 +6235,7 @@ nfs4_stat_to_errno(int stat) #define PROC(proc, argtype, restype) \ 
[NFSPROC4_CLNT_##proc] = { \ .p_proc = NFSPROC4_COMPOUND, \ - .p_encode = (kxdrproc_t)nfs4_xdr_##argtype, \ + .p_encode = (kxdreproc_t)nfs4_xdr_##argtype, \ .p_decode = (kxdrproc_t)nfs4_xdr_##restype, \ .p_arglen = NFS4_##argtype##_sz, \ .p_replen = NFS4_##restype##_sz, \ diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 6529534d7aae..c363efda8ecf 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -499,34 +499,28 @@ out_default: /* * NB: Without this zero space reservation, callbacks over krb5p fail */ -static int nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p, void *__unused) +static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr, + void *__unused) { - struct xdr_stream xdrs, *xdr = &xdrs; - - xdr_init_encode(&xdrs, &req->rq_snd_buf, p); xdr_reserve_space(xdr, 0); - return 0; } /* * 20.2. Operation 4: CB_RECALL - Recall a Delegation */ -static int nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p, - const struct nfsd4_callback *cb) +static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr, + const struct nfsd4_callback *cb) { - struct xdr_stream xdr; const struct nfs4_delegation *args = cb->cb_op; struct nfs4_cb_compound_hdr hdr = { .ident = cb->cb_clp->cl_cb_ident, .minorversion = cb->cb_minorversion, }; - xdr_init_encode(&xdr, &req->rq_snd_buf, p); - encode_cb_compound4args(&xdr, &hdr); - encode_cb_sequence4args(&xdr, cb, &hdr); - encode_cb_recall4args(&xdr, args, &hdr); + encode_cb_compound4args(xdr, &hdr); + encode_cb_sequence4args(xdr, cb, &hdr); + encode_cb_recall4args(xdr, args, &hdr); encode_cb_nops(&hdr); - return 0; } @@ -583,7 +577,7 @@ out_default: #define PROC(proc, call, argtype, restype) \ [NFSPROC4_CLNT_##proc] = { \ .p_proc = NFSPROC4_CB_##call, \ - .p_encode = (kxdrproc_t)nfs4_xdr_enc_##argtype, \ + .p_encode = (kxdreproc_t)nfs4_xdr_enc_##argtype, \ .p_decode = (kxdrproc_t)nfs4_xdr_dec_##restype, \ .p_arglen = NFS4_enc_##argtype##_sz, \ .p_replen = NFS4_dec_##restype##_sz, \ -- cgit v1.2.2 From bf2695516db982e90a22fc94f93491b481796bb1 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 14:59:29 +0000 Subject: SUNRPC: New xdr_streams XDR decoder API Now that all client-side XDR decoder routines use xdr_streams, there should be no need to support the legacy calling sequence [rpc_rqst *, __be32 *, RPC res *] anywhere. We can construct an xdr_stream in the generic RPC code, instead of in each decoder function. This is a refactoring change. It should not cause different behavior. Signed-off-by: Chuck Lever Tested-by: J. 
Bruce Fields Signed-off-by: Trond Myklebust --- fs/lockd/clnt4xdr.c | 20 +- fs/lockd/clntxdr.c | 20 +- fs/lockd/mon.c | 30 +-- fs/nfs/mount_clnt.c | 30 ++- fs/nfs/nfs2xdr.c | 68 ++---- fs/nfs/nfs3xdr.c | 195 +++++++--------- fs/nfs/nfs4xdr.c | 619 ++++++++++++++++++++++++------------------------- fs/nfsd/nfs4callback.c | 16 +- 8 files changed, 468 insertions(+), 530 deletions(-) (limited to 'fs') diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c index 974f1d9cd323..f848b52c67b1 100644 --- a/fs/lockd/clnt4xdr.c +++ b/fs/lockd/clnt4xdr.c @@ -529,17 +529,16 @@ out: return error; } -static int nlm4_xdr_dec_testres(struct rpc_rqst *req, __be32 *p, +static int nlm4_xdr_dec_testres(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nlm_res *result) { - struct xdr_stream xdr; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_cookie(&xdr, &result->cookie); + error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; - error = decode_nlm4_testrply(&xdr, result); + error = decode_nlm4_testrply(xdr, result); out: return error; } @@ -550,17 +549,16 @@ out: * nlm4_stat stat; * }; */ -static int nlm4_xdr_dec_res(struct rpc_rqst *req, __be32 *p, +static int nlm4_xdr_dec_res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nlm_res *result) { - struct xdr_stream xdr; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_cookie(&xdr, &result->cookie); + error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; - error = decode_nlm4_stat(&xdr, &result->status); + error = decode_nlm4_stat(xdr, &result->status); out: return error; } @@ -575,7 +573,7 @@ out: [NLMPROC_##proc] = { \ .p_proc = NLMPROC_##proc, \ .p_encode = (kxdreproc_t)nlm4_xdr_enc_##argtype, \ - .p_decode = (kxdrproc_t)nlm4_xdr_dec_##restype, \ + .p_decode = (kxdrdproc_t)nlm4_xdr_dec_##restype, \ .p_arglen = NLM4_##argtype##_sz, \ .p_replen = NLM4_##restype##_sz, \ .p_statidx = NLMPROC_##proc, \ diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c index c6fda8fb1c5b..180ac34feb9a 100644 --- a/fs/lockd/clntxdr.c +++ b/fs/lockd/clntxdr.c @@ -527,17 +527,16 @@ out: return error; } -static int nlm_xdr_dec_testres(struct rpc_rqst *req, __be32 *p, +static int nlm_xdr_dec_testres(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nlm_res *result) { - struct xdr_stream xdr; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_cookie(&xdr, &result->cookie); + error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; - error = decode_nlm_testrply(&xdr, result); + error = decode_nlm_testrply(xdr, result); out: return error; } @@ -548,17 +547,16 @@ out: * nlm_stat stat; * }; */ -static int nlm_xdr_dec_res(struct rpc_rqst *req, __be32 *p, +static int nlm_xdr_dec_res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nlm_res *result) { - struct xdr_stream xdr; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_cookie(&xdr, &result->cookie); + error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; - error = decode_nlm_stat(&xdr, &result->status); + error = decode_nlm_stat(xdr, &result->status); out: return error; } @@ -573,7 +571,7 @@ out: [NLMPROC_##proc] = { \ .p_proc = NLMPROC_##proc, \ .p_encode = (kxdreproc_t)nlm_xdr_enc_##argtype, \ - .p_decode = (kxdrproc_t)nlm_xdr_dec_##restype, \ + .p_decode = (kxdrdproc_t)nlm_xdr_dec_##restype, \ .p_arglen = NLM_##argtype##_sz, \ .p_replen = NLM_##restype##_sz, \ .p_statidx = NLMPROC_##proc, \ diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c 
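[Illustrative sketch, not part of the patch: the commit message above says the xdr_stream can now be constructed once in the generic SUNRPC code instead of in every per-procedure decoder. The helper below only shows that shape under assumed names — the function name and its exact placement are hypothetical, and it relies on the usual <linux/sunrpc/xdr.h> declarations; the real call site lives in the shared RPC client code, not in this diff.]

	static int rpc_decode_reply_sketch(struct rpc_rqst *req, __be32 *p,
			int (*decode)(struct rpc_rqst *, struct xdr_stream *, void *),
			void *result)
	{
		struct xdr_stream xdr;

		/*
		 * One shared initialisation replaces the per-procedure
		 * xdr_init_decode() calls that the hunks in this patch delete.
		 */
		xdr_init_decode(&xdr, &req->rq_rcv_buf, p);

		/*
		 * The kxdrdproc_t-style decoder registered in the procedure
		 * table now only parses its own result structure.
		 */
		return decode(req, &xdr, result);
	}
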
index baa77bc9d825..23d7451b2938 100644 --- a/fs/lockd/mon.c +++ b/fs/lockd/mon.c @@ -472,35 +472,35 @@ static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr, encode_mon_id(xdr, argp); } -static int xdr_dec_stat_res(struct rpc_rqst *rqstp, __be32 *p, - struct nsm_res *resp) +static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, + struct nsm_res *resp) { - struct xdr_stream xdr; + __be32 *p; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - p = xdr_inline_decode(&xdr, 4 + 4); + p = xdr_inline_decode(xdr, 4 + 4); if (unlikely(p == NULL)) return -EIO; resp->status = be32_to_cpup(p++); resp->state = be32_to_cpup(p); - dprintk("lockd: xdr_dec_stat_res status %d state %d\n", - resp->status, resp->state); + dprintk("lockd: %s status %d state %d\n", + __func__, resp->status, resp->state); return 0; } -static int xdr_dec_stat(struct rpc_rqst *rqstp, __be32 *p, - struct nsm_res *resp) +static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, + struct nsm_res *resp) { - struct xdr_stream xdr; + __be32 *p; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - p = xdr_inline_decode(&xdr, 4); + p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) return -EIO; resp->state = be32_to_cpup(p); - dprintk("lockd: xdr_dec_stat state %d\n", resp->state); + dprintk("lockd: %s state %d\n", __func__, resp->state); return 0; } @@ -517,7 +517,7 @@ static struct rpc_procinfo nsm_procedures[] = { [NSMPROC_MON] = { .p_proc = NSMPROC_MON, .p_encode = (kxdreproc_t)nsm_xdr_enc_mon, - .p_decode = (kxdrproc_t)xdr_dec_stat_res, + .p_decode = (kxdrdproc_t)nsm_xdr_dec_stat_res, .p_arglen = SM_mon_sz, .p_replen = SM_monres_sz, .p_statidx = NSMPROC_MON, @@ -526,7 +526,7 @@ static struct rpc_procinfo nsm_procedures[] = { [NSMPROC_UNMON] = { .p_proc = NSMPROC_UNMON, .p_encode = (kxdreproc_t)nsm_xdr_enc_unmon, - .p_decode = (kxdrproc_t)xdr_dec_stat, + .p_decode = (kxdrdproc_t)nsm_xdr_dec_stat, .p_arglen = SM_mon_id_sz, .p_replen = SM_unmonres_sz, .p_statidx = NSMPROC_UNMON, diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index 979ebd7af3cb..697e07235f30 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -340,18 +340,16 @@ static int decode_fhandle(struct xdr_stream *xdr, struct mountres *res) return 0; } -static int mnt_dec_mountres(struct rpc_rqst *req, __be32 *p, - struct mountres *res) +static int mnt_xdr_dec_mountres(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct mountres *res) { - struct xdr_stream xdr; int status; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - - status = decode_status(&xdr, res); + status = decode_status(xdr, res); if (unlikely(status != 0 || res->errno != 0)) return status; - return decode_fhandle(&xdr, res); + return decode_fhandle(xdr, res); } static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res) @@ -434,30 +432,28 @@ static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res) return 0; } -static int mnt_dec_mountres3(struct rpc_rqst *req, __be32 *p, - struct mountres *res) +static int mnt_xdr_dec_mountres3(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct mountres *res) { - struct xdr_stream xdr; int status; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - - status = decode_fhs_status(&xdr, res); + status = decode_fhs_status(xdr, res); if (unlikely(status != 0 || res->errno != 0)) return status; - status = decode_fhandle3(&xdr, res); + status = decode_fhandle3(xdr, res); if (unlikely(status != 0)) { res->errno = -EBADHANDLE; return 0; } - return 
decode_auth_flavors(&xdr, res); + return decode_auth_flavors(xdr, res); } static struct rpc_procinfo mnt_procedures[] = { [MOUNTPROC_MNT] = { .p_proc = MOUNTPROC_MNT, .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath, - .p_decode = (kxdrproc_t)mnt_dec_mountres, + .p_decode = (kxdrdproc_t)mnt_xdr_dec_mountres, .p_arglen = MNT_enc_dirpath_sz, .p_replen = MNT_dec_mountres_sz, .p_statidx = MOUNTPROC_MNT, @@ -476,7 +472,7 @@ static struct rpc_procinfo mnt3_procedures[] = { [MOUNTPROC3_MNT] = { .p_proc = MOUNTPROC3_MNT, .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath, - .p_decode = (kxdrproc_t)mnt_dec_mountres3, + .p_decode = (kxdrdproc_t)mnt_xdr_dec_mountres3, .p_arglen = MNT_enc_dirpath_sz, .p_replen = MNT_dec_mountres3_sz, .p_statidx = MOUNTPROC3_MNT, diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 8f3acbec761f..51f1cfa04d27 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -783,15 +783,13 @@ static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req, * "NFS: Network File System Protocol Specification". */ -static int nfs2_xdr_dec_stat(struct rpc_rqst *req, __be32 *p, +static int nfs2_xdr_dec_stat(struct rpc_rqst *req, struct xdr_stream *xdr, void *__unused) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_stat(&xdr, &status); + error = decode_stat(xdr, &status); if (unlikely(error)) goto out; if (status != NFS_OK) @@ -802,22 +800,16 @@ out_default: return nfs_stat_to_errno(status); } -static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, __be32 *p, +static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_fattr *result) { - struct xdr_stream xdr; - - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - return decode_attrstat(&xdr, result); + return decode_attrstat(xdr, result); } -static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, __be32 *p, +static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_diropok *result) { - struct xdr_stream xdr; - - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - return decode_diropres(&xdr, result); + return decode_diropres(xdr, result); } /* @@ -830,20 +822,18 @@ static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, __be32 *p, * void; * }; */ -static int nfs2_xdr_dec_readlinkres(struct rpc_rqst *req, __be32 *p, - void *__unused) +static int nfs2_xdr_dec_readlinkres(struct rpc_rqst *req, + struct xdr_stream *xdr, void *__unused) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_stat(&xdr, &status); + error = decode_stat(xdr, &status); if (unlikely(error)) goto out; if (status != NFS_OK) goto out_default; - error = decode_path(&xdr); + error = decode_path(xdr); out: return error; out_default: @@ -861,39 +851,33 @@ out_default: * void; * }; */ -static int nfs2_xdr_dec_readres(struct rpc_rqst *req, __be32 *p, +static int nfs2_xdr_dec_readres(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_readres *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_stat(&xdr, &status); + error = decode_stat(xdr, &status); if (unlikely(error)) goto out; if (status != NFS_OK) goto out_default; - error = decode_fattr(&xdr, result->fattr); + error = decode_fattr(xdr, result->fattr); if (unlikely(error)) goto out; - error = decode_nfsdata(&xdr, result); + error = decode_nfsdata(xdr, result); out: return error; out_default: return nfs_stat_to_errno(status); } -static int 
nfs2_xdr_dec_writeres(struct rpc_rqst *req, __be32 *p, +static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_writeres *result) { - struct xdr_stream xdr; - /* All NFSv2 writes are "file sync" writes */ result->verf->committed = NFS_FILE_SYNC; - - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - return decode_attrstat(&xdr, result->fattr); + return decode_attrstat(xdr, result->fattr); } /** @@ -1008,20 +992,18 @@ out_cheating: goto out; } -static int nfs2_xdr_dec_readdirres(struct rpc_rqst *req, __be32 *p, - void *__unused) +static int nfs2_xdr_dec_readdirres(struct rpc_rqst *req, + struct xdr_stream *xdr, void *__unused) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_stat(&xdr, &status); + error = decode_stat(xdr, &status); if (unlikely(error)) goto out; if (status != NFS_OK) goto out_default; - error = decode_readdirok(&xdr); + error = decode_readdirok(xdr); out: return error; out_default: @@ -1062,20 +1044,18 @@ out_overflow: return -EIO; } -static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, __be32 *p, +static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs2_fsstat *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_stat(&xdr, &status); + error = decode_stat(xdr, &status); if (unlikely(error)) goto out; if (status != NFS_OK) goto out_default; - error = decode_info(&xdr, result); + error = decode_info(xdr, result); out: return error; out_default: @@ -1150,7 +1130,7 @@ int nfs_stat_to_errno(enum nfs_stat status) [NFSPROC_##proc] = { \ .p_proc = NFSPROC_##proc, \ .p_encode = (kxdreproc_t)nfs2_xdr_enc_##argtype, \ - .p_decode = (kxdrproc_t)nfs2_xdr_dec_##restype, \ + .p_decode = (kxdrdproc_t)nfs2_xdr_dec_##restype, \ .p_arglen = NFS_##argtype##_sz, \ .p_replen = NFS_##restype##_sz, \ .p_timer = timer, \ diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index ae1b1a43f05e..df30a26cc4fa 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -1366,20 +1366,19 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req, * void; * }; */ -static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs_fattr *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_default; - error = decode_fattr3(&xdr, result); + error = decode_fattr3(xdr, result); out: return error; out_default: @@ -1404,18 +1403,17 @@ out_default: * SETATTR3resfail resfail; * }; */ -static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs_fattr *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; - error = decode_wcc_data(&xdr, result); + error = decode_wcc_data(xdr, result); if (unlikely(error)) goto out; if (status != NFS3_OK) @@ -1446,30 +1444,29 @@ out_status: * LOOKUP3resfail resfail; * }; */ -static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req, + 
struct xdr_stream *xdr, struct nfs3_diropres *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_default; - error = decode_nfs_fh3(&xdr, result->fh); + error = decode_nfs_fh3(xdr, result->fh); if (unlikely(error)) goto out; - error = decode_post_op_attr(&xdr, result->fattr); + error = decode_post_op_attr(xdr, result->fattr); if (unlikely(error)) goto out; - error = decode_post_op_attr(&xdr, result->dir_attr); + error = decode_post_op_attr(xdr, result->dir_attr); out: return error; out_default: - error = decode_post_op_attr(&xdr, result->dir_attr); + error = decode_post_op_attr(xdr, result->dir_attr); if (unlikely(error)) goto out; return nfs_stat_to_errno(status); @@ -1494,23 +1491,22 @@ out_default: * ACCESS3resfail resfail; * }; */ -static int nfs3_xdr_dec_access3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_access3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs3_accessres *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; - error = decode_post_op_attr(&xdr, result->fattr); + error = decode_post_op_attr(xdr, result->fattr); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_default; - error = decode_uint32(&xdr, &result->access); + error = decode_uint32(xdr, &result->access); out: return error; out_default: @@ -1536,23 +1532,22 @@ out_default: * READLINK3resfail resfail; * }; */ -static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs_fattr *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; - error = decode_post_op_attr(&xdr, result); + error = decode_post_op_attr(xdr, result); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_default; - error = decode_nfspath3(&xdr); + error = decode_nfspath3(xdr); out: return error; out_default: @@ -1620,23 +1615,21 @@ out_overflow: return -EIO; } -static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_readres *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; - error = decode_post_op_attr(&xdr, result->fattr); + error = decode_post_op_attr(xdr, result->fattr); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_status; - error = decode_read3resok(&xdr, result); + error = decode_read3resok(xdr, result); out: return error; out_status: @@ -1692,23 +1685,21 @@ out_overflow: return -EIO; } -static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs_writeres *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if 
(unlikely(error)) goto out; - error = decode_wcc_data(&xdr, result->fattr); + error = decode_wcc_data(xdr, result->fattr); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_status; - error = decode_write3resok(&xdr, result); + error = decode_write3resok(xdr, result); out: return error; out_status: @@ -1757,24 +1748,23 @@ out: return error; } -static int nfs3_xdr_dec_create3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_create3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs3_diropres *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_default; - error = decode_create3resok(&xdr, result); + error = decode_create3resok(xdr, result); out: return error; out_default: - error = decode_wcc_data(&xdr, result->dir_attr); + error = decode_wcc_data(xdr, result->dir_attr); if (unlikely(error)) goto out; return nfs_stat_to_errno(status); @@ -1798,18 +1788,17 @@ out_default: * REMOVE3resfail resfail; * }; */ -static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs_removeres *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; - error = decode_wcc_data(&xdr, result->dir_attr); + error = decode_wcc_data(xdr, result->dir_attr); if (unlikely(error)) goto out; if (status != NFS3_OK) @@ -1840,21 +1829,20 @@ out_status: * RENAME3resfail resfail; * }; */ -static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs_renameres *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; - error = decode_wcc_data(&xdr, result->old_fattr); + error = decode_wcc_data(xdr, result->old_fattr); if (unlikely(error)) goto out; - error = decode_wcc_data(&xdr, result->new_fattr); + error = decode_wcc_data(xdr, result->new_fattr); if (unlikely(error)) goto out; if (status != NFS3_OK) @@ -1885,21 +1873,19 @@ out_status: * LINK3resfail resfail; * }; */ -static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs3_linkres *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; - error = decode_post_op_attr(&xdr, result->fattr); + error = decode_post_op_attr(xdr, result->fattr); if (unlikely(error)) goto out; - error = decode_wcc_data(&xdr, result->dir_attr); + error = decode_wcc_data(xdr, result->dir_attr); if (unlikely(error)) goto out; if (status != NFS3_OK) @@ -2085,24 +2071,23 @@ out: return error; } -static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs3_readdirres *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, 
&req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_default; - error = decode_readdir3resok(&xdr, result); + error = decode_readdir3resok(xdr, result); out: return error; out_default: - error = decode_post_op_attr(&xdr, result->dir_attr); + error = decode_post_op_attr(xdr, result->dir_attr); if (unlikely(error)) goto out; return nfs_stat_to_errno(status); @@ -2154,23 +2139,22 @@ out_overflow: return -EIO; } -static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs_fsstat *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; - error = decode_post_op_attr(&xdr, result->fattr); + error = decode_post_op_attr(xdr, result->fattr); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_status; - error = decode_fsstat3resok(&xdr, result); + error = decode_fsstat3resok(xdr, result); out: return error; out_status: @@ -2231,23 +2215,22 @@ out_overflow: return -EIO; } -static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs_fsinfo *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; - error = decode_post_op_attr(&xdr, result->fattr); + error = decode_post_op_attr(xdr, result->fattr); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_status; - error = decode_fsinfo3resok(&xdr, result); + error = decode_fsinfo3resok(xdr, result); out: return error; out_status: @@ -2295,23 +2278,22 @@ out_overflow: return -EIO; } -static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs_pathconf *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; - error = decode_post_op_attr(&xdr, result->fattr); + error = decode_post_op_attr(xdr, result->fattr); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_status; - error = decode_pathconf3resok(&xdr, result); + error = decode_pathconf3resok(xdr, result); out: return error; out_status: @@ -2337,23 +2319,22 @@ out_status: * COMMIT3resfail resfail; * }; */ -static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs_writeres *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; - error = decode_wcc_data(&xdr, result->fattr); + error = decode_wcc_data(xdr, result->fattr); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_status; - error = decode_writeverf3(&xdr, result->verf->verifier); + error = decode_writeverf3(xdr, result->verf->verifier); out: return error; out_status: @@ -2406,40 +2387,38 @@ out: return error; 
} -static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs3_getaclres *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_default; - error = decode_getacl3resok(&xdr, result); + error = decode_getacl3resok(xdr, result); out: return error; out_default: return nfs_stat_to_errno(status); } -static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req, __be32 *p, +static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs_fattr *result) { - struct xdr_stream xdr; enum nfs_stat status; int error; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - error = decode_nfsstat3(&xdr, &status); + error = decode_nfsstat3(xdr, &status); if (unlikely(error)) goto out; if (status != NFS3_OK) goto out_default; - error = decode_post_op_attr(&xdr, result); + error = decode_post_op_attr(xdr, result); out: return error; out_default: @@ -2452,7 +2431,7 @@ out_default: [NFS3PROC_##proc] = { \ .p_proc = NFS3PROC_##proc, \ .p_encode = (kxdreproc_t)nfs3_xdr_enc_##argtype##3args, \ - .p_decode = (kxdrproc_t)nfs3_xdr_dec_##restype##3res, \ + .p_decode = (kxdrdproc_t)nfs3_xdr_dec_##restype##3res, \ .p_arglen = NFS3_##argtype##args_sz, \ .p_replen = NFS3_##restype##res_sz, \ .p_timer = timer, \ @@ -2495,7 +2474,7 @@ static struct rpc_procinfo nfs3_acl_procedures[] = { [ACLPROC3_GETACL] = { .p_proc = ACLPROC3_GETACL, .p_encode = (kxdreproc_t)nfs3_xdr_enc_getacl3args, - .p_decode = (kxdrproc_t)nfs3_xdr_dec_getacl3res, + .p_decode = (kxdrdproc_t)nfs3_xdr_dec_getacl3res, .p_arglen = ACL3_getaclargs_sz, .p_replen = ACL3_getaclres_sz, .p_timer = 1, @@ -2504,7 +2483,7 @@ static struct rpc_procinfo nfs3_acl_procedures[] = { [ACLPROC3_SETACL] = { .p_proc = ACLPROC3_SETACL, .p_encode = (kxdreproc_t)nfs3_xdr_enc_setacl3args, - .p_decode = (kxdrproc_t)nfs3_xdr_dec_setacl3res, + .p_decode = (kxdrdproc_t)nfs3_xdr_dec_setacl3res, .p_arglen = ACL3_setaclargs_sz, .p_replen = ACL3_setaclres_sz, .p_timer = 0, diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 6ec38b3e4a3d..f3f99156bfcb 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -5013,26 +5013,26 @@ out_overflow: /* * Decode OPEN_DOWNGRADE response */ -static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res) +static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, + struct nfs_closeres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_open_downgrade(&xdr, res); + status = decode_open_downgrade(xdr, res); if (status != 0) goto out; - decode_getfattr(&xdr, res->fattr, res->server, + decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5041,26 +5041,25 @@ out: /* * Decode ACCESS response */ -static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_accessres *res) +static int 
nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs4_accessres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status != 0) goto out; - status = decode_access(&xdr, res); + status = decode_access(xdr, res); if (status != 0) goto out; - decode_getfattr(&xdr, res->fattr, res->server, + decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5069,26 +5068,28 @@ out: /* * Decode LOOKUP response */ -static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lookup_res *res) +static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs4_lookup_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - if ((status = decode_putfh(&xdr)) != 0) + status = decode_putfh(xdr); + if (status) goto out; - if ((status = decode_lookup(&xdr)) != 0) + status = decode_lookup(xdr); + if (status) goto out; - if ((status = decode_getfh(&xdr, res->fh)) != 0) + status = decode_getfh(xdr, res->fh); + if (status) goto out; - status = decode_getfattr(&xdr, res->fattr, res->server + status = decode_getfattr(xdr, res->fattr, res->server ,!RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5097,23 +5098,25 @@ out: /* * Decode LOOKUP_ROOT response */ -static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lookup_res *res) +static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, + struct nfs4_lookup_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - if ((status = decode_putrootfh(&xdr)) != 0) + status = decode_putrootfh(xdr); + if (status) goto out; - if ((status = decode_getfh(&xdr, res->fh)) == 0) - status = decode_getfattr(&xdr, res->fattr, res->server, + status = decode_getfh(xdr, res->fh); + if (status == 0) + status = decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5122,24 +5125,25 @@ out: /* * Decode REMOVE response */ -static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_removeres *res) +static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs_removeres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - if ((status = decode_putfh(&xdr)) 
!= 0) + status = decode_putfh(xdr); + if (status) goto out; - if ((status = decode_remove(&xdr, &res->cinfo)) != 0) + status = decode_remove(xdr, &res->cinfo); + if (status) goto out; - decode_getfattr(&xdr, res->dir_attr, res->server, + decode_getfattr(xdr, res->dir_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5148,34 +5152,38 @@ out: /* * Decode RENAME response */ -static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, __be32 *p, struct nfs_renameres *res) +static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs_renameres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - if ((status = decode_putfh(&xdr)) != 0) + status = decode_putfh(xdr); + if (status) goto out; - if ((status = decode_savefh(&xdr)) != 0) + status = decode_savefh(xdr); + if (status) goto out; - if ((status = decode_putfh(&xdr)) != 0) + status = decode_putfh(xdr); + if (status) goto out; - if ((status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo)) != 0) + status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo); + if (status) goto out; /* Current FH is target directory */ - if (decode_getfattr(&xdr, res->new_fattr, res->server, + if (decode_getfattr(xdr, res->new_fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)) != 0) goto out; - if ((status = decode_restorefh(&xdr)) != 0) + status = decode_restorefh(xdr); + if (status) goto out; - decode_getfattr(&xdr, res->old_fattr, res->server, + decode_getfattr(xdr, res->old_fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5184,37 +5192,41 @@ out: /* * Decode LINK response */ -static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_link_res *res) +static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs4_link_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - if ((status = decode_putfh(&xdr)) != 0) + status = decode_putfh(xdr); + if (status) goto out; - if ((status = decode_savefh(&xdr)) != 0) + status = decode_savefh(xdr); + if (status) goto out; - if ((status = decode_putfh(&xdr)) != 0) + status = decode_putfh(xdr); + if (status) goto out; - if ((status = decode_link(&xdr, &res->cinfo)) != 0) + status = decode_link(xdr, &res->cinfo); + if (status) goto out; /* * Note order: OP_LINK leaves the directory as the current * filehandle. 
*/ - if (decode_getfattr(&xdr, res->dir_attr, res->server, + if (decode_getfattr(xdr, res->dir_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)) != 0) goto out; - if ((status = decode_restorefh(&xdr)) != 0) + status = decode_restorefh(xdr); + if (status) goto out; - decode_getfattr(&xdr, res->fattr, res->server, + decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5223,33 +5235,37 @@ out: /* * Decode CREATE response */ -static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res) +static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs4_create_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - if ((status = decode_putfh(&xdr)) != 0) + status = decode_putfh(xdr); + if (status) goto out; - if ((status = decode_savefh(&xdr)) != 0) + status = decode_savefh(xdr); + if (status) goto out; - if ((status = decode_create(&xdr,&res->dir_cinfo)) != 0) + status = decode_create(xdr, &res->dir_cinfo); + if (status) goto out; - if ((status = decode_getfh(&xdr, res->fh)) != 0) + status = decode_getfh(xdr, res->fh); + if (status) goto out; - if (decode_getfattr(&xdr, res->fattr, res->server, + if (decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)) != 0) goto out; - if ((status = decode_restorefh(&xdr)) != 0) + status = decode_restorefh(xdr); + if (status) goto out; - decode_getfattr(&xdr, res->dir_fattr, res->server, + decode_getfattr(xdr, res->dir_fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5258,31 +5274,31 @@ out: /* * Decode SYMLINK response */ -static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res) +static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs4_create_res *res) { - return nfs4_xdr_dec_create(rqstp, p, res); + return nfs4_xdr_dec_create(rqstp, xdr, res); } /* * Decode GETATTR response */ -static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_getattr_res *res) +static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs4_getattr_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_getfattr(&xdr, res->fattr, res->server, + status = decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5309,24 +5325,22 @@ static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr, * Decode SETACL response */ static int -nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, __be32 *p, +nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_setaclres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + 
status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_setattr(&xdr); + status = decode_setattr(xdr); out: return status; } @@ -5335,24 +5349,22 @@ out: * Decode GETACL response */ static int -nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, __be32 *p, +nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nfs_getaclres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_getacl(&xdr, rqstp, &res->acl_len); + status = decode_getacl(xdr, rqstp, &res->acl_len); out: return status; @@ -5361,23 +5373,22 @@ out: /* * Decode CLOSE response */ -static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res) +static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs_closeres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_close(&xdr, res); + status = decode_close(xdr, res); if (status != 0) goto out; /* @@ -5386,7 +5397,7 @@ static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_clos * an ESTALE error. Shouldn't be a problem, * though, since fattr->valid will remain unset. 
*/ - decode_getfattr(&xdr, res->fattr, res->server, + decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5395,36 +5406,35 @@ out: /* * Decode OPEN response */ -static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res) +static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs_openres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_savefh(&xdr); + status = decode_savefh(xdr); if (status) goto out; - status = decode_open(&xdr, res); + status = decode_open(xdr, res); if (status) goto out; - if (decode_getfh(&xdr, &res->fh) != 0) + if (decode_getfh(xdr, &res->fh) != 0) goto out; - if (decode_getfattr(&xdr, res->f_attr, res->server, + if (decode_getfattr(xdr, res->f_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)) != 0) goto out; - if (decode_restorefh(&xdr) != 0) + if (decode_restorefh(xdr) != 0) goto out; - decode_getfattr(&xdr, res->dir_attr, res->server, + decode_getfattr(xdr, res->dir_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5433,20 +5443,20 @@ out: /* * Decode OPEN_CONFIRM response */ -static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp, __be32 *p, struct nfs_open_confirmres *res) +static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, + struct nfs_open_confirmres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_open_confirm(&xdr, res); + status = decode_open_confirm(xdr, res); out: return status; } @@ -5454,26 +5464,26 @@ out: /* * Decode OPEN response */ -static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res) +static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, + struct nfs_openres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_open(&xdr, res); + status = decode_open(xdr, res); if (status) goto out; - decode_getfattr(&xdr, res->f_attr, res->server, + decode_getfattr(xdr, res->f_attr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5482,26 +5492,26 @@ out: /* * Decode SETATTR response */ -static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_setattrres *res) +static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, + struct nfs_setattrres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = 
decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_setattr(&xdr); + status = decode_setattr(xdr); if (status) goto out; - decode_getfattr(&xdr, res->fattr, res->server, + decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5510,23 +5520,22 @@ out: /* * Decode LOCK response */ -static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lock_res *res) +static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs_lock_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_lock(&xdr, res); + status = decode_lock(xdr, res); out: return status; } @@ -5534,23 +5543,22 @@ out: /* * Decode LOCKT response */ -static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lockt_res *res) +static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs_lockt_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_lockt(&xdr, res); + status = decode_lockt(xdr, res); out: return status; } @@ -5558,61 +5566,58 @@ out: /* * Decode LOCKU response */ -static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, __be32 *p, struct nfs_locku_res *res) +static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs_locku_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_locku(&xdr, res); + status = decode_locku(xdr, res); out: return status; } -static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp, __be32 *p, void *dummy) +static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, void *dummy) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (!status) - status = decode_release_lockowner(&xdr); + status = decode_release_lockowner(xdr); return status; } /* * Decode READLINK response */ -static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, __be32 *p, +static int 
nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, struct nfs4_readlink_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_readlink(&xdr, rqstp); + status = decode_readlink(xdr, rqstp); out: return status; } @@ -5620,23 +5625,22 @@ out: /* * Decode READDIR response */ -static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_readdir_res *res) +static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs4_readdir_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_readdir(&xdr, rqstp, res); + status = decode_readdir(xdr, rqstp, res); out: return status; } @@ -5644,23 +5648,22 @@ out: /* * Decode Read response */ -static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, __be32 *p, struct nfs_readres *res) +static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs_readres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_read(&xdr, rqstp, res); + status = decode_read(xdr, rqstp, res); if (!status) status = res->count; out: @@ -5670,26 +5673,25 @@ out: /* * Decode WRITE response */ -static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writeres *res) +static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs_writeres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_write(&xdr, res); + status = decode_write(xdr, res); if (status) goto out; - decode_getfattr(&xdr, res->fattr, res->server, + decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); if (!status) status = res->count; @@ -5700,26 +5702,25 @@ out: /* * Decode COMMIT response */ -static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writeres *res) +static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct nfs_writeres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - 
xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_commit(&xdr, res); + status = decode_commit(xdr, res); if (status) goto out; - decode_getfattr(&xdr, res->fattr, res->server, + decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5728,85 +5729,80 @@ out: /* * Decode FSINFO response */ -static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p, +static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_fsinfo_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (!status) - status = decode_sequence(&xdr, &res->seq_res, req); + status = decode_sequence(xdr, &res->seq_res, req); if (!status) - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (!status) - status = decode_fsinfo(&xdr, res->fsinfo); + status = decode_fsinfo(xdr, res->fsinfo); return status; } /* * Decode PATHCONF response */ -static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p, +static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_pathconf_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (!status) - status = decode_sequence(&xdr, &res->seq_res, req); + status = decode_sequence(xdr, &res->seq_res, req); if (!status) - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (!status) - status = decode_pathconf(&xdr, res->pathconf); + status = decode_pathconf(xdr, res->pathconf); return status; } /* * Decode STATFS response */ -static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p, +static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr, struct nfs4_statfs_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (!status) - status = decode_sequence(&xdr, &res->seq_res, req); + status = decode_sequence(xdr, &res->seq_res, req); if (!status) - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (!status) - status = decode_statfs(&xdr, res->fsstat); + status = decode_statfs(xdr, res->fsstat); return status; } /* * Decode GETATTR_BITMAP response */ -static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, __be32 *p, struct nfs4_server_caps_res *res) +static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs4_server_caps_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, req); + status = decode_sequence(xdr, &res->seq_res, req); if (status) goto out; - if ((status = decode_putfh(&xdr)) != 0) + status = decode_putfh(xdr); + if (status) goto out; - status = decode_server_caps(&xdr, res); + 
status = decode_server_caps(xdr, res); out: return status; } @@ -5814,79 +5810,77 @@ out: /* * Decode RENEW response */ -static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, __be32 *p, void *dummy) +static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + void *__unused) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (!status) - status = decode_renew(&xdr); + status = decode_renew(xdr); return status; } /* * Decode SETCLIENTID response */ -static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p, - struct nfs4_setclientid_res *res) +static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs4_setclientid_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (!status) - status = decode_setclientid(&xdr, res); + status = decode_setclientid(xdr, res); return status; } /* * Decode SETCLIENTID_CONFIRM response */ -static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *fsinfo) +static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs_fsinfo *fsinfo) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (!status) - status = decode_setclientid_confirm(&xdr); + status = decode_setclientid_confirm(xdr); if (!status) - status = decode_putrootfh(&xdr); + status = decode_putrootfh(xdr); if (!status) - status = decode_fsinfo(&xdr, fsinfo); + status = decode_fsinfo(xdr, fsinfo); return status; } /* * Decode DELEGRETURN response */ -static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_delegreturnres *res) +static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, + struct nfs4_delegreturnres *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status != 0) goto out; - status = decode_delegreturn(&xdr); + status = decode_delegreturn(xdr); if (status != 0) goto out; - decode_getfattr(&xdr, res->fattr, res->server, + decode_getfattr(xdr, res->fattr, res->server, !RPC_IS_ASYNC(rqstp->rq_task)); out: return status; @@ -5895,26 +5889,27 @@ out: /* * Decode FS_LOCATIONS response */ -static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p, +static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, + struct xdr_stream *xdr, struct nfs4_fs_locations_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &req->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, req); + status = decode_sequence(xdr, &res->seq_res, req); if (status) goto out; - if ((status = decode_putfh(&xdr)) != 0) + status = decode_putfh(xdr); + 
if (status) goto out; - if ((status = decode_lookup(&xdr)) != 0) + status = decode_lookup(xdr); + if (status) goto out; - xdr_enter_page(&xdr, PAGE_SIZE); - status = decode_getfattr(&xdr, &res->fs_locations->fattr, + xdr_enter_page(xdr, PAGE_SIZE); + status = decode_getfattr(xdr, &res->fs_locations->fattr, res->fs_locations->server, !RPC_IS_ASYNC(req->rq_task)); out: @@ -5925,129 +5920,122 @@ out: /* * Decode EXCHANGE_ID response */ -static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p, +static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, void *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (!status) - status = decode_exchange_id(&xdr, res); + status = decode_exchange_id(xdr, res); return status; } /* * Decode CREATE_SESSION response */ -static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p, +static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, struct nfs41_create_session_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (!status) - status = decode_create_session(&xdr, res); + status = decode_create_session(xdr, res); return status; } /* * Decode DESTROY_SESSION response */ -static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p, - void *dummy) +static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, + void *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (!status) - status = decode_destroy_session(&xdr, dummy); + status = decode_destroy_session(xdr, res); return status; } /* * Decode SEQUENCE response */ -static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p, +static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, struct nfs4_sequence_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (!status) - status = decode_sequence(&xdr, res, rqstp); + status = decode_sequence(xdr, res, rqstp); return status; } /* * Decode GET_LEASE_TIME response */ -static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p, +static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, struct nfs4_get_lease_time_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (!status) - status = decode_sequence(&xdr, &res->lr_seq_res, rqstp); + status = decode_sequence(xdr, &res->lr_seq_res, rqstp); if (!status) - status = decode_putrootfh(&xdr); + status = decode_putrootfh(xdr); if (!status) - status = decode_fsinfo(&xdr, res->lr_fsinfo); + status = decode_fsinfo(xdr, res->lr_fsinfo); return status; } /* * Decode RECLAIM_COMPLETE response */ -static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, uint32_t *p, +static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, + struct 
xdr_stream *xdr, struct nfs41_reclaim_complete_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (!status) - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (!status) - status = decode_reclaim_complete(&xdr, (void *)NULL); + status = decode_reclaim_complete(xdr, (void *)NULL); return status; } /* * Decode GETDEVINFO response */ -static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp, uint32_t *p, +static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, struct nfs4_getdeviceinfo_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status != 0) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status != 0) goto out; - status = decode_getdeviceinfo(&xdr, res->pdev); + status = decode_getdeviceinfo(xdr, res->pdev); out: return status; } @@ -6055,24 +6043,23 @@ out: /* * Decode LAYOUTGET response */ -static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp, uint32_t *p, +static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, struct nfs4_layoutget_res *res) { - struct xdr_stream xdr; struct compound_hdr hdr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_compound_hdr(&xdr, &hdr); + status = decode_compound_hdr(xdr, &hdr); if (status) goto out; - status = decode_sequence(&xdr, &res->seq_res, rqstp); + status = decode_sequence(xdr, &res->seq_res, rqstp); if (status) goto out; - status = decode_putfh(&xdr); + status = decode_putfh(xdr); if (status) goto out; - status = decode_layoutget(&xdr, rqstp, res); + status = decode_layoutget(xdr, rqstp, res); out: return status; } @@ -6236,7 +6223,7 @@ nfs4_stat_to_errno(int stat) [NFSPROC4_CLNT_##proc] = { \ .p_proc = NFSPROC4_COMPOUND, \ .p_encode = (kxdreproc_t)nfs4_xdr_##argtype, \ - .p_decode = (kxdrproc_t)nfs4_xdr_##restype, \ + .p_decode = (kxdrdproc_t)nfs4_xdr_##restype, \ .p_arglen = NFS4_##argtype##_sz, \ .p_replen = NFS4_##restype##_sz, \ .p_statidx = NFSPROC4_CLNT_##proc, \ diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index c363efda8ecf..21a63da305ff 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -533,7 +533,8 @@ static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr, * Protocol". */ -static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p, void *__unused) +static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr, + void *__unused) { return 0; } @@ -541,26 +542,25 @@ static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p, void *__unused) /* * 20.2. 
Operation 4: CB_RECALL - Recall a Delegation */ -static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p, +static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, struct nfsd4_callback *cb) { - struct xdr_stream xdr; struct nfs4_cb_compound_hdr hdr; enum nfsstat4 nfserr; int status; - xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p); - status = decode_cb_compound4res(&xdr, &hdr); + status = decode_cb_compound4res(xdr, &hdr); if (unlikely(status)) goto out; if (cb != NULL) { - status = decode_cb_sequence4res(&xdr, cb); + status = decode_cb_sequence4res(xdr, cb); if (unlikely(status)) goto out; } - status = decode_cb_op_status(&xdr, OP_CB_RECALL, &nfserr); + status = decode_cb_op_status(xdr, OP_CB_RECALL, &nfserr); if (unlikely(status)) goto out; if (unlikely(nfserr != NFS4_OK)) @@ -578,7 +578,7 @@ out_default: [NFSPROC4_CLNT_##proc] = { \ .p_proc = NFSPROC4_CB_##call, \ .p_encode = (kxdreproc_t)nfs4_xdr_enc_##argtype, \ - .p_decode = (kxdrproc_t)nfs4_xdr_dec_##restype, \ + .p_decode = (kxdrdproc_t)nfs4_xdr_dec_##restype, \ .p_arglen = NFS4_enc_##argtype##_sz, \ .p_replen = NFS4_dec_##restype##_sz, \ .p_statidx = NFSPROC4_CB_##call, \ -- cgit v1.2.2 From b113746888c260a02f6ae1e92b0b9ef7e9c38993 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 14 Dec 2010 15:05:03 +0000 Subject: lockd: define host_for_each{_safe} macros We've got a lot of loops like this, and I find them a little easier to read with the macros. More such loops are coming. Signed-off-by: J. Bruce Fields [ cel: Forward-ported to 2.6.37 ] Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/host.c | 107 +++++++++++++++++++++++++++++--------------------------- 1 file changed, 55 insertions(+), 52 deletions(-) (limited to 'fs') diff --git a/fs/lockd/host.c b/fs/lockd/host.c index ed0c59fe23ce..cada3a12d557 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -26,6 +26,18 @@ #define NLM_HOST_COLLECT (120 * HZ) static struct hlist_head nlm_hosts[NLM_HOST_NRHASH]; + +#define for_each_host(host, pos, chain, table) \ + for ((chain) = (table); \ + (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \ + hlist_for_each_entry((host), (pos), (chain), h_hash) + +#define for_each_host_safe(host, pos, next, chain, table) \ + for ((chain) = (table); \ + (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \ + hlist_for_each_entry_safe((host), (pos), (next), \ + (chain), h_hash) + static unsigned long next_gc; static int nrhosts; static DEFINE_MUTEX(nlm_host_mutex); @@ -453,28 +465,26 @@ void nlm_host_rebooted(const struct nlm_reboot *info) * To avoid processing a host several times, we match the nsmstate. */ again: mutex_lock(&nlm_host_mutex); - for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) { - hlist_for_each_entry(host, pos, chain, h_hash) { - if (host->h_nsmhandle == nsm - && host->h_nsmstate != info->state) { - host->h_nsmstate = info->state; - host->h_state++; - - nlm_get_host(host); - mutex_unlock(&nlm_host_mutex); - - if (host->h_server) { - /* We're server for this guy, just ditch - * all the locks he held. */ - nlmsvc_free_host_resources(host); - } else { - /* He's the server, initiate lock recovery. 
*/ - nlmclnt_recovery(host); - } - - nlm_release_host(host); - goto again; + for_each_host(host, pos, chain, nlm_hosts) { + if (host->h_nsmhandle == nsm + && host->h_nsmstate != info->state) { + host->h_nsmstate = info->state; + host->h_state++; + + nlm_get_host(host); + mutex_unlock(&nlm_host_mutex); + + if (host->h_server) { + /* We're server for this guy, just ditch + * all the locks he held. */ + nlmsvc_free_host_resources(host); + } else { + /* He's the server, initiate lock recovery. */ + nlmclnt_recovery(host); } + + nlm_release_host(host); + goto again; } } mutex_unlock(&nlm_host_mutex); @@ -497,13 +507,11 @@ nlm_shutdown_hosts(void) /* First, make all hosts eligible for gc */ dprintk("lockd: nuking all hosts...\n"); - for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) { - hlist_for_each_entry(host, pos, chain, h_hash) { - host->h_expires = jiffies - 1; - if (host->h_rpcclnt) { - rpc_shutdown_client(host->h_rpcclnt); - host->h_rpcclnt = NULL; - } + for_each_host(host, pos, chain, nlm_hosts) { + host->h_expires = jiffies - 1; + if (host->h_rpcclnt) { + rpc_shutdown_client(host->h_rpcclnt); + host->h_rpcclnt = NULL; } } @@ -515,12 +523,10 @@ nlm_shutdown_hosts(void) if (nrhosts) { printk(KERN_WARNING "lockd: couldn't shutdown host module!\n"); dprintk("lockd: %d hosts left:\n", nrhosts); - for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) { - hlist_for_each_entry(host, pos, chain, h_hash) { - dprintk(" %s (cnt %d use %d exp %ld)\n", - host->h_name, atomic_read(&host->h_count), - host->h_inuse, host->h_expires); - } + for_each_host(host, pos, chain, nlm_hosts) { + dprintk(" %s (cnt %d use %d exp %ld)\n", + host->h_name, atomic_read(&host->h_count), + host->h_inuse, host->h_expires); } } } @@ -538,29 +544,26 @@ nlm_gc_hosts(void) struct nlm_host *host; dprintk("lockd: host garbage collection\n"); - for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) { - hlist_for_each_entry(host, pos, chain, h_hash) - host->h_inuse = 0; - } + for_each_host(host, pos, chain, nlm_hosts) + host->h_inuse = 0; /* Mark all hosts that hold locks, blocks or shares */ nlmsvc_mark_resources(); - for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) { - hlist_for_each_entry_safe(host, pos, next, chain, h_hash) { - if (atomic_read(&host->h_count) || host->h_inuse - || time_before(jiffies, host->h_expires)) { - dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n", - host->h_name, atomic_read(&host->h_count), - host->h_inuse, host->h_expires); - continue; - } - dprintk("lockd: delete host %s\n", host->h_name); - hlist_del_init(&host->h_hash); - - nlm_destroy_host(host); - nrhosts--; + for_each_host_safe(host, pos, next, chain, nlm_hosts) { + if (atomic_read(&host->h_count) || host->h_inuse + || time_before(jiffies, host->h_expires)) { + dprintk("nlm_gc_hosts skipping %s " + "(cnt %d use %d exp %ld)\n", + host->h_name, atomic_read(&host->h_count), + host->h_inuse, host->h_expires); + continue; } + dprintk("lockd: delete host %s\n", host->h_name); + hlist_del_init(&host->h_hash); + + nlm_destroy_host(host); + nrhosts--; } next_gc = jiffies + NLM_HOST_COLLECT; -- cgit v1.2.2 From b10e30f6559978e3c8ca2a70c1cb35d6680a4021 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 14 Dec 2010 15:05:13 +0000 Subject: lockd: reorganize nlm_host_rebooted Minor reorganization; no change in behavior. This will save some duplicated code after we split the client and server host caches. Signed-off-by: J. 
Bruce Fields [ cel: Forward-ported to 2.6.37 ] Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/host.c | 58 +++++++++++++++++++++++++++++++++------------------------ 1 file changed, 34 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/lockd/host.c b/fs/lockd/host.c index cada3a12d557..2dbf1392acfc 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -441,6 +441,31 @@ void nlm_release_host(struct nlm_host *host) } } +static struct nlm_host *next_host_state(struct hlist_head *cache, + struct nsm_handle *nsm, + const struct nlm_reboot *info) +{ + struct nlm_host *host = NULL; + struct hlist_head *chain; + struct hlist_node *pos; + + mutex_lock(&nlm_host_mutex); + for_each_host(host, pos, chain, cache) { + if (host->h_nsmhandle == nsm + && host->h_nsmstate != info->state) { + host->h_nsmstate = info->state; + host->h_state++; + + nlm_get_host(host); + mutex_unlock(&nlm_host_mutex); + goto out; + } + } +out: + mutex_unlock(&nlm_host_mutex); + return host; +} + /** * nlm_host_rebooted - Release all resources held by rebooted host * @info: pointer to decoded results of NLM_SM_NOTIFY call @@ -450,8 +475,6 @@ void nlm_release_host(struct nlm_host *host) */ void nlm_host_rebooted(const struct nlm_reboot *info) { - struct hlist_head *chain; - struct hlist_node *pos; struct nsm_handle *nsm; struct nlm_host *host; @@ -464,30 +487,17 @@ void nlm_host_rebooted(const struct nlm_reboot *info) * lock for this. * To avoid processing a host several times, we match the nsmstate. */ -again: mutex_lock(&nlm_host_mutex); - for_each_host(host, pos, chain, nlm_hosts) { - if (host->h_nsmhandle == nsm - && host->h_nsmstate != info->state) { - host->h_nsmstate = info->state; - host->h_state++; - - nlm_get_host(host); - mutex_unlock(&nlm_host_mutex); - - if (host->h_server) { - /* We're server for this guy, just ditch - * all the locks he held. */ - nlmsvc_free_host_resources(host); - } else { - /* He's the server, initiate lock recovery. */ - nlmclnt_recovery(host); - } - - nlm_release_host(host); - goto again; + while ((host = next_host_state(nlm_hosts, nsm, info)) != NULL) { + if (host->h_server) { + /* We're server for this guy, just ditch + * all the locks he held. */ + nlmsvc_free_host_resources(host); + } else { + /* He's the server, initiate lock recovery. */ + nlmclnt_recovery(host); } + nlm_release_host(host); } - mutex_unlock(&nlm_host_mutex); nsm_release(nsm); } -- cgit v1.2.2 From a7952f4056d4d9c63c70534bcfd4f2c11e487000 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 15:05:23 +0000 Subject: lockd: Add nlm_alloc_host() Refactor nlm_host allocation and initialization into a separate function. This will be the common piece of server and client nlm_host lookup logic after the nlm_host cache is split. Small change: use kmalloc() instead of kzalloc(), as we're overwriting almost all fields in the new nlm_host struct with non-zero values immediately after it is allocated. An added benefit is we now have an explicit reference to each field name where it is initialized (for all you cscope fans out there). 
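
Editorial aside, not part of the patch text above: the kmalloc()-over-kzalloc() choice described in this message trades implicit zeroing for an explicit assignment to every field, which is what makes each initialization site searchable. A minimal sketch of that pattern follows, using a hypothetical struct foo and helper names (alloc_foo_zeroed, alloc_foo_explicit) rather than the real nlm_host code; only kmalloc(), kzalloc() and GFP_KERNEL are the actual kernel interfaces.

    /* Hypothetical example type; stands in for nlm_host. */
    struct foo {
            char *name;
            int count;
            int flags;
    };

    /* kzalloc(): fields not mentioned are zero, but no line of code says so. */
    static struct foo *alloc_foo_zeroed(char *name)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
            if (f == NULL)
                    return NULL;
            f->name = name;
            return f;
    }

    /* kmalloc(): every field is assigned by name, the style nlm_alloc_host() adopts. */
    static struct foo *alloc_foo_explicit(char *name)
    {
            struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
            if (f == NULL)
                    return NULL;
            f->name = name;
            f->count = 0;
            f->flags = 0;
            return f;
    }
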
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/host.c | 110 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 65 insertions(+), 45 deletions(-) (limited to 'fs') diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 2dbf1392acfc..1911f34be976 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -99,6 +99,68 @@ static unsigned int nlm_hash_address(const struct sockaddr *sap) return hash & (NLM_HOST_NRHASH - 1); } +/* + * Allocate and initialize an nlm_host. Common to both client and server. + */ +static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni, + struct nsm_handle *nsm) +{ + struct nlm_host *host = NULL; + unsigned long now = jiffies; + + if (nsm != NULL) + atomic_inc(&nsm->sm_count); + else { + host = NULL; + nsm = nsm_get_handle(ni->sap, ni->salen, + ni->hostname, ni->hostname_len); + if (unlikely(nsm == NULL)) { + dprintk("lockd: %s failed; no nsm handle\n", + __func__); + goto out; + } + } + + host = kmalloc(sizeof(*host), GFP_KERNEL); + if (unlikely(host == NULL)) { + dprintk("lockd: %s failed; no memory\n", __func__); + nsm_release(nsm); + goto out; + } + + memcpy(nlm_addr(host), ni->sap, ni->salen); + host->h_addrlen = ni->salen; + rpc_set_port(nlm_addr(host), 0); + host->h_srcaddrlen = 0; + + host->h_rpcclnt = NULL; + host->h_name = nsm->sm_name; + host->h_version = ni->version; + host->h_proto = ni->protocol; + host->h_reclaiming = 0; + host->h_server = ni->server; + host->h_noresvport = ni->noresvport; + host->h_inuse = 0; + init_waitqueue_head(&host->h_gracewait); + init_rwsem(&host->h_rwsem); + host->h_state = 0; + host->h_nsmstate = 0; + host->h_pidcount = 0; + atomic_set(&host->h_count, 1); + mutex_init(&host->h_mutex); + host->h_nextrebind = now + NLM_HOST_REBIND; + host->h_expires = now + NLM_HOST_EXPIRE; + INIT_LIST_HEAD(&host->h_lockowners); + spin_lock_init(&host->h_lock); + INIT_LIST_HEAD(&host->h_granted); + INIT_LIST_HEAD(&host->h_reclaim); + host->h_nsmhandle = nsm; + host->h_addrbuf = nsm->sm_addrbuf; + +out: + return host; +} + /* * Common host lookup routine for server & client */ @@ -150,55 +212,13 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni) goto out; } - /* - * The host wasn't in our hash table. If we don't - * have an NSM handle for it yet, create one. 
- */ - if (nsm) - atomic_inc(&nsm->sm_count); - else { - host = NULL; - nsm = nsm_get_handle(ni->sap, ni->salen, - ni->hostname, ni->hostname_len); - if (!nsm) { - dprintk("lockd: nlm_lookup_host failed; " - "no nsm handle\n"); - goto out; - } - } - - host = kzalloc(sizeof(*host), GFP_KERNEL); - if (!host) { - nsm_release(nsm); - dprintk("lockd: nlm_lookup_host failed; no memory\n"); + host = nlm_alloc_host(ni, nsm); + if (unlikely(host == NULL)) goto out; - } - host->h_name = nsm->sm_name; - host->h_addrbuf = nsm->sm_addrbuf; - memcpy(nlm_addr(host), ni->sap, ni->salen); - host->h_addrlen = ni->salen; - rpc_set_port(nlm_addr(host), 0); + memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len); host->h_srcaddrlen = ni->src_len; - host->h_version = ni->version; - host->h_proto = ni->protocol; - host->h_rpcclnt = NULL; - mutex_init(&host->h_mutex); - host->h_nextrebind = jiffies + NLM_HOST_REBIND; - host->h_expires = jiffies + NLM_HOST_EXPIRE; - atomic_set(&host->h_count, 1); - init_waitqueue_head(&host->h_gracewait); - init_rwsem(&host->h_rwsem); - host->h_state = 0; /* pseudo NSM state */ - host->h_nsmstate = 0; /* real NSM state */ - host->h_nsmhandle = nsm; - host->h_server = ni->server; - host->h_noresvport = ni->noresvport; hlist_add_head(&host->h_hash, chain); - INIT_LIST_HEAD(&host->h_lockowners); - spin_lock_init(&host->h_lock); - INIT_LIST_HEAD(&host->h_granted); - INIT_LIST_HEAD(&host->h_reclaim); nrhosts++; -- cgit v1.2.2 From 723bb5b5052faba57060a2feb564ced22416b5bc Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 15:05:33 +0000 Subject: lockd: Add nlm_destroy_host_locked() Refactor the tail of nlm_gc_hosts() into nlm_destroy_host() so that this logic can be used separately from garbage collection. Rename it _locked() to document that it must be called with the hosts cache mutex held. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/host.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 1911f34be976..e58e1426d161 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -231,16 +231,21 @@ out: } /* - * Destroy a host + * Destroy an nlm_host and free associated resources + * + * Caller must hold nlm_host_mutex. */ -static void -nlm_destroy_host(struct nlm_host *host) +static void nlm_destroy_host_locked(struct nlm_host *host) { struct rpc_clnt *clnt; + dprintk("lockd: destroy host %s\n", host->h_name); + BUG_ON(!list_empty(&host->h_lockowners)); BUG_ON(atomic_read(&host->h_count)); + hlist_del_init(&host->h_hash); + nsm_unmonitor(host); nsm_release(host->h_nsmhandle); @@ -248,6 +253,8 @@ nlm_destroy_host(struct nlm_host *host) if (clnt != NULL) rpc_shutdown_client(clnt); kfree(host); + + nrhosts--; } /** @@ -589,11 +596,7 @@ nlm_gc_hosts(void) host->h_inuse, host->h_expires); continue; } - dprintk("lockd: delete host %s\n", host->h_name); - hlist_del_init(&host->h_hash); - - nlm_destroy_host(host); - nrhosts--; + nlm_destroy_host_locked(host); } next_gc = jiffies + NLM_HOST_COLLECT; -- cgit v1.2.2 From 7db836d4a427c3c64406b00b6d8d745d6335d72a Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 15:05:42 +0000 Subject: lockd: Split nlm_release_call() The nlm_release_call() function is invoked from both the server and the client side. We're about to introduce a distinct server- and client-side nlm_release_host(), so nlm_release_call() must first be split into a client-side and a server-side version. 
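Once the whole series is applied, the two copies diverge only in which host-release helper they call. Condensed from the later patches in this series, the end result is roughly:

	void nlmclnt_release_call(struct nlm_rqst *call)	/* client side */
	{
		if (!atomic_dec_and_test(&call->a_count))
			return;
		nlmclnt_release_host(call->a_host);
		nlmclnt_release_lockargs(call);
		kfree(call);
	}

	void nlmsvc_release_call(struct nlm_rqst *call)		/* server side */
	{
		if (!atomic_dec_and_test(&call->a_count))
			return;
		nlmsvc_release_host(call->a_host);
		kfree(call);
	}
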
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/clntproc.c | 12 ++++++------ fs/lockd/svc4proc.c | 4 ++-- fs/lockd/svclock.c | 4 ++-- fs/lockd/svcproc.c | 12 ++++++++++-- 4 files changed, 20 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 332c54cf75e0..fbc6617f76c4 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -211,7 +211,7 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) return NULL; } -void nlm_release_call(struct nlm_rqst *call) +void nlmclnt_release_call(struct nlm_rqst *call) { if (!atomic_dec_and_test(&call->a_count)) return; @@ -222,7 +222,7 @@ void nlm_release_call(struct nlm_rqst *call) static void nlmclnt_rpc_release(void *data) { - nlm_release_call(data); + nlmclnt_release_call(data); } static int nlm_wait_on_grace(wait_queue_head_t *queue) @@ -436,7 +436,7 @@ nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl) status = nlm_stat_to_errno(req->a_res.status); } out: - nlm_release_call(req); + nlmclnt_release_call(req); return status; } @@ -593,7 +593,7 @@ again: out_unblock: nlmclnt_finish_block(block); out: - nlm_release_call(req); + nlmclnt_release_call(req); return status; out_unlock: /* Fatal error: ensure that we remove the lock altogether */ @@ -694,7 +694,7 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) /* What to do now? I'm out of my depth... */ status = -ENOLCK; out: - nlm_release_call(req); + nlmclnt_release_call(req); return status; } @@ -755,7 +755,7 @@ static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl NLMPROC_CANCEL, &nlmclnt_cancel_ops); if (status == 0 && req->a_res.status == nlm_lck_denied) status = -ENOLCK; - nlm_release_call(req); + nlmclnt_release_call(req); return status; } diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c index 38d261192453..c187422026d8 100644 --- a/fs/lockd/svc4proc.c +++ b/fs/lockd/svc4proc.c @@ -229,7 +229,7 @@ static void nlm4svc_callback_exit(struct rpc_task *task, void *data) static void nlm4svc_callback_release(void *data) { - nlm_release_call(data); + nlmsvc_release_call(data); } static const struct rpc_call_ops nlm4svc_callback_ops = { @@ -261,7 +261,7 @@ static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args stat = func(rqstp, argp, &call->a_res); if (stat != 0) { - nlm_release_call(call); + nlmsvc_release_call(call); return stat; } diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index 9266c4600208..6e31695d046f 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -234,7 +234,7 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host, failed_free: kfree(block); failed: - nlm_release_call(call); + nlmsvc_release_call(call); return NULL; } @@ -267,7 +267,7 @@ static void nlmsvc_free_block(struct kref *kref) mutex_unlock(&file->f_mutex); nlmsvc_freegrantargs(block->b_call); - nlm_release_call(block->b_call); + nlmsvc_release_call(block->b_call); nlm_release_file(block->b_file); kfree(block->b_fl); kfree(block); diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index 0caea5310ac3..0df65ec29e43 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c @@ -257,9 +257,17 @@ static void nlmsvc_callback_exit(struct rpc_task *task, void *data) -task->tk_status); } +void nlmsvc_release_call(struct nlm_rqst *call) +{ + if (!atomic_dec_and_test(&call->a_count)) + return; + nlm_release_host(call->a_host); + kfree(call); +} + static void nlmsvc_callback_release(void *data) { - nlm_release_call(data); + nlmsvc_release_call(data); } 
static const struct rpc_call_ops nlmsvc_callback_ops = { @@ -291,7 +299,7 @@ static __be32 nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args stat = func(rqstp, argp, &call->a_res); if (stat != 0) { - nlm_release_call(call); + nlmsvc_release_call(call); return stat; } -- cgit v1.2.2 From 8ea6ecc8b0759756a766c05dc7c98c51ec90de37 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 15:05:52 +0000 Subject: lockd: Create client-side nlm_host cache NFS clients don't need the garbage collection processing that is performed on nlm_host structures. The client picks up an nlm_host at mount time and holds a reference to it until the file system is unmounted. Servers, on the other hand, don't have a precise way to tell when an nlm_host is no longer being used, so zero refcount nlm_host entries are left to expire in the cache after a time. Basically there's nothing holding a reference to an nlm_host between individual server-side NLM requests, but we can't afford the expense of recreating them for every new NLM request from a client. The nlm_host cache adds some lifetime hysteresis to entries in the cache so the next time a particular nlm_host is needed, it's likely to be discovered by a lookup rather than created from whole cloth. With the new implementation, client nlm_host cache items are no longer garbage collected, and are destroyed directly by a new release function specialized for client entries, nlmclnt_release_host(). They are cached in their own data structure, and have their own lookup logic, simplified and specialized for client nlm_host entries. However, the client nlm_host cache still shares reboot recovery logic with the server nlm_host cache. The NSM "peer rebooted" downcall for clients and servers still come through the same RPC call. This is a legacy formal API that would be difficult to alter, and besides, the user space NSM implementation can't tell the difference between peers that are clients or servers. For this reason, the client cache continues to share the nlm_host_mutex (and reboot recovery logic) with the server cache. 
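The difference in lifetime rules can be summarized against the helpers this and the following patches introduce. This is only a sketch; arguments are elided where the hunks below do not show them:

	/* Client: one reference taken at mount time, held until unmount. */
	host = nlmclnt_lookup_host(sap, ...);	/* via nlmclnt_init() */
	...
	nlmclnt_release_host(host);		/* via nlmclnt_done(); the final
						 * put destroys the entry directly,
						 * no garbage collection involved */

	/* Server: a reference per NLM request; entries with no users linger
	 * in the cache until nlm_gc_hosts() expires them. */
	host = nlmsvc_lookup_host(rqstp, hostname, hostname_len);
	...
	nlmsvc_release_host(host);		/* only drops the count */
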
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/clntlock.c | 4 +-- fs/lockd/clntproc.c | 6 ++-- fs/lockd/host.c | 81 +++++++++++++++++++++++++++++++++++++++++++++++------ 3 files changed, 77 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 25509eb28fd7..8d4ea8351e3d 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c @@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(nlmclnt_init); */ void nlmclnt_done(struct nlm_host *host) { - nlm_release_host(host); + nlmclnt_release_host(host); lockd_down(); } EXPORT_SYMBOL_GPL(nlmclnt_done); @@ -273,7 +273,7 @@ restart: spin_unlock(&nlm_blocked_lock); /* Release host handle after use */ - nlm_release_host(host); + nlmclnt_release_host(host); lockd_down(); return 0; } diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index fbc6617f76c4..adb45ec9038c 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -58,7 +58,7 @@ static void nlm_put_lockowner(struct nlm_lockowner *lockowner) return; list_del(&lockowner->list); spin_unlock(&lockowner->host->h_lock); - nlm_release_host(lockowner->host); + nlmclnt_release_host(lockowner->host); kfree(lockowner); } @@ -207,7 +207,7 @@ struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) printk("nlm_alloc_call: failed, waiting for memory\n"); schedule_timeout_interruptible(5*HZ); } - nlm_release_host(host); + nlmclnt_release_host(host); return NULL; } @@ -215,7 +215,7 @@ void nlmclnt_release_call(struct nlm_rqst *call) { if (!atomic_dec_and_test(&call->a_count)) return; - nlm_release_host(call->a_host); + nlmclnt_release_host(call->a_host); nlmclnt_release_lockargs(call); kfree(call); } diff --git a/fs/lockd/host.c b/fs/lockd/host.c index e58e1426d161..c6942fb4bd0d 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -26,6 +26,7 @@ #define NLM_HOST_COLLECT (120 * HZ) static struct hlist_head nlm_hosts[NLM_HOST_NRHASH]; +static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH]; #define for_each_host(host, pos, chain, table) \ for ((chain) = (table); \ @@ -288,12 +289,76 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap, .hostname_len = strlen(hostname), .noresvport = noresvport, }; + struct hlist_head *chain; + struct hlist_node *pos; + struct nlm_host *host; + struct nsm_handle *nsm = NULL; dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__, (hostname ? hostname : ""), version, (protocol == IPPROTO_UDP ? "udp" : "tcp")); - return nlm_lookup_host(&ni); + mutex_lock(&nlm_host_mutex); + + chain = &nlm_client_hosts[nlm_hash_address(sap)]; + hlist_for_each_entry(host, pos, chain, h_hash) { + if (!rpc_cmp_addr(nlm_addr(host), sap)) + continue; + + /* Same address. 
Share an NSM handle if we already have one */ + if (nsm == NULL) + nsm = host->h_nsmhandle; + + if (host->h_proto != protocol) + continue; + if (host->h_version != version) + continue; + + nlm_get_host(host); + dprintk("lockd: %s found host %s (%s)\n", __func__, + host->h_name, host->h_addrbuf); + goto out; + } + + host = nlm_alloc_host(&ni, nsm); + if (unlikely(host == NULL)) + goto out; + + hlist_add_head(&host->h_hash, chain); + nrhosts++; + + dprintk("lockd: %s created host %s (%s)\n", __func__, + host->h_name, host->h_addrbuf); + +out: + mutex_unlock(&nlm_host_mutex); + return host; +} + +/** + * nlmclnt_release_host - release client nlm_host + * @host: nlm_host to release + * + */ +void nlmclnt_release_host(struct nlm_host *host) +{ + if (host == NULL) + return; + + dprintk("lockd: release client host %s\n", host->h_name); + + BUG_ON(atomic_read(&host->h_count) < 0); + BUG_ON(host->h_server); + + if (atomic_dec_and_test(&host->h_count)) { + BUG_ON(!list_empty(&host->h_lockowners)); + BUG_ON(!list_empty(&host->h_granted)); + BUG_ON(!list_empty(&host->h_reclaim)); + + mutex_lock(&nlm_host_mutex); + nlm_destroy_host_locked(host); + mutex_unlock(&nlm_host_mutex); + } } /** @@ -515,16 +580,14 @@ void nlm_host_rebooted(const struct nlm_reboot *info) * To avoid processing a host several times, we match the nsmstate. */ while ((host = next_host_state(nlm_hosts, nsm, info)) != NULL) { - if (host->h_server) { - /* We're server for this guy, just ditch - * all the locks he held. */ - nlmsvc_free_host_resources(host); - } else { - /* He's the server, initiate lock recovery. */ - nlmclnt_recovery(host); - } + nlmsvc_free_host_resources(host); nlm_release_host(host); } + while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) { + nlmclnt_recovery(host); + nlmclnt_release_host(host); + } + nsm_release(nsm); } -- cgit v1.2.2 From 67216b94d498f5880d8bba2a6b841880739dd524 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 15:06:12 +0000 Subject: lockd: Clean up nlmsvc_lookup_host() Clean up. Change nlmsvc_lookup_host() to be purpose-built for server-side nlm_host management. This replaces the generic nlm_lookup_host() helper function, just like on the client side. The lookup logic is specialized for server host lookups. The server side cache also gets its own specialized equivalent of the nlm_release_host() function. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/host.c | 91 ++++++++++++++++++++++++++++++++++++++++++----------- fs/lockd/svc4proc.c | 16 +++++----- fs/lockd/svcproc.c | 18 +++++------ 3 files changed, 89 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/lockd/host.c b/fs/lockd/host.c index c6942fb4bd0d..0250b0e4f5e9 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -383,6 +383,10 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, const char *hostname, const size_t hostname_len) { + struct hlist_head *chain; + struct hlist_node *pos; + struct nlm_host *host = NULL; + struct nsm_handle *nsm = NULL; struct sockaddr_in sin = { .sin_family = AF_INET, }; @@ -404,6 +408,8 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, (int)hostname_len, hostname, rqstp->rq_vers, (rqstp->rq_prot == IPPROTO_UDP ? 
"udp" : "tcp")); + mutex_lock(&nlm_host_mutex); + switch (ni.sap->sa_family) { case AF_INET: sin.sin_addr.s_addr = rqstp->rq_daddr.addr.s_addr; @@ -414,10 +420,73 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, ni.src_sap = (struct sockaddr *)&sin6; break; default: - return NULL; + dprintk("lockd: %s failed; unrecognized address family\n", + __func__); + goto out; } - return nlm_lookup_host(&ni); + if (time_after_eq(jiffies, next_gc)) + nlm_gc_hosts(); + + chain = &nlm_hosts[nlm_hash_address(ni.sap)]; + hlist_for_each_entry(host, pos, chain, h_hash) { + if (!rpc_cmp_addr(nlm_addr(host), ni.sap)) + continue; + + /* Same address. Share an NSM handle if we already have one */ + if (nsm == NULL) + nsm = host->h_nsmhandle; + + if (host->h_proto != ni.protocol) + continue; + if (host->h_version != ni.version) + continue; + if (!rpc_cmp_addr(nlm_srcaddr(host), ni.src_sap)) + continue; + + /* Move to head of hash chain. */ + hlist_del(&host->h_hash); + hlist_add_head(&host->h_hash, chain); + + nlm_get_host(host); + dprintk("lockd: %s found host %s (%s)\n", + __func__, host->h_name, host->h_addrbuf); + goto out; + } + + host = nlm_alloc_host(&ni, nsm); + if (unlikely(host == NULL)) + goto out; + + memcpy(nlm_srcaddr(host), ni.src_sap, ni.src_len); + host->h_srcaddrlen = ni.src_len; + hlist_add_head(&host->h_hash, chain); + nrhosts++; + + dprintk("lockd: %s created host %s (%s)\n", + __func__, host->h_name, host->h_addrbuf); + +out: + mutex_unlock(&nlm_host_mutex); + return host; +} + +/** + * nlmsvc_release_host - release server nlm_host + * @host: nlm_host to release + * + * Host is destroyed later in nlm_gc_host(). + */ +void nlmsvc_release_host(struct nlm_host *host) +{ + if (host == NULL) + return; + + dprintk("lockd: release server host %s\n", host->h_name); + + BUG_ON(atomic_read(&host->h_count) < 0); + BUG_ON(!host->h_server); + atomic_dec(&host->h_count); } /* @@ -517,22 +586,6 @@ struct nlm_host * nlm_get_host(struct nlm_host *host) return host; } -/* - * Release NLM host after use - */ -void nlm_release_host(struct nlm_host *host) -{ - if (host != NULL) { - dprintk("lockd: release host %s\n", host->h_name); - BUG_ON(atomic_read(&host->h_count) < 0); - if (atomic_dec_and_test(&host->h_count)) { - BUG_ON(!list_empty(&host->h_lockowners)); - BUG_ON(!list_empty(&host->h_granted)); - BUG_ON(!list_empty(&host->h_reclaim)); - } - } -} - static struct nlm_host *next_host_state(struct hlist_head *cache, struct nsm_handle *nsm, const struct nlm_reboot *info) @@ -581,7 +634,7 @@ void nlm_host_rebooted(const struct nlm_reboot *info) */ while ((host = next_host_state(nlm_hosts, nsm, info)) != NULL) { nlmsvc_free_host_resources(host); - nlm_release_host(host); + nlmsvc_release_host(host); } while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) { nlmclnt_recovery(host); diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c index c187422026d8..9a41fdc19511 100644 --- a/fs/lockd/svc4proc.c +++ b/fs/lockd/svc4proc.c @@ -51,7 +51,7 @@ nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp, return 0; no_locks: - nlm_release_host(host); + nlmsvc_release_host(host); if (error) return error; return nlm_lck_denied_nolocks; @@ -92,7 +92,7 @@ nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp, else dprintk("lockd: TEST4 status %d\n", ntohl(resp->status)); - nlm_release_host(host); + nlmsvc_release_host(host); nlm_release_file(file); return rc; } @@ -134,7 +134,7 @@ nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp, else dprintk("lockd: 
LOCK status %d\n", ntohl(resp->status)); - nlm_release_host(host); + nlmsvc_release_host(host); nlm_release_file(file); return rc; } @@ -164,7 +164,7 @@ nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp, resp->status = nlmsvc_cancel_blocked(file, &argp->lock); dprintk("lockd: CANCEL status %d\n", ntohl(resp->status)); - nlm_release_host(host); + nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } @@ -197,7 +197,7 @@ nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp, resp->status = nlmsvc_unlock(file, &argp->lock); dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status)); - nlm_release_host(host); + nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } @@ -334,7 +334,7 @@ nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp, resp->status = nlmsvc_share_file(host, file, argp); dprintk("lockd: SHARE status %d\n", ntohl(resp->status)); - nlm_release_host(host); + nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } @@ -367,7 +367,7 @@ nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp, resp->status = nlmsvc_unshare_file(host, file, argp); dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status)); - nlm_release_host(host); + nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } @@ -399,7 +399,7 @@ nlm4svc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp, return rpc_success; nlmsvc_free_host_resources(host); - nlm_release_host(host); + nlmsvc_release_host(host); return rpc_success; } diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index 0df65ec29e43..d27aab11f324 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c @@ -80,7 +80,7 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp, return 0; no_locks: - nlm_release_host(host); + nlmsvc_release_host(host); if (error) return error; return nlm_lck_denied_nolocks; @@ -122,7 +122,7 @@ nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp, dprintk("lockd: TEST status %d vers %d\n", ntohl(resp->status), rqstp->rq_vers); - nlm_release_host(host); + nlmsvc_release_host(host); nlm_release_file(file); return rc; } @@ -164,7 +164,7 @@ nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp, else dprintk("lockd: LOCK status %d\n", ntohl(resp->status)); - nlm_release_host(host); + nlmsvc_release_host(host); nlm_release_file(file); return rc; } @@ -194,7 +194,7 @@ nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp, resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock)); dprintk("lockd: CANCEL status %d\n", ntohl(resp->status)); - nlm_release_host(host); + nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } @@ -227,7 +227,7 @@ nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp, resp->status = cast_status(nlmsvc_unlock(file, &argp->lock)); dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status)); - nlm_release_host(host); + nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } @@ -261,7 +261,7 @@ void nlmsvc_release_call(struct nlm_rqst *call) { if (!atomic_dec_and_test(&call->a_count)) return; - nlm_release_host(call->a_host); + nlmsvc_release_host(call->a_host); kfree(call); } @@ -374,7 +374,7 @@ nlmsvc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp, resp->status = cast_status(nlmsvc_share_file(host, file, argp)); dprintk("lockd: SHARE status %d\n", ntohl(resp->status)); - nlm_release_host(host); + nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } @@ 
-407,7 +407,7 @@ nlmsvc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp, resp->status = cast_status(nlmsvc_unshare_file(host, file, argp)); dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status)); - nlm_release_host(host); + nlmsvc_release_host(host); nlm_release_file(file); return rpc_success; } @@ -439,7 +439,7 @@ nlmsvc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp, return rpc_success; nlmsvc_free_host_resources(host); - nlm_release_host(host); + nlmsvc_release_host(host); return rpc_success; } -- cgit v1.2.2 From d2df0484bb38f2e0d9754b00597d4a6d1cf666d0 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 15:06:22 +0000 Subject: lockd: Rename nlm_hosts Clean up. nlm_hosts now contains only server-side entries. Rename it to match convention of client side cache. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/host.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 0250b0e4f5e9..87fbde1d1a1f 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -25,7 +25,7 @@ #define NLM_HOST_EXPIRE (300 * HZ) #define NLM_HOST_COLLECT (120 * HZ) -static struct hlist_head nlm_hosts[NLM_HOST_NRHASH]; +static struct hlist_head nlm_server_hosts[NLM_HOST_NRHASH]; static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH]; #define for_each_host(host, pos, chain, table) \ @@ -184,7 +184,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni) * different NLM rpc_clients into one single nlm_host object. * This would allow us to have one nlm_host per address. */ - chain = &nlm_hosts[nlm_hash_address(ni->sap)]; + chain = &nlm_server_hosts[nlm_hash_address(ni->sap)]; hlist_for_each_entry(host, pos, chain, h_hash) { if (!rpc_cmp_addr(nlm_addr(host), ni->sap)) continue; @@ -428,7 +428,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, if (time_after_eq(jiffies, next_gc)) nlm_gc_hosts(); - chain = &nlm_hosts[nlm_hash_address(ni.sap)]; + chain = &nlm_server_hosts[nlm_hash_address(ni.sap)]; hlist_for_each_entry(host, pos, chain, h_hash) { if (!rpc_cmp_addr(nlm_addr(host), ni.sap)) continue; @@ -632,7 +632,7 @@ void nlm_host_rebooted(const struct nlm_reboot *info) * lock for this. * To avoid processing a host several times, we match the nsmstate. 
*/ - while ((host = next_host_state(nlm_hosts, nsm, info)) != NULL) { + while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) { nlmsvc_free_host_resources(host); nlmsvc_release_host(host); } @@ -660,7 +660,7 @@ nlm_shutdown_hosts(void) /* First, make all hosts eligible for gc */ dprintk("lockd: nuking all hosts...\n"); - for_each_host(host, pos, chain, nlm_hosts) { + for_each_host(host, pos, chain, nlm_server_hosts) { host->h_expires = jiffies - 1; if (host->h_rpcclnt) { rpc_shutdown_client(host->h_rpcclnt); @@ -676,7 +676,7 @@ nlm_shutdown_hosts(void) if (nrhosts) { printk(KERN_WARNING "lockd: couldn't shutdown host module!\n"); dprintk("lockd: %d hosts left:\n", nrhosts); - for_each_host(host, pos, chain, nlm_hosts) { + for_each_host(host, pos, chain, nlm_server_hosts) { dprintk(" %s (cnt %d use %d exp %ld)\n", host->h_name, atomic_read(&host->h_count), host->h_inuse, host->h_expires); @@ -697,13 +697,13 @@ nlm_gc_hosts(void) struct nlm_host *host; dprintk("lockd: host garbage collection\n"); - for_each_host(host, pos, chain, nlm_hosts) + for_each_host(host, pos, chain, nlm_server_hosts) host->h_inuse = 0; /* Mark all hosts that hold locks, blocks or shares */ nlmsvc_mark_resources(); - for_each_host_safe(host, pos, next, chain, nlm_hosts) { + for_each_host_safe(host, pos, next, chain, nlm_server_hosts) { if (atomic_read(&host->h_count) || host->h_inuse || time_before(jiffies, host->h_expires)) { dprintk("nlm_gc_hosts skipping %s " -- cgit v1.2.2 From fcc072c783491ca465e4d1e74da7dbb48dbf7a31 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 15:06:32 +0000 Subject: lockd: Make nrhosts an unsigned long Clean up. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/host.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 87fbde1d1a1f..77ec21a808db 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -40,7 +40,7 @@ static struct hlist_head nlm_client_hosts[NLM_HOST_NRHASH]; (chain), h_hash) static unsigned long next_gc; -static int nrhosts; +static unsigned long nrhosts; static DEFINE_MUTEX(nlm_host_mutex); static void nlm_gc_hosts(void); @@ -673,9 +673,9 @@ nlm_shutdown_hosts(void) mutex_unlock(&nlm_host_mutex); /* complain if any hosts are left */ - if (nrhosts) { + if (nrhosts != 0) { printk(KERN_WARNING "lockd: couldn't shutdown host module!\n"); - dprintk("lockd: %d hosts left:\n", nrhosts); + dprintk("lockd: %lu hosts left:\n", nrhosts); for_each_host(host, pos, chain, nlm_server_hosts) { dprintk(" %s (cnt %d use %d exp %ld)\n", host->h_name, atomic_read(&host->h_count), -- cgit v1.2.2 From 2025889828bb14b56d9aa4c1a785bd9847ccdc4b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 15:06:41 +0000 Subject: lockd: Remove nlm_lookup_host() Clean up. Remove the now unused helper nlm_lookup_host(). 
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/host.c | 69 --------------------------------------------------------- 1 file changed, 69 deletions(-) (limited to 'fs') diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 77ec21a808db..6d4aa8b3d610 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -162,75 +162,6 @@ out: return host; } -/* - * Common host lookup routine for server & client - */ -static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni) -{ - struct hlist_head *chain; - struct hlist_node *pos; - struct nlm_host *host; - struct nsm_handle *nsm = NULL; - - mutex_lock(&nlm_host_mutex); - - if (time_after_eq(jiffies, next_gc)) - nlm_gc_hosts(); - - /* We may keep several nlm_host objects for a peer, because each - * nlm_host is identified by - * (address, protocol, version, server/client) - * We could probably simplify this a little by putting all those - * different NLM rpc_clients into one single nlm_host object. - * This would allow us to have one nlm_host per address. - */ - chain = &nlm_server_hosts[nlm_hash_address(ni->sap)]; - hlist_for_each_entry(host, pos, chain, h_hash) { - if (!rpc_cmp_addr(nlm_addr(host), ni->sap)) - continue; - - /* See if we have an NSM handle for this client */ - if (!nsm) - nsm = host->h_nsmhandle; - - if (host->h_proto != ni->protocol) - continue; - if (host->h_version != ni->version) - continue; - if (host->h_server != ni->server) - continue; - if (ni->server && ni->src_len != 0 && - !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap)) - continue; - - /* Move to head of hash chain. */ - hlist_del(&host->h_hash); - hlist_add_head(&host->h_hash, chain); - - nlm_get_host(host); - dprintk("lockd: nlm_lookup_host found host %s (%s)\n", - host->h_name, host->h_addrbuf); - goto out; - } - - host = nlm_alloc_host(ni, nsm); - if (unlikely(host == NULL)) - goto out; - - memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len); - host->h_srcaddrlen = ni->src_len; - hlist_add_head(&host->h_hash, chain); - - nrhosts++; - - dprintk("lockd: nlm_lookup_host created host %s\n", - host->h_name); - -out: - mutex_unlock(&nlm_host_mutex); - return host; -} - /* * Destroy an nlm_host and free associated resources * -- cgit v1.2.2 From 79691836603541e81a3793970826ac4a75429572 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Tue, 14 Dec 2010 15:06:52 +0000 Subject: lockd: Remove src_sap and src_len from nlm_lookup_host_info struct Clean up. The contents of the src_sap field is not used in nlm_alloc_host(). 
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/host.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 6d4aa8b3d610..c106d6a93e5d 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -53,8 +53,6 @@ struct nlm_lookup_host_info { const u32 version; /* NLM version to search for */ const char *hostname; /* remote's hostname */ const size_t hostname_len; /* it's length */ - const struct sockaddr *src_sap; /* our address (optional) */ - const size_t src_len; /* it's length */ const int noresvport; /* use non-priv port */ }; @@ -324,6 +322,8 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6, }; + struct sockaddr *src_sap; + size_t src_len = rqstp->rq_addrlen; struct nlm_lookup_host_info ni = { .server = 1, .sap = svc_addr(rqstp), @@ -332,7 +332,6 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, .version = rqstp->rq_vers, .hostname = hostname, .hostname_len = hostname_len, - .src_len = rqstp->rq_addrlen, }; dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__, @@ -344,11 +343,11 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, switch (ni.sap->sa_family) { case AF_INET: sin.sin_addr.s_addr = rqstp->rq_daddr.addr.s_addr; - ni.src_sap = (struct sockaddr *)&sin; + src_sap = (struct sockaddr *)&sin; break; case AF_INET6: ipv6_addr_copy(&sin6.sin6_addr, &rqstp->rq_daddr.addr6); - ni.src_sap = (struct sockaddr *)&sin6; + src_sap = (struct sockaddr *)&sin6; break; default: dprintk("lockd: %s failed; unrecognized address family\n", @@ -372,7 +371,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, continue; if (host->h_version != ni.version) continue; - if (!rpc_cmp_addr(nlm_srcaddr(host), ni.src_sap)) + if (!rpc_cmp_addr(nlm_srcaddr(host), src_sap)) continue; /* Move to head of hash chain. */ @@ -389,8 +388,8 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, if (unlikely(host == NULL)) goto out; - memcpy(nlm_srcaddr(host), ni.src_sap, ni.src_len); - host->h_srcaddrlen = ni.src_len; + memcpy(nlm_srcaddr(host), src_sap, src_len); + host->h_srcaddrlen = src_len; hlist_add_head(&host->h_hash, chain); nrhosts++; -- cgit v1.2.2 From 225db7d35c33f076115a583abec238a696f4467e Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Thu, 16 Dec 2010 16:38:26 -0500 Subject: ext4: Fix up comments in inode.c This fixes up some broken argument descriptions that Namhyung Kim had originally submitted for ext3. This fixes the comments that were still applicable in ext4. Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index e659597b690b..db3cc913ee8f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -552,7 +552,7 @@ static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block, } /** - * ext4_blks_to_allocate: Look up the block map and count the number + * ext4_blks_to_allocate - Look up the block map and count the number * of direct blocks need to be allocated for the given branch. 
* * @branch: chain of indirect blocks @@ -591,13 +591,19 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks, /** * ext4_alloc_blocks: multiple allocate blocks needed for a branch + * @handle: handle for this transaction + * @inode: inode which needs allocated blocks + * @iblock: the logical block to start allocated at + * @goal: preferred physical block of allocation * @indirect_blks: the number of blocks need to allocate for indirect * blocks - * + * @blks: number of desired blocks * @new_blocks: on return it will store the new block numbers for * the indirect blocks(if needed) and the first direct block, - * @blks: on return it will store the total number of allocated - * direct blocks + * @err: on return it will store the error code + * + * This function will return the number of blocks allocated as + * requested by the passed-in parameters. */ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode, ext4_lblk_t iblock, ext4_fsblk_t goal, @@ -711,9 +717,11 @@ failed_out: /** * ext4_alloc_branch - allocate and set up a chain of blocks. + * @handle: handle for this transaction * @inode: owner * @indirect_blks: number of allocated indirect blocks * @blks: number of allocated direct blocks + * @goal: preferred place for allocation * @offsets: offsets (in the blocks) to store the pointers to next. * @branch: place to store the chain in. * @@ -826,6 +834,7 @@ failed: /** * ext4_splice_branch - splice the allocated branch onto inode. + * @handle: handle for this transaction * @inode: owner * @block: (logical) number of block we are adding * @chain: chain of indirect blocks (with a missing link - see -- cgit v1.2.2 From a8adbe378b56acd5945df70753c7e8f6fe223304 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Miros=C5=82aw?= Date: Fri, 17 Dec 2010 08:56:44 +0100 Subject: fs/splice: Pull buf->ops->confirm() from splice_from_pipe actors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch pulls calls to buf->ops->confirm() from all actors passed (also indirectly) to splice_from_pipe_feed(). Is avoiding the call to buf->ops->confirm() while splice()ing to /dev/null is an intentional optimization? No other user does that and this will remove this special case. Against current linux.git 6313e3c21743cc88bb5bd8aa72948ee1e83937b6. 
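The structural effect on splice_from_pipe_feed() is easier to see in isolation; condensed from the hunk below:

	/* Before: every actor began with
	 *	ret = buf->ops->confirm(pipe, buf);
	 *	if (unlikely(ret))
	 *		return ret;
	 * After: the feed loop performs the check once, before calling the actor.
	 */
	ret = buf->ops->confirm(pipe, buf);
	if (unlikely(ret)) {
		if (ret == -ENODATA)
			ret = 0;
		return ret;
	}

	ret = actor(pipe, buf, sd);
	if (ret <= 0)
		return ret;
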
Signed-off-by: Michał Mirosław Signed-off-by: Jens Axboe --- fs/nfsd/vfs.c | 4 ---- fs/splice.c | 43 ++++++++++++++----------------------------- 2 files changed, 14 insertions(+), 33 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 184938fcff04..c6e08661adee 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -847,10 +847,6 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, size_t size; int ret; - ret = buf->ops->confirm(pipe, buf); - if (unlikely(ret)) - return ret; - size = sd->len; if (rqstp->rq_res.page_len == 0) { diff --git a/fs/splice.c b/fs/splice.c index 8f1dfaecc8f0..d2026382ac3e 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -682,19 +682,14 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe, { struct file *file = sd->u.file; loff_t pos = sd->pos; - int ret, more; - - ret = buf->ops->confirm(pipe, buf); - if (!ret) { - more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len; - if (file->f_op && file->f_op->sendpage) - ret = file->f_op->sendpage(file, buf->page, buf->offset, - sd->len, &pos, more); - else - ret = -EINVAL; - } + int more; - return ret; + if (!likely(file->f_op && file->f_op->sendpage)) + return -EINVAL; + + more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len; + return file->f_op->sendpage(file, buf->page, buf->offset, + sd->len, &pos, more); } /* @@ -727,13 +722,6 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf, void *fsdata; int ret; - /* - * make sure the data in this buffer is uptodate - */ - ret = buf->ops->confirm(pipe, buf); - if (unlikely(ret)) - return ret; - offset = sd->pos & ~PAGE_CACHE_MASK; this_len = sd->len; @@ -805,12 +793,17 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd, if (sd->len > sd->total_len) sd->len = sd->total_len; - ret = actor(pipe, buf, sd); - if (ret <= 0) { + ret = buf->ops->confirm(pipe, buf); + if (unlikely(ret)) { if (ret == -ENODATA) ret = 0; return ret; } + + ret = actor(pipe, buf, sd); + if (ret <= 0) + return ret; + buf->offset += ret; buf->len -= ret; @@ -1044,10 +1037,6 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf, int ret; void *data; - ret = buf->ops->confirm(pipe, buf); - if (ret) - return ret; - data = buf->ops->map(pipe, buf, 0); ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos); buf->ops->unmap(pipe, buf, data); @@ -1507,10 +1496,6 @@ static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf, char *src; int ret; - ret = buf->ops->confirm(pipe, buf); - if (unlikely(ret)) - return ret; - /* * See if we can use the atomic maps, by prefaulting in the * pages and doing an atomic copy -- cgit v1.2.2 From e61eb2e93fe86931d46831752a82dab25a5335ca Mon Sep 17 00:00:00 2001 From: Yang Zhang Date: Fri, 17 Dec 2010 09:00:18 +0100 Subject: fs/block: type signature of major_to_index(int) to major_to_index(unsigned) The major/minor device numbers are always defined and used as `unsigned'. 
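The change log gives only the consistency argument. As a side note, a signed parameter would also behave worse on bogus input, since C99 remainder follows truncating division; a small user-space illustration (hypothetical input value, kernel callers already pass unsigned values, and the hash size of 255 is assumed from fs/char_dev.c):

	#include <stdio.h>

	#define CHRDEV_MAJOR_HASH_SIZE 255	/* value assumed, for illustration */

	int main(void)
	{
		int bad_major = -1;	/* a bogus, negative major number */

		/* Signed remainder is negative: prints -1, an out-of-bounds index. */
		printf("%d\n", bad_major % CHRDEV_MAJOR_HASH_SIZE);

		/* With an unsigned operand the value wraps first and the
		 * remainder is always a valid index (here 0). */
		printf("%u\n", (unsigned int)bad_major % CHRDEV_MAJOR_HASH_SIZE);
		return 0;
	}
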
Signed-off-by: Yang Zhang Signed-off-by: Jens Axboe --- fs/char_dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/char_dev.c b/fs/char_dev.c index e5b9df993b93..143f0207c7eb 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -59,7 +59,7 @@ static struct char_device_struct { } *chrdevs[CHRDEV_MAJOR_HASH_SIZE]; /* index in the above */ -static inline int major_to_index(int major) +static inline int major_to_index(unsigned major) { return major % CHRDEV_MAJOR_HASH_SIZE; } -- cgit v1.2.2 From a8901d34872dafcafa23efa0865dcecfd4fddf8c Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 17 Dec 2010 10:40:47 -0500 Subject: ext4: Use pr_warning_ratelimited() instead of printk_ratelimit() printk_ratelimit() is deprecated since it is a global instead of a per-printk ratelimit. Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index db3cc913ee8f..c0fe426d444a 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -40,6 +40,7 @@ #include #include #include +#include #include "ext4_jbd2.h" #include "xattr.h" @@ -3729,8 +3730,7 @@ static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode) retry: io_end = ext4_init_io_end(inode, GFP_ATOMIC); if (!io_end) { - if (printk_ratelimit()) - printk(KERN_WARNING "%s: allocation fail\n", __func__); + pr_warning_ratelimited("%s: allocation fail\n", __func__); schedule(); goto retry; } -- cgit v1.2.2 From 670be5a78ac7c80f0d6009d648c84c65a03f373a Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Fri, 17 Dec 2010 10:44:16 -0500 Subject: jbd2: Use pr_notice_ratelimited() in journal_alloc_journal_head() We had an open-coded version of printk_ratelimited(); use the provided abstraction to make the code cleaner and easier to understand. Based on a similar patch for fs/jbd from Namhyung Kim Signed-off-by: "Theodore Ts'o" --- fs/jbd2/journal.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index f837ba953529..06dfd778cae5 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -43,6 +43,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -1982,7 +1983,6 @@ static void jbd2_journal_destroy_jbd2_journal_head_cache(void) static struct journal_head *journal_alloc_journal_head(void) { struct journal_head *ret; - static unsigned long last_warning; #ifdef CONFIG_JBD2_DEBUG atomic_inc(&nr_journal_heads); @@ -1990,11 +1990,7 @@ static struct journal_head *journal_alloc_journal_head(void) ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS); if (!ret) { jbd_debug(1, "out of memory for journal_head\n"); - if (time_after(jiffies, last_warning + 5*HZ)) { - printk(KERN_NOTICE "ENOMEM in %s, retrying.\n", - __func__); - last_warning = jiffies; - } + pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__); while (!ret) { yield(); ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS); -- cgit v1.2.2 From 6e5f15c93dc745d46c2bb9e4597b44463203844b Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Wed, 24 Nov 2010 17:17:34 -0500 Subject: nfsd4: replace unintuitive match_clientid_establishment Reviewed-by: Benny Halevy Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4state.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 4d542cfd6960..0c61cc1eadca 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1135,19 +1135,13 @@ find_unconfirmed_client(clientid_t *clid) } /* - * Return 1 iff clp's clientid establishment method matches the use_exchange_id - * parameter. Matching is based on the fact the at least one of the - * EXCHGID4_FLAG_USE_{NON_PNFS,PNFS_MDS,PNFS_DS} flags must be set for v4.1 - * * FIXME: we need to unify the clientid namespaces for nfsv4.x * and correctly deal with client upgrade/downgrade in EXCHANGE_ID * and SET_CLIENTID{,_CONFIRM} */ -static inline int -match_clientid_establishment(struct nfs4_client *clp, bool use_exchange_id) +static bool clp_used_exchangeid(struct nfs4_client *clp) { - bool has_exchange_flags = (clp->cl_exchange_flags != 0); - return use_exchange_id == has_exchange_flags; + return clp->cl_exchange_flags != 0; } static struct nfs4_client * @@ -1158,7 +1152,7 @@ find_confirmed_client_by_str(const char *dname, unsigned int hashval, list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) { if (same_name(clp->cl_recdir, dname) && - match_clientid_establishment(clp, use_exchange_id)) + clp_used_exchangeid(clp) == use_exchange_id) return clp; } return NULL; @@ -1172,7 +1166,7 @@ find_unconfirmed_client_by_str(const char *dname, unsigned int hashval, list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) { if (same_name(clp->cl_recdir, dname) && - match_clientid_establishment(clp, use_exchange_id)) + clp_used_exchangeid(clp) == use_exchange_id) return clp; } return NULL; -- cgit v1.2.2 From e203d506bd221bfa5b3acbb7336ae7b7646636a4 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Wed, 24 Nov 2010 17:30:54 -0500 Subject: nfsd4: fix mixed 4.0/4.1 handling, 4.1 reboot Instead of failing to find client entries which don't match the minorversion, we should be finding them, then either erroring out or expiring them as appropriate. This also fixes a problem which would cause the 4.1 server to fail to recognize clients after a second reboot. Reported-by: Casey Bodley Reviewed-by: Benny Halevy Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4recover.c | 1 - fs/nfsd/nfs4state.c | 37 +++++++++++++++++-------------------- 2 files changed, 17 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 7e26caab2a26..ffb59ef6f82f 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -302,7 +302,6 @@ purge_old(struct dentry *parent, struct dentry *child) { int status; - /* note: we currently use this path only for minorversion 0 */ if (nfs4_has_reclaimed_state(child->d_name.name, false)) return 0; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 0c61cc1eadca..73adcfb2dc17 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1134,39 +1134,30 @@ find_unconfirmed_client(clientid_t *clid) return NULL; } -/* - * FIXME: we need to unify the clientid namespaces for nfsv4.x - * and correctly deal with client upgrade/downgrade in EXCHANGE_ID - * and SET_CLIENTID{,_CONFIRM} - */ static bool clp_used_exchangeid(struct nfs4_client *clp) { return clp->cl_exchange_flags != 0; -} +} static struct nfs4_client * -find_confirmed_client_by_str(const char *dname, unsigned int hashval, - bool use_exchange_id) +find_confirmed_client_by_str(const char *dname, unsigned int hashval) { struct nfs4_client *clp; list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) { - if (same_name(clp->cl_recdir, dname) && - clp_used_exchangeid(clp) == use_exchange_id) + if (same_name(clp->cl_recdir, dname)) return clp; } return NULL; } static struct nfs4_client * -find_unconfirmed_client_by_str(const char *dname, unsigned int hashval, - bool use_exchange_id) +find_unconfirmed_client_by_str(const char *dname, unsigned int hashval) { struct nfs4_client *clp; list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) { - if (same_name(clp->cl_recdir, dname) && - clp_used_exchangeid(clp) == use_exchange_id) + if (same_name(clp->cl_recdir, dname)) return clp; } return NULL; @@ -1357,8 +1348,12 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, nfs4_lock_state(); status = nfs_ok; - conf = find_confirmed_client_by_str(dname, strhashval, true); + conf = find_confirmed_client_by_str(dname, strhashval); if (conf) { + if (!clp_used_exchangeid(conf)) { + status = nfserr_clid_inuse; /* XXX: ? */ + goto out; + } if (!same_verf(&verf, &conf->cl_verifier)) { /* 18.35.4 case 8 */ if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) { @@ -1399,7 +1394,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, goto out; } - unconf = find_unconfirmed_client_by_str(dname, strhashval, true); + unconf = find_unconfirmed_client_by_str(dname, strhashval); if (unconf) { /* * Possible retry or client restart. Per 18.35.4 case 4, @@ -1799,10 +1794,12 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, strhashval = clientstr_hashval(dname); nfs4_lock_state(); - conf = find_confirmed_client_by_str(dname, strhashval, false); + conf = find_confirmed_client_by_str(dname, strhashval); if (conf) { /* RFC 3530 14.2.33 CASE 0: */ status = nfserr_clid_inuse; + if (clp_used_exchangeid(conf)) + goto out; if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { char addr_str[INET6_ADDRSTRLEN]; rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str, @@ -1817,7 +1814,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, * has a description of SETCLIENTID request processing consisting * of 5 bullet points, labeled as CASE0 - CASE4 below. 
*/ - unconf = find_unconfirmed_client_by_str(dname, strhashval, false); + unconf = find_unconfirmed_client_by_str(dname, strhashval); status = nfserr_resource; if (!conf) { /* @@ -1962,7 +1959,7 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, unsigned int hash = clientstr_hashval(unconf->cl_recdir); conf = find_confirmed_client_by_str(unconf->cl_recdir, - hash, false); + hash); if (conf) { nfsd4_remove_clid_dir(conf); expire_client(conf); @@ -4106,7 +4103,7 @@ nfs4_has_reclaimed_state(const char *name, bool use_exchange_id) unsigned int strhashval = clientstr_hashval(name); struct nfs4_client *clp; - clp = find_confirmed_client_by_str(name, strhashval, use_exchange_id); + clp = find_confirmed_client_by_str(name, strhashval); return clp ? 1 : 0; } -- cgit v1.2.2 From 18b631f83810e95eeb2e1839889b27142bd8d6d8 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 29 Nov 2010 15:28:10 -0500 Subject: nfsd: fix offset printk's in nfsd3 read/write Thanks to dysbr01@ca.com for noticing that the debugging printk in the v3 write procedure can print >2GB offsets as negative numbers: https://bugzilla.kernel.org/show_bug.cgi?id=23342 Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs3proc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 5b7e3021e06b..2247fc91d5e9 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -151,10 +151,10 @@ nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp, __be32 nfserr; u32 max_blocksize = svc_max_payload(rqstp); - dprintk("nfsd: READ(3) %s %lu bytes at %lu\n", + dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n", SVCFH_fmt(&argp->fh), (unsigned long) argp->count, - (unsigned long) argp->offset); + (unsigned long long) argp->offset); /* Obtain buffer pointer for payload. * 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof) @@ -191,10 +191,10 @@ nfsd3_proc_write(struct svc_rqst *rqstp, struct nfsd3_writeargs *argp, __be32 nfserr; unsigned long cnt = argp->len; - dprintk("nfsd: WRITE(3) %s %d bytes at %ld%s\n", + dprintk("nfsd: WRITE(3) %s %d bytes at %Lu%s\n", SVCFH_fmt(&argp->fh), argp->len, - (unsigned long) argp->offset, + (unsigned long long) argp->offset, argp->stable? " stable" : ""); fh_copy(&resp->fh, &argp->fh); -- cgit v1.2.2 From 5b6a599f0da3722dea9ecc01d97f54061662ce49 Mon Sep 17 00:00:00 2001 From: "bookjovi@gmail.com" Date: Sat, 11 Dec 2010 00:21:17 -0500 Subject: nfs: add missed CONFIG_NFSD_DEPRECATED these pieces of code only make sense when CONFIG_NFSD_DEPRECATED enabled Signed-off-by: Jovi Zhang fs/nfsd/nfsctl.c | 2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) Signed-off-by: J. Bruce Fields --- fs/nfsd/nfsctl.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 4514ebbee4d6..6840ec3ceecf 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -127,6 +127,7 @@ static ssize_t nfsctl_transaction_write(struct file *file, const char __user *bu static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos) { +#ifdef CONFIG_NFSD_DEPRECATED static int warned; if (file->f_dentry->d_name.name[0] == '.' && !warned) { printk(KERN_INFO @@ -135,6 +136,7 @@ static ssize_t nfsctl_transaction_read(struct file *file, char __user *buf, size current->comm, file->f_dentry->d_name.name); warned = 1; } +#endif if (! 
file->private_data) { /* An attempt to read a transaction file without writing * causes a 0-byte write so that the file can return -- cgit v1.2.2 From 56560b9ae0c2d07bb5bbcec16f799d7bf756d3de Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 16 Dec 2010 09:57:15 -0500 Subject: nfsd4: 4.1 SECINFO should consume filehandle See the referenced spec language; an attempt by a 4.1 client to use the current filehandle after a secinfo call should result in a NOFILEHANDLE error. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4proc.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 0cdfd022bb7b..bad4bf8e4bbc 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -769,6 +769,9 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, } else secinfo->si_exp = exp; dput(dentry); + if (cstate->minorversion) + /* See rfc 5661 section 2.6.3.1.1.8 */ + fh_put(&cstate->current_fh); return err; } -- cgit v1.2.2 From 0ff7ab46719a9c1e264b8d8e85416d59737ff13c Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 16 Dec 2010 10:06:27 -0500 Subject: nfsd4: move guts of nfsd4_lookupp into helper We'll reuse this code in secinfo_no_name. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4proc.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index bad4bf8e4bbc..095431a35722 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -604,9 +604,7 @@ nfsd4_link(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, return status; } -static __be32 -nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, - void *arg) +static __be32 nfsd4_do_lookupp(struct svc_rqst *rqstp, struct svc_fh *fh) { struct svc_fh tmp_fh; __be32 ret; @@ -615,13 +613,19 @@ nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, ret = exp_pseudoroot(rqstp, &tmp_fh); if (ret) return ret; - if (tmp_fh.fh_dentry == cstate->current_fh.fh_dentry) { + if (tmp_fh.fh_dentry == fh->fh_dentry) { fh_put(&tmp_fh); return nfserr_noent; } fh_put(&tmp_fh); - return nfsd_lookup(rqstp, &cstate->current_fh, - "..", 2, &cstate->current_fh); + return nfsd_lookup(rqstp, fh, "..", 2, fh); +} + +static __be32 +nfsd4_lookupp(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + void *arg) +{ + return nfsd4_do_lookupp(rqstp, &cstate->current_fh); } static __be32 -- cgit v1.2.2 From 04f4ad16b231abbfde34c762697ad035a3af0b5f Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 16 Dec 2010 09:51:13 -0500 Subject: nfsd4: implement secinfo_no_name Implementation of this operation is mandatory for NFSv4.1. Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4proc.c | 27 +++++++++++++++++++++++++++ fs/nfsd/nfs4xdr.c | 15 +++++++++++++-- fs/nfsd/xdr4.h | 5 +++++ 3 files changed, 45 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 095431a35722..f80c3997d24c 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -779,6 +779,29 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, return err; } +static __be32 +nfsd4_secinfo_no_name(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + struct nfsd4_secinfo_no_name *sin) +{ + __be32 err; + + switch (sin->sin_style) { + case NFS4_SECINFO_STYLE4_CURRENT_FH: + break; + case NFS4_SECINFO_STYLE4_PARENT: + err = nfsd4_do_lookupp(rqstp, &cstate->current_fh); + if (err) + return err; + break; + default: + return nfserr_inval; + } + exp_get(cstate->current_fh.fh_export); + sin->sin_exp = cstate->current_fh.fh_export; + fh_put(&cstate->current_fh); + return nfs_ok; +} + static __be32 nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr) @@ -1327,6 +1350,10 @@ static struct nfsd4_operation nfsd4_ops[] = { .op_flags = ALLOWED_WITHOUT_FH, .op_name = "OP_RECLAIM_COMPLETE", }, + [OP_SECINFO_NO_NAME] = { + .op_func = (nfsd4op_func)nfsd4_secinfo_no_name, + .op_name = "OP_SECINFO_NO_NAME", + }, }; static const char *nfsd4_op_name(unsigned opnum) diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 71d7d339e44a..b543b2410b54 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -846,6 +846,17 @@ nfsd4_decode_secinfo(struct nfsd4_compoundargs *argp, DECODE_TAIL; } +static __be32 +nfsd4_decode_secinfo_no_name(struct nfsd4_compoundargs *argp, + struct nfsd4_secinfo_no_name *sin) +{ + DECODE_HEAD; + + READ_BUF(4); + READ32(sin->sin_style); + DECODE_TAIL; +} + static __be32 nfsd4_decode_setattr(struct nfsd4_compoundargs *argp, struct nfsd4_setattr *setattr) { @@ -1358,7 +1369,7 @@ static nfsd4_dec nfsd41_dec_ops[] = { [OP_LAYOUTCOMMIT] = (nfsd4_dec)nfsd4_decode_notsupp, [OP_LAYOUTGET] = (nfsd4_dec)nfsd4_decode_notsupp, [OP_LAYOUTRETURN] = (nfsd4_dec)nfsd4_decode_notsupp, - [OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_notsupp, + [OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_secinfo_no_name, [OP_SEQUENCE] = (nfsd4_dec)nfsd4_decode_sequence, [OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp, [OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_notsupp, @@ -3162,7 +3173,7 @@ static nfsd4_enc nfsd4_enc_ops[] = { [OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_noop, [OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_noop, [OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_noop, - [OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_noop, + [OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_secinfo, [OP_SEQUENCE] = (nfsd4_enc)nfsd4_encode_sequence, [OP_SET_SSV] = (nfsd4_enc)nfsd4_encode_noop, [OP_TEST_STATEID] = (nfsd4_enc)nfsd4_encode_noop, diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 60fce3dc5cb5..799c30c3b495 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -311,6 +311,11 @@ struct nfsd4_secinfo { struct svc_export *si_exp; /* response */ }; +struct nfsd4_secinfo_no_name { + u32 sin_style; /* request */ + struct svc_export *sin_exp; /* response */ +}; + struct nfsd4_setattr { stateid_t sa_stateid; /* request */ u32 sa_bmval[3]; /* request */ -- cgit v1.2.2 From cfef2c6a559b1e37cbc7e7c1b51f82d26abf24ec Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sat, 18 Dec 2010 13:07:34 -0500 Subject: jbd2: Fix a debug message in do_get_write_access() 'buffer_head' should be 
'journal_head' This is a port of a patch which Namhyung Kim made to fs/jbd to jbd2. Signed-off-by: "Theodore Ts'o" --- fs/jbd2/transaction.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 6bf0a242613e..10b5e3b1ca8b 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -589,7 +589,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, transaction = handle->h_transaction; journal = transaction->t_journal; - jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy); + jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy); JBUFFER_TRACE(jh, "entry"); repeat: -- cgit v1.2.2 From a1dd53318409ed6a27a8ce4fecf52e1326a100c0 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sat, 18 Dec 2010 13:13:40 -0500 Subject: jbd2: use offset_in_page() instead of manual calculation This is a port to jbd2 of a patch which Namhyung Kim originally made to fs/jbd. Signed-off-by: "Theodore Ts'o" --- fs/jbd2/transaction.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 10b5e3b1ca8b..80f9b2a3880b 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -774,7 +774,7 @@ done: J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)), "Possible IO failure.\n"); page = jh2bh(jh)->b_page; - offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK; + offset = offset_in_page(jh2bh(jh)->b_data); source = kmap_atomic(page, KM_USER0); /* Fire data frozen trigger just before we copy the data */ jbd2_buffer_frozen_trigger(jh, source + offset, -- cgit v1.2.2 From ae00b267f3827ba88309fb74bdf7527396f0acf9 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sat, 18 Dec 2010 13:34:20 -0500 Subject: jbd2: remove unnecessary goto statement This is a port to jbd2 of a patch which Namhyung Kim originally made to fs/jbd. Signed-off-by: "Theodore Ts'o" --- fs/jbd2/transaction.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 80f9b2a3880b..394893242ae3 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -340,9 +340,7 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask) jbd2_free_handle(handle); current->journal_info = NULL; handle = ERR_PTR(err); - goto out; } -out: return handle; } EXPORT_SYMBOL(jbd2__journal_start); -- cgit v1.2.2 From 9a4f6271b68b9693290963b97b320d2e6e6f3446 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sat, 18 Dec 2010 13:36:33 -0500 Subject: jbd2: move debug message into debug #ifdef This is a port to jbd2 of a patch which Namhyung Kim originally made to fs/jbd. Signed-off-by: "Theodore Ts'o" --- fs/jbd2/recovery.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 2bc4d5f116f1..1cad869494f0 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -299,10 +299,10 @@ int jbd2_journal_skip_recovery(journal_t *journal) #ifdef CONFIG_JBD2_DEBUG int dropped = info.end_transaction - be32_to_cpu(journal->j_superblock->s_sequence); -#endif jbd_debug(1, "JBD: ignoring %d transaction%s from the journal.\n", dropped, (dropped == 1) ? 
"" : "s"); +#endif journal->j_transaction_sequence = ++info.end_transaction; } -- cgit v1.2.2 From b7271b0a39947f757d7969f6150dcb16c1976b91 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sat, 18 Dec 2010 13:39:38 -0500 Subject: jbd2: simplify return path of journal_init_common Signed-off-by: "Theodore Ts'o" --- fs/jbd2/journal.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 06dfd778cae5..2447bd86f801 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -828,7 +828,7 @@ static journal_t * journal_init_common (void) journal = kzalloc(sizeof(*journal), GFP_KERNEL); if (!journal) - goto fail; + return NULL; init_waitqueue_head(&journal->j_wait_transaction_locked); init_waitqueue_head(&journal->j_wait_logspace); @@ -853,14 +853,12 @@ static journal_t * journal_init_common (void) err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH); if (err) { kfree(journal); - goto fail; + return NULL; } spin_lock_init(&journal->j_history_lock); return journal; -fail: - return NULL; } /* jbd2_journal_init_dev and jbd2_journal_init_inode: -- cgit v1.2.2 From 6ca7b13dea385484e2fcc89790b8030697c5014a Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Sun, 19 Dec 2010 21:38:46 -0500 Subject: ext4: Remove redundant unlikely() IS_ERR() already implies unlikely(), so it can be omitted here. Signed-off-by: Tobias Klauser Signed-off-by: "Theodore Ts'o" --- fs/ext4/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index dc40e75cba88..203086498caa 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1036,7 +1036,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru return ERR_PTR(-EIO); } inode = ext4_iget(dir->i_sb, ino); - if (unlikely(IS_ERR(inode))) { + if (IS_ERR(inode)) { if (PTR_ERR(inode) == -ESTALE) { EXT4_ERROR_INODE(dir, "deleted inode referenced: %u", -- cgit v1.2.2 From b17b35ec13adfeb0346d4b329110b14adc509327 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sun, 19 Dec 2010 21:41:55 -0500 Subject: ext4: use kmem_cache_zalloc() in ext4_init_io_end() Use advantage of kmem_cache_zalloc() to remove a memset() call in ext4_init_io_end() and save a few bytes. 
Before: [jj@dragon linux-2.6]$ size fs/ext4/page-io.o text data bss dec hex filename 3016 0 624 3640 e38 fs/ext4/page-io.o After: [jj@dragon linux-2.6]$ size fs/ext4/page-io.o text data bss dec hex filename 3000 0 624 3624 e28 fs/ext4/page-io.o Signed-off-by: Jesper Juhl Signed-off-by: "Theodore Ts'o" --- fs/ext4/page-io.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index beacce11ac50..0f5dfe0e83e7 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -158,11 +158,8 @@ static void ext4_end_io_work(struct work_struct *work) ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags) { - ext4_io_end_t *io = NULL; - - io = kmem_cache_alloc(io_end_cachep, flags); + ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags); if (io) { - memset(io, 0, sizeof(*io)); atomic_inc(&EXT4_I(inode)->i_ioend_count); io->inode = inode; INIT_WORK(&io->work, ext4_end_io_work); -- cgit v1.2.2 From cad3f00763dcf9dfc62cbddf4bd714ab5a71a0eb Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sun, 19 Dec 2010 22:07:02 -0500 Subject: ext4: optimize ext4_check_dir_entry() with unlikely() annotations This function gets called a lot for large directories, and the answer is almost always "no, no, there's no problem". This means using unlikely() is a good thing. Signed-off-by: "Theodore Ts'o" --- fs/ext4/dir.c | 40 +++++++++++++++++++++++----------------- fs/ext4/ext4.h | 3 ++- fs/ext4/namei.c | 14 +++++++------- 3 files changed, 32 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index ece76fb6a40c..bd5d74d06399 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -60,7 +60,11 @@ static unsigned char get_dtype(struct super_block *sb, int filetype) return (ext4_filetype_table[filetype]); } - +/* + * Return 0 if the directory entry is OK, and 1 if there is a problem + * + * Note: this is the opposite of what ext2 and ext3 historically returned... + */ int __ext4_check_dir_entry(const char *function, unsigned int line, struct inode *dir, struct ext4_dir_entry_2 *de, @@ -71,26 +75,28 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, const int rlen = ext4_rec_len_from_disk(de->rec_len, dir->i_sb->s_blocksize); - if (rlen < EXT4_DIR_REC_LEN(1)) + if (unlikely(rlen < EXT4_DIR_REC_LEN(1))) error_msg = "rec_len is smaller than minimal"; - else if (rlen % 4 != 0) + else if (unlikely(rlen % 4 != 0)) error_msg = "rec_len % 4 != 0"; - else if (rlen < EXT4_DIR_REC_LEN(de->name_len)) + else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len))) error_msg = "rec_len is too small for name_len"; - else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize) + else if (unlikely(((char *) de - bh->b_data) + rlen > + dir->i_sb->s_blocksize)) error_msg = "directory entry across blocks"; - else if (le32_to_cpu(de->inode) > - le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)) + else if (unlikely(le32_to_cpu(de->inode) > + le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) error_msg = "inode out of bounds"; + else + return 0; - if (error_msg != NULL) - ext4_error_inode(dir, function, line, bh->b_blocknr, - "bad entry in directory: %s - " - "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d", - error_msg, (unsigned) (offset%bh->b_size), offset, - le32_to_cpu(de->inode), - rlen, de->name_len); - return error_msg == NULL ? 
1 : 0; + ext4_error_inode(dir, function, line, bh->b_blocknr, + "bad entry in directory: %s - " + "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d", + error_msg, (unsigned) (offset%bh->b_size), offset, + le32_to_cpu(de->inode), + rlen, de->name_len); + return 1; } static int ext4_readdir(struct file *filp, @@ -194,8 +200,8 @@ revalidate: while (!error && filp->f_pos < inode->i_size && offset < sb->s_blocksize) { de = (struct ext4_dir_entry_2 *) (bh->b_data + offset); - if (!ext4_check_dir_entry(inode, de, - bh, offset)) { + if (ext4_check_dir_entry(inode, de, + bh, offset)) { /* * On error, skip the f_pos to the next block */ diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 17baecbf8cda..49f1ceaac57d 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1639,7 +1639,8 @@ extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *, struct ext4_dir_entry_2 *, struct buffer_head *, unsigned int); #define ext4_check_dir_entry(dir, de, bh, offset) \ - __ext4_check_dir_entry(__func__, __LINE__, (dir), (de), (bh), (offset)) + unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (de), \ + (bh), (offset))) extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, __u32 minor_hash, struct ext4_dir_entry_2 *dirent); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 203086498caa..e275464f7754 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -581,9 +581,9 @@ static int htree_dirblock_to_tree(struct file *dir_file, dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0)); for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) { - if (!ext4_check_dir_entry(dir, de, bh, - (block<i_sb)) - +((char *)de - bh->b_data))) { + if (ext4_check_dir_entry(dir, de, bh, + (block<i_sb)) + + ((char *)de - bh->b_data))) { /* On error, skip the f_pos to the next block. 
*/ dir_file->f_pos = (dir_file->f_pos | (dir->i_sb->s_blocksize - 1)) + 1; @@ -820,7 +820,7 @@ static inline int search_dirblock(struct buffer_head *bh, if ((char *) de + namelen <= dlimit && ext4_match (namelen, name, de)) { /* found a match - just to be sure, do a full check */ - if (!ext4_check_dir_entry(dir, de, bh, offset)) + if (ext4_check_dir_entry(dir, de, bh, offset)) return -1; *res_dir = de; return 1; @@ -1269,7 +1269,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, de = (struct ext4_dir_entry_2 *)bh->b_data; top = bh->b_data + blocksize - reclen; while ((char *) de <= top) { - if (!ext4_check_dir_entry(dir, de, bh, offset)) + if (ext4_check_dir_entry(dir, de, bh, offset)) return -EIO; if (ext4_match(namelen, name, de)) return -EEXIST; @@ -1636,7 +1636,7 @@ static int ext4_delete_entry(handle_t *handle, pde = NULL; de = (struct ext4_dir_entry_2 *) bh->b_data; while (i < bh->b_size) { - if (!ext4_check_dir_entry(dir, de, bh, i)) + if (ext4_check_dir_entry(dir, de, bh, i)) return -EIO; if (de == de_del) { BUFFER_TRACE(bh, "get_write_access"); @@ -1919,7 +1919,7 @@ static int empty_dir(struct inode *inode) } de = (struct ext4_dir_entry_2 *) bh->b_data; } - if (!ext4_check_dir_entry(inode, de, bh, offset)) { + if (ext4_check_dir_entry(inode, de, bh, offset)) { de = (struct ext4_dir_entry_2 *)(bh->b_data + sb->s_blocksize); offset = (offset | (sb->s_blocksize - 1)) + 1; -- cgit v1.2.2 From af0b44a1970fed1cda31d2969c99c46ffc515160 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Sun, 19 Dec 2010 22:10:31 -0500 Subject: ext4: zero out nanosecond timestamps for small inodes When nanosecond timestamp resolution isn't supported on an ext4 partition (inode size = 128), stat() appears to be returning uninitialized garbage in the nanosecond component of timestamps. EXT4_INODE_GET_XTIME should zero out tv_nsec when EXT4_FITS_IN_INODE evaluates to false. 
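A simplified sketch of the decode path this guards, with invented names and an approximated bit layout (the real macros and ext4_decode_extra_time() live in ext4.h); the point is only that tv_nsec must be cleared explicitly when the on-disk inode has no room for the extra timestamp word:

#include <linux/types.h>
#include <linux/time.h>
#include <asm/byteorder.h>

/* hypothetical stand-in for EXT4_INODE_GET_XTIME on a 128-byte inode */
static void decode_xtime(struct timespec *ts, __le32 disk_sec,
			 const __le32 *disk_extra)	/* NULL when the small inode has no extra field */
{
	ts->tv_sec = (signed int)le32_to_cpu(disk_sec);
	if (disk_extra)
		ts->tv_nsec = le32_to_cpu(*disk_extra) >> 2;	/* upper bits of the extra word carry nanoseconds */
	else
		ts->tv_nsec = 0;	/* without this, stat() reports whatever was already in memory */
}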
Reported-by: Jordan Russell Signed-off-by: Eric Sandeen Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 49f1ceaac57d..8104ab7eb7d4 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -693,6 +693,8 @@ do { \ if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra)) \ ext4_decode_extra_time(&(inode)->xtime, \ raw_inode->xtime ## _extra); \ + else \ + (inode)->xtime.tv_nsec = 0; \ } while (0) #define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode) \ @@ -703,6 +705,8 @@ do { \ if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \ ext4_decode_extra_time(&(einode)->xtime, \ raw_inode->xtime ## _extra); \ + else \ + (einode)->xtime.tv_nsec = 0; \ } while (0) #define i_disk_version osd1.linux1.l_i_version -- cgit v1.2.2 From 94de56ab2062be59d80e2efb7c0dc60ecf616075 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 19 Dec 2010 22:21:02 -0500 Subject: ext4: Use vzalloc in ext4_fill_flex_info() Signed-off-by: Joe Perches Signed-off-by: "Theodore Ts'o" --- fs/ext4/super.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 072ff973ff2b..10290f8f5922 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1930,14 +1930,13 @@ static int ext4_fill_flex_info(struct super_block *sb) size = flex_group_count * sizeof(struct flex_groups); sbi->s_flex_groups = kzalloc(size, GFP_KERNEL); if (sbi->s_flex_groups == NULL) { - sbi->s_flex_groups = vmalloc(size); - if (sbi->s_flex_groups) - memset(sbi->s_flex_groups, 0, size); - } - if (sbi->s_flex_groups == NULL) { - ext4_msg(sb, KERN_ERR, "not enough memory for " - "%u flex groups", flex_group_count); - goto failed; + sbi->s_flex_groups = vzalloc(size); + if (sbi->s_flex_groups == NULL) { + ext4_msg(sb, KERN_ERR, + "not enough memory for %u flex groups", + flex_group_count); + goto failed; + } } for (i = 0; i < sbi->s_groups_count; i++) { -- cgit v1.2.2 From 0ff2ea7d84e31176a046a1eabea59d6e4eecd998 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 19 Dec 2010 22:43:19 -0500 Subject: ext4: Use printf extension %pV Using %pV reduces the number of printk calls and eliminates any possible message interleaving from other printk calls. In function __ext4_grp_locked_error also added KERN_CONT to some printks. Signed-off-by: Joe Perches Signed-off-by: "Theodore Ts'o" --- fs/ext4/super.c | 40 +++++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 10290f8f5922..c228da112de0 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -388,13 +388,14 @@ static void ext4_handle_error(struct super_block *sb) void __ext4_error(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { + struct va_format vaf; va_list args; va_start(args, fmt); - printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: ", - sb->s_id, function, line, current->comm); - vprintk(fmt, args); - printk("\n"); + vaf.fmt = fmt; + vaf.va = &args; + printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n", + sb->s_id, function, line, current->comm, &vaf); va_end(args); ext4_handle_error(sb); @@ -543,28 +544,29 @@ void __ext4_abort(struct super_block *sb, const char *function, panic("EXT4-fs panic from previous error\n"); } -void ext4_msg (struct super_block * sb, const char *prefix, - const char *fmt, ...) 
+void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) { + struct va_format vaf; va_list args; va_start(args, fmt); - printk("%sEXT4-fs (%s): ", prefix, sb->s_id); - vprintk(fmt, args); - printk("\n"); + vaf.fmt = fmt; + vaf.va = &args; + printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf); va_end(args); } void __ext4_warning(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) { + struct va_format vaf; va_list args; va_start(args, fmt); - printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: ", - sb->s_id, function, line); - vprintk(fmt, args); - printk("\n"); + vaf.fmt = fmt; + vaf.va = &args; + printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n", + sb->s_id, function, line, &vaf); va_end(args); } @@ -575,21 +577,25 @@ void __ext4_grp_locked_error(const char *function, unsigned int line, __releases(bitlock) __acquires(bitlock) { + struct va_format vaf; va_list args; struct ext4_super_block *es = EXT4_SB(sb)->s_es; es->s_last_error_ino = cpu_to_le32(ino); es->s_last_error_block = cpu_to_le64(block); __save_error_info(sb, function, line); + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u", sb->s_id, function, line, grp); if (ino) - printk("inode %lu: ", ino); + printk(KERN_CONT "inode %lu: ", ino); if (block) - printk("block %llu:", (unsigned long long) block); - vprintk(fmt, args); - printk("\n"); + printk(KERN_CONT "block %llu:", (unsigned long long) block); + printk(KERN_CONT "%pV\n", &vaf); va_end(args); if (test_opt(sb, ERRORS_CONT)) { -- cgit v1.2.2 From 3603b8eaccc8e41d3f355b3cadd662a3dd6699fd Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 20 Dec 2010 09:15:19 +0100 Subject: Fix compile warnings due to missing removal of a 'ret' variable Commit a8adbe3 forgot to remove the return variable, kill it. drivers/block/loop.c: In function 'lo_splice_actor': drivers/block/loop.c:398: warning: unused variable 'ret' [...] fs/nfsd/vfs.c: In function 'nfsd_splice_actor': fs/nfsd/vfs.c:848: warning: unused variable 'ret' Reported-by: Stephen Rothwell Signed-off-by: Jens Axboe --- fs/nfsd/vfs.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index c6e08661adee..106ed482f119 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -845,7 +845,6 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, struct page **pp = rqstp->rq_respages + rqstp->rq_resused; struct page *page = buf->page; size_t size; - int ret; size = sd->len; -- cgit v1.2.2 From b72143ab3ed566a12560fa4411a1f02c276dcc39 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 20 Dec 2010 07:26:59 -0500 Subject: ext4: Add error checking to kmem_cache_alloc() call in ext4_free_blocks() Signed-off-by: "Theodore Ts'o" --- fs/ext4/mballoc.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 731b6f738a03..46d5414f59c1 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4626,7 +4626,11 @@ do_more: * blocks being freed are metadata. 
these blocks shouldn't * be used until this transaction is committed */ - new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS); + new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS); + if (!new_entry) { + err = -ENOMEM; + goto error_return; + } new_entry->start_blk = bit; new_entry->group = block_group; new_entry->count = count; -- cgit v1.2.2 From 3f16b9850743b702380f098ab5e0308cd6af1792 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 21 Dec 2010 12:29:01 +1100 Subject: xfs: introduce new locks for the log grant ticket wait queues The log grant ticket wait queues are currently protected by the log grant lock. However, the queues are functionally independent from each other, and operations on them only require serialisation against other queue operations now that all of the other log variables they use are atomic values. Hence, we can make them independent of the grant lock by introducing new locks just to protect the lists operations. because the lists are independent, we can use a lock per list and ensure that reserve and write head queuing do not contend. To ensure forced shutdowns work correctly in conjunction with the new fast paths, ensure that we check whether the log has been shut down in the grant functions once we hold the relevant spin locks but before we go to sleep. This is needed to co-ordinate correctly with the wakeups that are issued on the ticket queues so we don't leave any processes sleeping on the queues during a shutdown. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_trace.h | 2 + fs/xfs/xfs_log.c | 139 ++++++++++++++++++++++++++----------------- fs/xfs/xfs_log_priv.h | 16 +++-- 3 files changed, 97 insertions(+), 60 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index b180e1bf8257..647af2a2e7aa 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -837,6 +837,7 @@ DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1); DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1); DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2); DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2); +DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error); @@ -844,6 +845,7 @@ DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2); +DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub); diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index a1d7d12fc51f..6fcc9d0af524 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -682,12 +682,12 @@ xfs_log_move_tail(xfs_mount_t *mp, if (tail_lsn != 1) atomic64_set(&log->l_tail_lsn, tail_lsn); - spin_lock(&log->l_grant_lock); - if (!list_empty(&log->l_writeq)) { + if (!list_empty_careful(&log->l_writeq)) { #ifdef DEBUG if (log->l_flags & XLOG_ACTIVE_RECOVERY) panic("Recovery problem"); #endif + spin_lock(&log->l_grant_write_lock); free_bytes = xlog_space_left(log, &log->l_grant_write_head); list_for_each_entry(tic, &log->l_writeq, t_queue) { ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV); @@ -696,15 +696,18 @@ xfs_log_move_tail(xfs_mount_t *mp, break; tail_lsn = 0; free_bytes -= tic->t_unit_res; + 
trace_xfs_log_regrant_write_wake_up(log, tic); wake_up(&tic->t_wait); } + spin_unlock(&log->l_grant_write_lock); } - if (!list_empty(&log->l_reserveq)) { + if (!list_empty_careful(&log->l_reserveq)) { #ifdef DEBUG if (log->l_flags & XLOG_ACTIVE_RECOVERY) panic("Recovery problem"); #endif + spin_lock(&log->l_grant_reserve_lock); free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); list_for_each_entry(tic, &log->l_reserveq, t_queue) { if (tic->t_flags & XLOG_TIC_PERM_RESERV) @@ -715,11 +718,12 @@ xfs_log_move_tail(xfs_mount_t *mp, break; tail_lsn = 0; free_bytes -= need_bytes; + trace_xfs_log_grant_wake_up(log, tic); wake_up(&tic->t_wait); } + spin_unlock(&log->l_grant_reserve_lock); } - spin_unlock(&log->l_grant_lock); -} /* xfs_log_move_tail */ +} /* * Determine if we have a transaction that has gone to disk @@ -1010,6 +1014,8 @@ xlog_alloc_log(xfs_mount_t *mp, xlog_assign_grant_head(&log->l_grant_write_head, 1, 0); INIT_LIST_HEAD(&log->l_reserveq); INIT_LIST_HEAD(&log->l_writeq); + spin_lock_init(&log->l_grant_reserve_lock); + spin_lock_init(&log->l_grant_write_lock); error = EFSCORRUPTED; if (xfs_sb_version_hassector(&mp->m_sb)) { @@ -2477,6 +2483,18 @@ restart: * * Once a ticket gets put onto the reserveq, it will only return after * the needed reservation is satisfied. + * + * This function is structured so that it has a lock free fast path. This is + * necessary because every new transaction reservation will come through this + * path. Hence any lock will be globally hot if we take it unconditionally on + * every pass. + * + * As tickets are only ever moved on and off the reserveq under the + * l_grant_reserve_lock, we only need to take that lock if we are going + * to add the ticket to the queue and sleep. We can avoid taking the lock if the + * ticket was never added to the reserveq because the t_queue list head will be + * empty and we hold the only reference to it so it can safely be checked + * unlocked. */ STATIC int xlog_grant_log_space(xlog_t *log, @@ -2490,13 +2508,20 @@ xlog_grant_log_space(xlog_t *log, panic("grant Recovery problem"); #endif - /* Is there space or do we need to sleep? */ - spin_lock(&log->l_grant_lock); - trace_xfs_log_grant_enter(log, tic); + need_bytes = tic->t_unit_res; + if (tic->t_flags & XFS_LOG_PERM_RESERV) + need_bytes *= tic->t_ocnt; + /* something is already sleeping; insert new transaction at end */ - if (!list_empty(&log->l_reserveq)) { + if (!list_empty_careful(&log->l_reserveq)) { + spin_lock(&log->l_grant_reserve_lock); + /* recheck the queue now we are locked */ + if (list_empty(&log->l_reserveq)) { + spin_unlock(&log->l_grant_reserve_lock); + goto redo; + } list_add_tail(&tic->t_queue, &log->l_reserveq); trace_xfs_log_grant_sleep1(log, tic); @@ -2509,48 +2534,47 @@ xlog_grant_log_space(xlog_t *log, goto error_return; XFS_STATS_INC(xs_sleep_logspace); - xlog_wait(&tic->t_wait, &log->l_grant_lock); + xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock); /* * If we got an error, and the filesystem is shutting down, * we'll catch it down below. So just continue... 
*/ trace_xfs_log_grant_wake1(log, tic); - spin_lock(&log->l_grant_lock); } - if (tic->t_flags & XFS_LOG_PERM_RESERV) - need_bytes = tic->t_unit_res*tic->t_ocnt; - else - need_bytes = tic->t_unit_res; redo: if (XLOG_FORCED_SHUTDOWN(log)) - goto error_return; + goto error_return_unlocked; free_bytes = xlog_space_left(log, &log->l_grant_reserve_head); if (free_bytes < need_bytes) { + spin_lock(&log->l_grant_reserve_lock); if (list_empty(&tic->t_queue)) list_add_tail(&tic->t_queue, &log->l_reserveq); trace_xfs_log_grant_sleep2(log, tic); + if (XLOG_FORCED_SHUTDOWN(log)) + goto error_return; + xlog_grant_push_ail(log, need_bytes); XFS_STATS_INC(xs_sleep_logspace); - xlog_wait(&tic->t_wait, &log->l_grant_lock); - - spin_lock(&log->l_grant_lock); - if (XLOG_FORCED_SHUTDOWN(log)) - goto error_return; + xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock); trace_xfs_log_grant_wake2(log, tic); - goto redo; } - list_del_init(&tic->t_queue); + if (!list_empty(&tic->t_queue)) { + spin_lock(&log->l_grant_reserve_lock); + list_del_init(&tic->t_queue); + spin_unlock(&log->l_grant_reserve_lock); + } /* we've got enough space */ + spin_lock(&log->l_grant_lock); xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes); xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); trace_xfs_log_grant_exit(log, tic); @@ -2559,8 +2583,11 @@ redo: spin_unlock(&log->l_grant_lock); return 0; - error_return: +error_return_unlocked: + spin_lock(&log->l_grant_reserve_lock); +error_return: list_del_init(&tic->t_queue); + spin_unlock(&log->l_grant_reserve_lock); trace_xfs_log_grant_error(log, tic); /* @@ -2570,7 +2597,6 @@ redo: */ tic->t_curr_res = 0; tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ - spin_unlock(&log->l_grant_lock); return XFS_ERROR(EIO); } /* xlog_grant_log_space */ @@ -2578,7 +2604,8 @@ redo: /* * Replenish the byte reservation required by moving the grant write head. * - * + * Similar to xlog_grant_log_space, the function is structured to have a lock + * free fast path. */ STATIC int xlog_regrant_write_log_space(xlog_t *log, @@ -2597,12 +2624,9 @@ xlog_regrant_write_log_space(xlog_t *log, panic("regrant Recovery problem"); #endif - spin_lock(&log->l_grant_lock); - trace_xfs_log_regrant_write_enter(log, tic); - if (XLOG_FORCED_SHUTDOWN(log)) - goto error_return; + goto error_return_unlocked; /* If there are other waiters on the queue then give them a * chance at logspace before us. Wake up the first waiters, @@ -2611,8 +2635,10 @@ xlog_regrant_write_log_space(xlog_t *log, * this transaction. 
*/ need_bytes = tic->t_unit_res; - if (!list_empty(&log->l_writeq)) { + if (!list_empty_careful(&log->l_writeq)) { struct xlog_ticket *ntic; + + spin_lock(&log->l_grant_write_lock); free_bytes = xlog_space_left(log, &log->l_grant_write_head); list_for_each_entry(ntic, &log->l_writeq, t_queue) { ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV); @@ -2627,50 +2653,48 @@ xlog_regrant_write_log_space(xlog_t *log, struct xlog_ticket, t_queue)) { if (list_empty(&tic->t_queue)) list_add_tail(&tic->t_queue, &log->l_writeq); - trace_xfs_log_regrant_write_sleep1(log, tic); xlog_grant_push_ail(log, need_bytes); XFS_STATS_INC(xs_sleep_logspace); - xlog_wait(&tic->t_wait, &log->l_grant_lock); - - /* If we're shutting down, this tic is already - * off the queue */ - spin_lock(&log->l_grant_lock); - if (XLOG_FORCED_SHUTDOWN(log)) - goto error_return; - + xlog_wait(&tic->t_wait, &log->l_grant_write_lock); trace_xfs_log_regrant_write_wake1(log, tic); - } + } else + spin_unlock(&log->l_grant_write_lock); } redo: if (XLOG_FORCED_SHUTDOWN(log)) - goto error_return; + goto error_return_unlocked; free_bytes = xlog_space_left(log, &log->l_grant_write_head); if (free_bytes < need_bytes) { + spin_lock(&log->l_grant_write_lock); if (list_empty(&tic->t_queue)) list_add_tail(&tic->t_queue, &log->l_writeq); + + if (XLOG_FORCED_SHUTDOWN(log)) + goto error_return; + xlog_grant_push_ail(log, need_bytes); XFS_STATS_INC(xs_sleep_logspace); trace_xfs_log_regrant_write_sleep2(log, tic); - xlog_wait(&tic->t_wait, &log->l_grant_lock); - - /* If we're shutting down, this tic is already off the queue */ - spin_lock(&log->l_grant_lock); - if (XLOG_FORCED_SHUTDOWN(log)) - goto error_return; + xlog_wait(&tic->t_wait, &log->l_grant_write_lock); trace_xfs_log_regrant_write_wake2(log, tic); goto redo; } - list_del_init(&tic->t_queue); + if (!list_empty(&tic->t_queue)) { + spin_lock(&log->l_grant_write_lock); + list_del_init(&tic->t_queue); + spin_unlock(&log->l_grant_write_lock); + } /* we've got enough space */ + spin_lock(&log->l_grant_lock); xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); trace_xfs_log_regrant_write_exit(log, tic); xlog_verify_grant_head(log, 1); @@ -2679,8 +2703,11 @@ redo: return 0; + error_return_unlocked: + spin_lock(&log->l_grant_write_lock); error_return: list_del_init(&tic->t_queue); + spin_unlock(&log->l_grant_write_lock); trace_xfs_log_regrant_write_error(log, tic); /* @@ -2690,7 +2717,6 @@ redo: */ tic->t_curr_res = 0; tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ - spin_unlock(&log->l_grant_lock); return XFS_ERROR(EIO); } /* xlog_regrant_write_log_space */ @@ -3664,12 +3690,10 @@ xfs_log_force_umount( xlog_cil_force(log); /* - * We must hold both the GRANT lock and the LOG lock, - * before we mark the filesystem SHUTDOWN and wake - * everybody up to tell the bad news. + * mark the filesystem and the as in a shutdown state and wake + * everybody up to tell them the bad news. */ spin_lock(&log->l_icloglock); - spin_lock(&log->l_grant_lock); mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; if (mp->m_sb_bp) XFS_BUF_DONE(mp->m_sb_bp); @@ -3694,14 +3718,17 @@ xfs_log_force_umount( * means we have to wake up everybody queued up on reserveq as well as * writeq. In addition, we make sure in xlog_{re}grant_log_space that * we don't enqueue anything once the SHUTDOWN flag is set, and this - * action is protected by the GRANTLOCK. + * action is protected by the grant locks. 
*/ + spin_lock(&log->l_grant_reserve_lock); list_for_each_entry(tic, &log->l_reserveq, t_queue) wake_up(&tic->t_wait); + spin_unlock(&log->l_grant_reserve_lock); + spin_lock(&log->l_grant_write_lock); list_for_each_entry(tic, &log->l_writeq, t_queue) wake_up(&tic->t_wait); - spin_unlock(&log->l_grant_lock); + spin_unlock(&log->l_grant_write_lock); if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { ASSERT(!logerror); diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 7619d6a02388..befb2fc5b027 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -512,10 +512,6 @@ typedef struct log { /* The following block of fields are changed while holding grant_lock */ spinlock_t l_grant_lock ____cacheline_aligned_in_smp; - struct list_head l_reserveq; - struct list_head l_writeq; - atomic64_t l_grant_reserve_head; - atomic64_t l_grant_write_head; /* * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and @@ -528,6 +524,18 @@ typedef struct log { /* lsn of 1st LR with unflushed * buffers */ atomic64_t l_tail_lsn ____cacheline_aligned_in_smp; + /* + * ticket grant locks, queues and accounting have their own cachlines + * as these are quite hot and can be operated on concurrently. + */ + spinlock_t l_grant_reserve_lock ____cacheline_aligned_in_smp; + struct list_head l_reserveq; + atomic64_t l_grant_reserve_head; + + spinlock_t l_grant_write_lock ____cacheline_aligned_in_smp; + struct list_head l_writeq; + atomic64_t l_grant_write_head; + /* The following field are used for debugging; need to hold icloglock */ #ifdef DEBUG char *l_iclog_bak[XLOG_MAX_ICLOGS]; -- cgit v1.2.2 From d0eb2f38b250b7d6c993adf81b0e4ded0565497e Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 21 Dec 2010 12:29:14 +1100 Subject: xfs: convert grant head manipulations to lockless algorithm The only thing that the grant lock remains to protect is the grant head manipulations when adding or removing space from the log. These calculations are already based on atomic variables, so we can already update them safely without locks. However, the grant head manpulations require atomic multi-step calculations to be executed, which the algorithms currently don't allow. To make these multi-step calculations atomic, convert the algorithms to compare-and-exchange loops on the atomic variables. That is, we sample the old value, perform the calculation and use atomic64_cmpxchg() to attempt to update the head with the new value. If the head has not changed since we sampled it, it will succeed and we are done. Otherwise, we rerun the calculation again from a new sample of the head. This allows us to remove the grant lock from around all the grant head space manipulations, and that effectively removes the grant lock from the log completely. Hence we can remove the grant lock completely from the log at this point. 
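The core trick is easiest to see outside the kernel. Below is a small standalone C illustration (C11 atomics, invented names and an assumed log size, not the XFS code) of the sample/compute/compare-exchange loop the patch introduces: the 64-bit head packs a cycle count in the high 32 bits and a byte count in the low 32 bits, and the whole multi-step update either commits atomically or is recomputed from a fresh sample.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define LOG_SIZE 4096			/* assumed log size for the example */

static _Atomic int64_t grant_head;

static void grant_add_space(int bytes)
{
	int64_t old = atomic_load(&grant_head);
	int64_t new;

	do {
		int32_t cycle = old >> 32;
		int32_t space = old & 0xffffffff;

		if (space + bytes >= LOG_SIZE) {	/* wrap to the next cycle */
			space = space + bytes - LOG_SIZE;
			cycle++;
		} else {
			space += bytes;
		}
		new = ((int64_t)cycle << 32) | space;
		/* on failure 'old' is refreshed and the calculation reruns */
	} while (!atomic_compare_exchange_weak(&grant_head, &old, new));
}

int main(void)
{
	atomic_store(&grant_head, ((int64_t)1 << 32) | 100);
	grant_add_space(4090);
	printf("cycle=%d space=%d\n",
	       (int)(atomic_load(&grant_head) >> 32),
	       (int)(atomic_load(&grant_head) & 0xffffffff));	/* prints cycle=2 space=94 */
	return 0;
}

atomic_compare_exchange_weak() refreshes the expected value on failure, so the loop automatically recomputes against whatever another CPU stored; the kernel's atomic64_cmpxchg() loops in the diff below have the same shape.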
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/xfs_log.c | 103 +++++++++++++++++--------------------------------- fs/xfs/xfs_log_priv.h | 23 +++++++---- 2 files changed, 49 insertions(+), 77 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 6fcc9d0af524..0bf24b11d0c4 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -81,7 +81,6 @@ STATIC void xlog_ungrant_log_space(xlog_t *log, #if defined(DEBUG) STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr); -STATIC void xlog_verify_grant_head(xlog_t *log, int equals); STATIC void xlog_verify_grant_tail(struct log *log); STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog, int count, boolean_t syncing); @@ -89,7 +88,6 @@ STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, xfs_lsn_t tail_lsn); #else #define xlog_verify_dest_ptr(a,b) -#define xlog_verify_grant_head(a,b) #define xlog_verify_grant_tail(a) #define xlog_verify_iclog(a,b,c,d) #define xlog_verify_tail_lsn(a,b,c) @@ -103,17 +101,24 @@ xlog_grant_sub_space( atomic64_t *head, int bytes) { - int cycle, space; + int64_t head_val = atomic64_read(head); + int64_t new, old; - xlog_crack_grant_head(head, &cycle, &space); + do { + int cycle, space; - space -= bytes; - if (space < 0) { - space += log->l_logsize; - cycle--; - } + xlog_crack_grant_head_val(head_val, &cycle, &space); - xlog_assign_grant_head(head, cycle, space); + space -= bytes; + if (space < 0) { + space += log->l_logsize; + cycle--; + } + + old = head_val; + new = xlog_assign_grant_head_val(cycle, space); + head_val = atomic64_cmpxchg(head, old, new); + } while (head_val != old); } static void @@ -122,20 +127,27 @@ xlog_grant_add_space( atomic64_t *head, int bytes) { - int tmp; - int cycle, space; + int64_t head_val = atomic64_read(head); + int64_t new, old; - xlog_crack_grant_head(head, &cycle, &space); + do { + int tmp; + int cycle, space; - tmp = log->l_logsize - space; - if (tmp > bytes) - space += bytes; - else { - space = bytes - tmp; - cycle++; - } + xlog_crack_grant_head_val(head_val, &cycle, &space); - xlog_assign_grant_head(head, cycle, space); + tmp = log->l_logsize - space; + if (tmp > bytes) + space += bytes; + else { + space = bytes - tmp; + cycle++; + } + + old = head_val; + new = xlog_assign_grant_head_val(cycle, space); + head_val = atomic64_cmpxchg(head, old, new); + } while (head_val != old); } static void @@ -318,9 +330,7 @@ xfs_log_reserve( trace_xfs_log_reserve(log, internal_ticket); - spin_lock(&log->l_grant_lock); xlog_grant_push_ail(log, internal_ticket->t_unit_res); - spin_unlock(&log->l_grant_lock); retval = xlog_regrant_write_log_space(log, internal_ticket); } else { /* may sleep if need to allocate more tickets */ @@ -334,11 +344,9 @@ xfs_log_reserve( trace_xfs_log_reserve(log, internal_ticket); - spin_lock(&log->l_grant_lock); xlog_grant_push_ail(log, (internal_ticket->t_unit_res * internal_ticket->t_cnt)); - spin_unlock(&log->l_grant_lock); retval = xlog_grant_log_space(log, internal_ticket); } @@ -1057,7 +1065,6 @@ xlog_alloc_log(xfs_mount_t *mp, log->l_xbuf = bp; spin_lock_init(&log->l_icloglock); - spin_lock_init(&log->l_grant_lock); init_waitqueue_head(&log->l_flush_wait); /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */ @@ -1135,7 +1142,6 @@ out_free_iclog: kmem_free(iclog); } spinlock_destroy(&log->l_icloglock); - spinlock_destroy(&log->l_grant_lock); xfs_buf_free(log->l_xbuf); out_free_log: kmem_free(log); @@ -1331,10 +1337,8 @@ xlog_sync(xlog_t *log, roundoff < BBTOB(1))); /* 
move grant heads by roundoff in sync */ - spin_lock(&log->l_grant_lock); xlog_grant_add_space(log, &log->l_grant_reserve_head, roundoff); xlog_grant_add_space(log, &log->l_grant_write_head, roundoff); - spin_unlock(&log->l_grant_lock); /* put cycle number in every block */ xlog_pack_data(log, iclog, roundoff); @@ -1455,7 +1459,6 @@ xlog_dealloc_log(xlog_t *log) iclog = next_iclog; } spinlock_destroy(&log->l_icloglock); - spinlock_destroy(&log->l_grant_lock); xfs_buf_free(log->l_xbuf); log->l_mp->m_log = NULL; @@ -2574,13 +2577,10 @@ redo: } /* we've got enough space */ - spin_lock(&log->l_grant_lock); xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes); xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); trace_xfs_log_grant_exit(log, tic); - xlog_verify_grant_head(log, 1); xlog_verify_grant_tail(log); - spin_unlock(&log->l_grant_lock); return 0; error_return_unlocked: @@ -2694,12 +2694,9 @@ redo: } /* we've got enough space */ - spin_lock(&log->l_grant_lock); xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes); trace_xfs_log_regrant_write_exit(log, tic); - xlog_verify_grant_head(log, 1); xlog_verify_grant_tail(log); - spin_unlock(&log->l_grant_lock); return 0; @@ -2737,7 +2734,6 @@ xlog_regrant_reserve_log_space(xlog_t *log, if (ticket->t_cnt > 0) ticket->t_cnt--; - spin_lock(&log->l_grant_lock); xlog_grant_sub_space(log, &log->l_grant_reserve_head, ticket->t_curr_res); xlog_grant_sub_space(log, &log->l_grant_write_head, @@ -2747,21 +2743,15 @@ xlog_regrant_reserve_log_space(xlog_t *log, trace_xfs_log_regrant_reserve_sub(log, ticket); - xlog_verify_grant_head(log, 1); - /* just return if we still have some of the pre-reserved space */ - if (ticket->t_cnt > 0) { - spin_unlock(&log->l_grant_lock); + if (ticket->t_cnt > 0) return; - } xlog_grant_add_space(log, &log->l_grant_reserve_head, ticket->t_unit_res); trace_xfs_log_regrant_reserve_exit(log, ticket); - xlog_verify_grant_head(log, 0); - spin_unlock(&log->l_grant_lock); ticket->t_curr_res = ticket->t_unit_res; xlog_tic_reset_res(ticket); } /* xlog_regrant_reserve_log_space */ @@ -2790,7 +2780,6 @@ xlog_ungrant_log_space(xlog_t *log, if (ticket->t_cnt > 0) ticket->t_cnt--; - spin_lock(&log->l_grant_lock); trace_xfs_log_ungrant_enter(log, ticket); trace_xfs_log_ungrant_sub(log, ticket); @@ -2809,8 +2798,6 @@ xlog_ungrant_log_space(xlog_t *log, trace_xfs_log_ungrant_exit(log, ticket); - xlog_verify_grant_head(log, 1); - spin_unlock(&log->l_grant_lock); xfs_log_move_tail(log->l_mp, 1); } /* xlog_ungrant_log_space */ @@ -3428,28 +3415,6 @@ xlog_verify_dest_ptr( xlog_panic("xlog_verify_dest_ptr: invalid ptr"); } -STATIC void -xlog_verify_grant_head(xlog_t *log, int equals) -{ - int reserve_cycle, reserve_space; - int write_cycle, write_space; - - xlog_crack_grant_head(&log->l_grant_reserve_head, - &reserve_cycle, &reserve_space); - xlog_crack_grant_head(&log->l_grant_write_head, - &write_cycle, &write_space); - - if (reserve_cycle == write_cycle) { - if (equals) - ASSERT(reserve_space >= write_space); - else - ASSERT(reserve_space > write_space); - } else { - ASSERT(reserve_cycle - 1 == write_cycle); - ASSERT(write_space >= reserve_space); - } -} - STATIC void xlog_verify_grant_tail( struct log *log) diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index befb2fc5b027..d5f8be8f4bf6 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -510,9 +510,6 @@ typedef struct log { int l_curr_block; /* current logical log block */ int l_prev_block; /* previous logical log block */ - /* The 
following block of fields are changed while holding grant_lock */ - spinlock_t l_grant_lock ____cacheline_aligned_in_smp; - /* * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and * read without needing to hold specific locks. To avoid operations @@ -599,23 +596,33 @@ xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block) } /* - * When we crack the grrant head, we sample it first so that the value will not + * When we crack the grant head, we sample it first so that the value will not * change while we are cracking it into the component values. This means we * will always get consistent component values to work from. */ static inline void -xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space) +xlog_crack_grant_head_val(int64_t val, int *cycle, int *space) { - int64_t val = atomic64_read(head); - *cycle = val >> 32; *space = val & 0xffffffff; } +static inline void +xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space) +{ + xlog_crack_grant_head_val(atomic64_read(head), cycle, space); +} + +static inline int64_t +xlog_assign_grant_head_val(int cycle, int space) +{ + return ((int64_t)cycle << 32) | space; +} + static inline void xlog_assign_grant_head(atomic64_t *head, int cycle, int space) { - atomic64_set(head, ((int64_t)cycle << 32) | space); + atomic64_set(head, xlog_assign_grant_head_val(cycle, space)); } /* -- cgit v1.2.2 From c8b031ebc1246d42463c5c69df8f610ca9f48e77 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 9 Dec 2010 15:53:28 +0100 Subject: NFS: Remove redundant unlikely() IS_ERR() already implies unlikely(), so it can be omitted here. Signed-off-by: Tobias Klauser Signed-off-by: Trond Myklebust --- fs/nfs/mount_clnt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c index 697e07235f30..d4c2d6b7507e 100644 --- a/fs/nfs/mount_clnt.c +++ b/fs/nfs/mount_clnt.c @@ -246,7 +246,7 @@ void nfs_umount(const struct nfs_mount_request *info) args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; clnt = rpc_create(&args); - if (unlikely(IS_ERR(clnt))) + if (IS_ERR(clnt)) goto out_clnt_err; dprintk("NFS: sending UMNT request for %s:%s\n", -- cgit v1.2.2 From 72895b1ac7baeda76835cddb3edb019a90d32bcb Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Thu, 9 Dec 2010 23:17:15 +0100 Subject: nfs: Take advantage of kmem_cache_zalloc() in nfs_page_alloc() Take advantage of kmem_cache_zalloc() in nfs_page_alloc(). Save a call to memset() and a few bytes. Before: [jj@dragon linux-2.6]$ size fs/nfs/pagelist.o text data bss dec hex filename 1765 0 8 1773 6ed fs/nfs/pagelist.o After: [jj@dragon linux-2.6]$ size fs/nfs/pagelist.o text data bss dec hex filename 1749 0 8 1757 6dd fs/nfs/pagelist.o Signed-off-by: Jesper Juhl Signed-off-by: Trond Myklebust --- fs/nfs/pagelist.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index b68536cc9046..e1164e3f9e69 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -26,12 +26,9 @@ static struct kmem_cache *nfs_page_cachep; static inline struct nfs_page * nfs_page_alloc(void) { - struct nfs_page *p; - p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL); - if (p) { - memset(p, 0, sizeof(*p)); + struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL); + if (p) INIT_LIST_HEAD(&p->wb_list); - } return p; } -- cgit v1.2.2 From 611c96c8f728c4bcdbadaa2387942d3c0641cadf Mon Sep 17 00:00:00 2001 From: "J. 
Bruce Fields" Date: Mon, 13 Dec 2010 19:05:46 -0500 Subject: nfs4: fix units bug causing hang on recovery Note that cl_lease_time is in jiffies. This can cause a very long wait in the NFS4ERR_CLID_INUSE case. Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 4435e5e1f904..494c68739ff4 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3517,7 +3517,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, if (signalled()) break; if (loop++ & 1) - ssleep(clp->cl_lease_time + 1); + ssleep(clp->cl_lease_time / HZ + 1); else if (++clp->cl_id_uniquifier == 0) break; -- cgit v1.2.2 From aa69947399a119d7f1b35bbcd62c849839b35449 Mon Sep 17 00:00:00 2001 From: Stanislav Kinsbursky Date: Wed, 8 Dec 2010 12:40:13 +0300 Subject: NFS: suppressing showing of default mount port value in /proc fixed Update: added check for zero value as it was before (note: can't simply check mountd_port for positive value because it's typeof unsigned short) Default value for mount server port is set to NFS_UNSPEC_PORT (-1) and will not be changed during parsing mount options for mound data version 6. This default value will be showed for mountport in /proc/mounts always since current default check is for zero value. This small mistake leads to big problem, because during umount.nfs execution from old user-space utils (at least nfs-utils 1.0.9) this value will be used as the server port to connect to. This request will be rejected (since port is 65535) and thus nfs mount point can't be unmounted. Note from Chuck Lever (chuck.lever@oracle.com): this is only possible if /etc/mtab is a link to /proc/mounts. Not all systems have this configuration. Signed-off-by: Stanislav Kinsbursky Signed-off-by: Trond Myklebust --- fs/nfs/super.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 4100630c9a5b..dd56eec16eac 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -598,7 +598,9 @@ static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss, if (nfss->mountd_version || showdefaults) seq_printf(m, ",mountvers=%u", nfss->mountd_version); - if (nfss->mountd_port || showdefaults) + if ((nfss->mountd_port && + nfss->mountd_port != (unsigned short)NFS_UNSPEC_PORT) || + showdefaults) seq_printf(m, ",mountport=%u", nfss->mountd_port); nfs_show_mountd_netid(m, nfss, showdefaults); -- cgit v1.2.2 From 1174dd1f890b7f8be8ec6a7071657fe8f59e18b7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 21 Dec 2010 10:52:24 -0500 Subject: NFSv4: Convert a few commas into semicolons... 
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 16 ++++++++-------- fs/nfs/unlink.c | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 494c68739ff4..78b08993a38b 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1984,8 +1984,8 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, i path_get(path); calldata->path = *path; - msg.rpc_argp = &calldata->arg, - msg.rpc_resp = &calldata->res, + msg.rpc_argp = &calldata->arg; + msg.rpc_resp = &calldata->res; task_setup_data.callback_data = calldata; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) @@ -3663,8 +3663,8 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co data->rpc_status = 0; task_setup_data.callback_data = data; - msg.rpc_argp = &data->args, - msg.rpc_resp = &data->res, + msg.rpc_argp = &data->args; + msg.rpc_resp = &data->res; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) return PTR_ERR(task); @@ -3908,8 +3908,8 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl, return ERR_PTR(-ENOMEM); } - msg.rpc_argp = &data->arg, - msg.rpc_resp = &data->res, + msg.rpc_argp = &data->arg; + msg.rpc_resp = &data->res; task_setup_data.callback_data = data; return rpc_run_task(&task_setup_data); } @@ -4145,8 +4145,8 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f data->arg.reclaim = NFS_LOCK_RECLAIM; task_setup_data.callback_ops = &nfs4_recover_lock_ops; } - msg.rpc_argp = &data->arg, - msg.rpc_resp = &data->res, + msg.rpc_argp = &data->arg; + msg.rpc_resp = &data->res; task_setup_data.callback_data = data; task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index 7bdec8531400..3bf1e53c4a3f 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -429,7 +429,7 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir, data = kzalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) return ERR_PTR(-ENOMEM); - task_setup_data.callback_data = data, + task_setup_data.callback_data = data; data->cred = rpc_lookup_cred(); if (IS_ERR(data->cred)) { -- cgit v1.2.2 From 8844355df7f4e091b03cc131e1549631238b397b Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 25 Oct 2010 15:11:43 +0800 Subject: btrfs: Fix bugs in zlib workspace - Fix a race that can result in alloc_workspace > cpus. - Fix to check num_workspace after wakeup. 
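Both fixes follow the usual wait-queue discipline: the condition being slept on is tested under the lock before deciding to wait, and tested again only after prepare_to_wait() has queued the task, so a wakeup arriving in the window cannot be lost. A generic sketch of that shape with invented names, not the btrfs code:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(resource_wait);

/* hypothetical helper: sleep until available() reports a free resource */
static void wait_for_resource(bool (*available)(void))
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&resource_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (available())	/* recheck only after we are on the queue */
			break;
		schedule();		/* a wake_up() issued meanwhile is not lost */
	}
	finish_wait(&resource_wait, &wait);
}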
Signed-off-by: Li Zefan --- fs/btrfs/zlib.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index b9cd5445f71c..e5b8b22e07d6 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -75,16 +75,19 @@ again: return workspace; } - spin_unlock(&workspace_lock); if (atomic_read(&alloc_workspace) > cpus) { DEFINE_WAIT(wait); + + spin_unlock(&workspace_lock); prepare_to_wait(&workspace_wait, &wait, TASK_UNINTERRUPTIBLE); - if (atomic_read(&alloc_workspace) > cpus) + if (atomic_read(&alloc_workspace) > cpus && !num_workspace) schedule(); finish_wait(&workspace_wait, &wait); goto again; } atomic_inc(&alloc_workspace); + spin_unlock(&workspace_lock); + workspace = kzalloc(sizeof(*workspace), GFP_NOFS); if (!workspace) { ret = -ENOMEM; -- cgit v1.2.2 From 4b72029dc3fd6ba7dc45ccd1cf0aa0ebfa209bd3 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 08:27:27 +0800 Subject: btrfs: Fix error handling in zlib Return failure if alloc_page() fails to allocate memory, and the upper code will just give up compression. Signed-off-by: Li Zefan --- fs/btrfs/zlib.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index e5b8b22e07d6..b01558661e3b 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -225,6 +225,10 @@ int btrfs_zlib_compress_pages(struct address_space *mapping, data_in = kmap(in_page); out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (out_page == NULL) { + ret = -1; + goto out; + } cpage_out = kmap(out_page); pages[0] = out_page; nr_pages = 1; @@ -263,6 +267,10 @@ int btrfs_zlib_compress_pages(struct address_space *mapping, goto out; } out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (out_page == NULL) { + ret = -1; + goto out; + } cpage_out = kmap(out_page); pages[nr_pages] = out_page; nr_pages++; -- cgit v1.2.2 From 261507a02ccba9afda919852263b6bc1581ce1ef Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Fri, 17 Dec 2010 14:21:50 +0800 Subject: btrfs: Allow to add new compression algorithm Make the code aware of compression type, instead of always assuming zlib compression. Also make the zlib workspace function as common code for all compression types. 
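The shape of the change is a per-type operations table, so the generic compression code never names zlib directly. A minimal standalone illustration of that dispatch pattern with invented names (the real struct is btrfs_compress_op, visible in the diff below):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

enum compress_type { COMPRESS_NONE, COMPRESS_ZLIB, COMPRESS_TYPES };

struct compress_op {
	const char *name;
	int (*compress)(const void *in, size_t len, void *out, size_t *out_len);
};

static int zlib_compress_stub(const void *in, size_t len, void *out, size_t *out_len)
{
	memcpy(out, in, len);	/* placeholder; a real version would call into zlib */
	*out_len = len;
	return 0;
}

static const struct compress_op zlib_ops = { "zlib", zlib_compress_stub };

static const struct compress_op *compress_ops[COMPRESS_TYPES] = {
	[COMPRESS_ZLIB] = &zlib_ops,
};

static int do_compress(enum compress_type type, const void *in, size_t len,
		       void *out, size_t *out_len)
{
	const struct compress_op *op;

	if (type <= COMPRESS_NONE || type >= COMPRESS_TYPES)
		return -1;	/* nothing to do, or unknown type */
	op = compress_ops[type];
	printf("compressing with %s\n", op->name);
	return op->compress(in, len, out, out_len);
}

int main(void)
{
	char out[16];
	size_t out_len;

	return do_compress(COMPRESS_ZLIB, "hello", 5, out, &out_len);
}

Adding another algorithm then comes down to one more ops struct and one more table entry, which is exactly what this change prepares for.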
Signed-off-by: Li Zefan --- fs/btrfs/btrfs_inode.h | 2 +- fs/btrfs/compression.c | 236 +++++++++++++++++++++++++++++++++++++++++++- fs/btrfs/compression.h | 66 +++++++++---- fs/btrfs/ctree.h | 10 +- fs/btrfs/extent_io.c | 5 +- fs/btrfs/extent_io.h | 17 +++- fs/btrfs/extent_map.c | 2 + fs/btrfs/extent_map.h | 3 +- fs/btrfs/file.c | 2 + fs/btrfs/inode.c | 82 ++++++++++------ fs/btrfs/ioctl.c | 4 +- fs/btrfs/ordered-data.c | 18 +++- fs/btrfs/ordered-data.h | 8 +- fs/btrfs/super.c | 47 ++++++--- fs/btrfs/zlib.c | 253 ++++++++++-------------------------------------- 15 files changed, 473 insertions(+), 282 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 6ad63f17eca0..ccc991c542df 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -157,7 +157,7 @@ struct btrfs_inode { /* * always compress this one file */ - unsigned force_compress:1; + unsigned force_compress:4; struct inode vfs_inode; }; diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index b50bc4bd5c56..6638c9877720 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -62,6 +62,9 @@ struct compressed_bio { /* number of bytes on disk */ unsigned long compressed_len; + /* the compression algorithm for this bio */ + int compress_type; + /* number of compressed pages in the array */ unsigned long nr_pages; @@ -173,11 +176,12 @@ static void end_compressed_bio_read(struct bio *bio, int err) /* ok, we're the last bio for this extent, lets start * the decompression. */ - ret = btrfs_zlib_decompress_biovec(cb->compressed_pages, - cb->start, - cb->orig_bio->bi_io_vec, - cb->orig_bio->bi_vcnt, - cb->compressed_len); + ret = btrfs_decompress_biovec(cb->compress_type, + cb->compressed_pages, + cb->start, + cb->orig_bio->bi_io_vec, + cb->orig_bio->bi_vcnt, + cb->compressed_len); csum_failed: if (ret) cb->errors = 1; @@ -588,6 +592,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->len = uncompressed_len; cb->compressed_len = compressed_len; + cb->compress_type = extent_compress_type(bio_flags); cb->orig_bio = bio; nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / @@ -677,3 +682,224 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, bio_put(comp_bio); return 0; } + +static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES]; +static spinlock_t comp_workspace_lock[BTRFS_COMPRESS_TYPES]; +static int comp_num_workspace[BTRFS_COMPRESS_TYPES]; +static atomic_t comp_alloc_workspace[BTRFS_COMPRESS_TYPES]; +static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES]; + +struct btrfs_compress_op *btrfs_compress_op[] = { + &btrfs_zlib_compress, +}; + +int __init btrfs_init_compress(void) +{ + int i; + + for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) { + INIT_LIST_HEAD(&comp_idle_workspace[i]); + spin_lock_init(&comp_workspace_lock[i]); + atomic_set(&comp_alloc_workspace[i], 0); + init_waitqueue_head(&comp_workspace_wait[i]); + } + return 0; +} + +/* + * this finds an available workspace or allocates a new one + * ERR_PTR is returned if things go bad. 
+ */ +static struct list_head *find_workspace(int type) +{ + struct list_head *workspace; + int cpus = num_online_cpus(); + int idx = type - 1; + + struct list_head *idle_workspace = &comp_idle_workspace[idx]; + spinlock_t *workspace_lock = &comp_workspace_lock[idx]; + atomic_t *alloc_workspace = &comp_alloc_workspace[idx]; + wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx]; + int *num_workspace = &comp_num_workspace[idx]; +again: + spin_lock(workspace_lock); + if (!list_empty(idle_workspace)) { + workspace = idle_workspace->next; + list_del(workspace); + (*num_workspace)--; + spin_unlock(workspace_lock); + return workspace; + + } + if (atomic_read(alloc_workspace) > cpus) { + DEFINE_WAIT(wait); + + spin_unlock(workspace_lock); + prepare_to_wait(workspace_wait, &wait, TASK_UNINTERRUPTIBLE); + if (atomic_read(alloc_workspace) > cpus && !*num_workspace) + schedule(); + finish_wait(workspace_wait, &wait); + goto again; + } + atomic_inc(alloc_workspace); + spin_unlock(workspace_lock); + + workspace = btrfs_compress_op[idx]->alloc_workspace(); + if (IS_ERR(workspace)) { + atomic_dec(alloc_workspace); + wake_up(workspace_wait); + } + return workspace; +} + +/* + * put a workspace struct back on the list or free it if we have enough + * idle ones sitting around + */ +static void free_workspace(int type, struct list_head *workspace) +{ + int idx = type - 1; + struct list_head *idle_workspace = &comp_idle_workspace[idx]; + spinlock_t *workspace_lock = &comp_workspace_lock[idx]; + atomic_t *alloc_workspace = &comp_alloc_workspace[idx]; + wait_queue_head_t *workspace_wait = &comp_workspace_wait[idx]; + int *num_workspace = &comp_num_workspace[idx]; + + spin_lock(workspace_lock); + if (*num_workspace < num_online_cpus()) { + list_add_tail(workspace, idle_workspace); + (*num_workspace)++; + spin_unlock(workspace_lock); + goto wake; + } + spin_unlock(workspace_lock); + + btrfs_compress_op[idx]->free_workspace(workspace); + atomic_dec(alloc_workspace); +wake: + if (waitqueue_active(workspace_wait)) + wake_up(workspace_wait); +} + +/* + * cleanup function for module exit + */ +static void free_workspaces(void) +{ + struct list_head *workspace; + int i; + + for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) { + while (!list_empty(&comp_idle_workspace[i])) { + workspace = comp_idle_workspace[i].next; + list_del(workspace); + btrfs_compress_op[i]->free_workspace(workspace); + atomic_dec(&comp_alloc_workspace[i]); + } + } +} + +/* + * given an address space and start/len, compress the bytes. + * + * pages are allocated to hold the compressed result and stored + * in 'pages' + * + * out_pages is used to return the number of pages allocated. There + * may be pages allocated even if we return an error + * + * total_in is used to return the number of bytes actually read. It + * may be smaller then len if we had to exit early because we + * ran out of room in the pages array or because we cross the + * max_out threshold. 
+ * + * total_out is used to return the total number of compressed bytes + * + * max_out tells us the max number of bytes that we're allowed to + * stuff into pages + */ +int btrfs_compress_pages(int type, struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out) +{ + struct list_head *workspace; + int ret; + + workspace = find_workspace(type); + if (IS_ERR(workspace)) + return -1; + + ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping, + start, len, pages, + nr_dest_pages, out_pages, + total_in, total_out, + max_out); + free_workspace(type, workspace); + return ret; +} + +/* + * pages_in is an array of pages with compressed data. + * + * disk_start is the starting logical offset of this array in the file + * + * bvec is a bio_vec of pages from the file that we want to decompress into + * + * vcnt is the count of pages in the biovec + * + * srclen is the number of bytes in pages_in + * + * The basic idea is that we have a bio that was created by readpages. + * The pages in the bio are for the uncompressed data, and they may not + * be contiguous. They all correspond to the range of bytes covered by + * the compressed extent. + */ +int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start, + struct bio_vec *bvec, int vcnt, size_t srclen) +{ + struct list_head *workspace; + int ret; + + workspace = find_workspace(type); + if (IS_ERR(workspace)) + return -ENOMEM; + + ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in, + disk_start, + bvec, vcnt, srclen); + free_workspace(type, workspace); + return ret; +} + +/* + * a less complex decompression routine. Our compressed data fits in a + * single page, and we want to read a single page out of it. 
+ * start_byte tells us the offset into the compressed data we're interested in + */ +int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, + unsigned long start_byte, size_t srclen, size_t destlen) +{ + struct list_head *workspace; + int ret; + + workspace = find_workspace(type); + if (IS_ERR(workspace)) + return -ENOMEM; + + ret = btrfs_compress_op[type-1]->decompress(workspace, data_in, + dest_page, start_byte, + srclen, destlen); + + free_workspace(type, workspace); + return ret; +} + +void __exit btrfs_exit_compress(void) +{ + free_workspaces(); +} diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 421f5b4aa715..9b5f2f365b79 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -19,24 +19,22 @@ #ifndef __BTRFS_COMPRESSION_ #define __BTRFS_COMPRESSION_ -int btrfs_zlib_decompress(unsigned char *data_in, - struct page *dest_page, - unsigned long start_byte, - size_t srclen, size_t destlen); -int btrfs_zlib_compress_pages(struct address_space *mapping, - u64 start, unsigned long len, - struct page **pages, - unsigned long nr_dest_pages, - unsigned long *out_pages, - unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out); -int btrfs_zlib_decompress_biovec(struct page **pages_in, - u64 disk_start, - struct bio_vec *bvec, - int vcnt, - size_t srclen); -void btrfs_zlib_exit(void); +int btrfs_init_compress(void); +void btrfs_exit_compress(void); + +int btrfs_compress_pages(int type, struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out); +int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start, + struct bio_vec *bvec, int vcnt, size_t srclen); +int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, + unsigned long start_byte, size_t srclen, size_t destlen); + int btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned long len, u64 disk_start, unsigned long compressed_len, @@ -44,4 +42,36 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned long nr_pages); int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, int mirror_num, unsigned long bio_flags); + +struct btrfs_compress_op { + struct list_head *(*alloc_workspace)(void); + + void (*free_workspace)(struct list_head *workspace); + + int (*compress_pages)(struct list_head *workspace, + struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out); + + int (*decompress_biovec)(struct list_head *workspace, + struct page **pages_in, + u64 disk_start, + struct bio_vec *bvec, + int vcnt, + size_t srclen); + + int (*decompress)(struct list_head *workspace, + unsigned char *data_in, + struct page *dest_page, + unsigned long start_byte, + size_t srclen, size_t destlen); +}; + +extern struct btrfs_compress_op btrfs_zlib_compress; + #endif diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index af52f6d7a4d8..e06534438592 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -551,9 +551,10 @@ struct btrfs_timespec { } __attribute__ ((__packed__)); enum btrfs_compression_type { - BTRFS_COMPRESS_NONE = 0, - BTRFS_COMPRESS_ZLIB = 1, - BTRFS_COMPRESS_LAST = 2, + BTRFS_COMPRESS_NONE = 0, + BTRFS_COMPRESS_ZLIB = 1, + BTRFS_COMPRESS_TYPES = 1, + 
BTRFS_COMPRESS_LAST = 2, }; struct btrfs_inode_item { @@ -895,7 +896,8 @@ struct btrfs_fs_info { */ u64 last_trans_log_full_commit; u64 open_ioctl_trans; - unsigned long mount_opt; + unsigned long mount_opt:20; + unsigned long compress_type:4; u64 max_inline; u64 alloc_start; struct btrfs_transaction *running_transaction; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 5e7a94d7da89..f1d198128959 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2028,8 +2028,11 @@ static int __extent_read_full_page(struct extent_io_tree *tree, BUG_ON(extent_map_end(em) <= cur); BUG_ON(end < cur); - if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { this_bio_flag = EXTENT_BIO_COMPRESSED; + extent_set_compress_type(&this_bio_flag, + em->compress_type); + } iosize = min(extent_map_end(em) - cur, end - cur + 1); cur_end = min(extent_map_end(em) - 1, end); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 4183c8178f01..7083cfafd061 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -20,8 +20,12 @@ #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) #define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC) -/* flags for bio submission */ +/* + * flags for bio submission. The high bits indicate the compression + * type for this bio + */ #define EXTENT_BIO_COMPRESSED 1 +#define EXTENT_BIO_FLAG_SHIFT 16 /* these are bit numbers for test/set bit */ #define EXTENT_BUFFER_UPTODATE 0 @@ -135,6 +139,17 @@ struct extent_buffer { wait_queue_head_t lock_wq; }; +static inline void extent_set_compress_type(unsigned long *bio_flags, + int compress_type) +{ + *bio_flags |= compress_type << EXTENT_BIO_FLAG_SHIFT; +} + +static inline int extent_compress_type(unsigned long bio_flags) +{ + return bio_flags >> EXTENT_BIO_FLAG_SHIFT; +} + struct extent_map_tree; static inline struct extent_state *extent_state_next(struct extent_state *state) diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 23cb8da3ff66..b0e1fce12530 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -3,6 +3,7 @@ #include #include #include +#include "ctree.h" #include "extent_map.h" @@ -54,6 +55,7 @@ struct extent_map *alloc_extent_map(gfp_t mask) return em; em->in_tree = 0; em->flags = 0; + em->compress_type = BTRFS_COMPRESS_NONE; atomic_set(&em->refs, 1); return em; } diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index ab6d74b6e647..28b44dbd1e35 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h @@ -26,7 +26,8 @@ struct extent_map { unsigned long flags; struct block_device *bdev; atomic_t refs; - int in_tree; + unsigned int in_tree:1; + unsigned int compress_type:4; }; struct extent_map_tree { diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 66836d85763b..05df688c96f4 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -224,6 +224,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, split->bdev = em->bdev; split->flags = flags; + split->compress_type = em->compress_type; ret = add_extent_mapping(em_tree, split); BUG_ON(ret); free_extent_map(split); @@ -238,6 +239,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, split->len = em->start + em->len - (start + len); split->bdev = em->bdev; split->flags = flags; + split->compress_type = em->compress_type; if (compressed) { split->block_len = em->block_len; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5f9194438f7c..ba563b2a5d6c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ 
-122,10 +122,10 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, size_t cur_size = size; size_t datasize; unsigned long offset; - int use_compress = 0; + int compress_type = BTRFS_COMPRESS_NONE; if (compressed_size && compressed_pages) { - use_compress = 1; + compress_type = root->fs_info->compress_type; cur_size = compressed_size; } @@ -159,7 +159,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, btrfs_set_file_extent_ram_bytes(leaf, ei, size); ptr = btrfs_file_extent_inline_start(ei); - if (use_compress) { + if (compress_type != BTRFS_COMPRESS_NONE) { struct page *cpage; int i = 0; while (compressed_size > 0) { @@ -176,7 +176,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, compressed_size -= cur_size; } btrfs_set_file_extent_compression(leaf, ei, - BTRFS_COMPRESS_ZLIB); + compress_type); } else { page = find_get_page(inode->i_mapping, start >> PAGE_CACHE_SHIFT); @@ -263,6 +263,7 @@ struct async_extent { u64 compressed_size; struct page **pages; unsigned long nr_pages; + int compress_type; struct list_head list; }; @@ -280,7 +281,8 @@ static noinline int add_async_extent(struct async_cow *cow, u64 start, u64 ram_size, u64 compressed_size, struct page **pages, - unsigned long nr_pages) + unsigned long nr_pages, + int compress_type) { struct async_extent *async_extent; @@ -290,6 +292,7 @@ static noinline int add_async_extent(struct async_cow *cow, async_extent->compressed_size = compressed_size; async_extent->pages = pages; async_extent->nr_pages = nr_pages; + async_extent->compress_type = compress_type; list_add_tail(&async_extent->list, &cow->extents); return 0; } @@ -332,6 +335,7 @@ static noinline int compress_file_range(struct inode *inode, unsigned long max_uncompressed = 128 * 1024; int i; int will_compress; + int compress_type = root->fs_info->compress_type; actual_end = min_t(u64, isize, end + 1); again: @@ -381,12 +385,16 @@ again: WARN_ON(pages); pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); - ret = btrfs_zlib_compress_pages(inode->i_mapping, start, - total_compressed, pages, - nr_pages, &nr_pages_ret, - &total_in, - &total_compressed, - max_compressed); + if (BTRFS_I(inode)->force_compress) + compress_type = BTRFS_I(inode)->force_compress; + + ret = btrfs_compress_pages(compress_type, + inode->i_mapping, start, + total_compressed, pages, + nr_pages, &nr_pages_ret, + &total_in, + &total_compressed, + max_compressed); if (!ret) { unsigned long offset = total_compressed & @@ -493,7 +501,8 @@ again: * and will submit them to the elevator. 
*/ add_async_extent(async_cow, start, num_bytes, - total_compressed, pages, nr_pages_ret); + total_compressed, pages, nr_pages_ret, + compress_type); if (start + num_bytes < end) { start += num_bytes; @@ -515,7 +524,8 @@ cleanup_and_bail_uncompressed: __set_page_dirty_nobuffers(locked_page); /* unlocked later on in the async handlers */ } - add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0); + add_async_extent(async_cow, start, end - start + 1, + 0, NULL, 0, BTRFS_COMPRESS_NONE); *num_added += 1; } @@ -640,6 +650,7 @@ retry: em->block_start = ins.objectid; em->block_len = ins.offset; em->bdev = root->fs_info->fs_devices->latest_bdev; + em->compress_type = async_extent->compress_type; set_bit(EXTENT_FLAG_PINNED, &em->flags); set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); @@ -656,11 +667,13 @@ retry: async_extent->ram_size - 1, 0); } - ret = btrfs_add_ordered_extent(inode, async_extent->start, - ins.objectid, - async_extent->ram_size, - ins.offset, - BTRFS_ORDERED_COMPRESSED); + ret = btrfs_add_ordered_extent_compress(inode, + async_extent->start, + ins.objectid, + async_extent->ram_size, + ins.offset, + BTRFS_ORDERED_COMPRESSED, + async_extent->compress_type); BUG_ON(ret); /* @@ -1670,7 +1683,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) struct btrfs_ordered_extent *ordered_extent = NULL; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_state *cached_state = NULL; - int compressed = 0; + int compress_type = 0; int ret; bool nolock = false; @@ -1711,9 +1724,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) trans->block_rsv = &root->fs_info->delalloc_block_rsv; if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) - compressed = 1; + compress_type = ordered_extent->compress_type; if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { - BUG_ON(compressed); + BUG_ON(compress_type); ret = btrfs_mark_extent_written(trans, inode, ordered_extent->file_offset, ordered_extent->file_offset + @@ -1727,7 +1740,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) ordered_extent->disk_len, ordered_extent->len, ordered_extent->len, - compressed, 0, 0, + compress_type, 0, 0, BTRFS_FILE_EXTENT_REG); unpin_extent_cache(&BTRFS_I(inode)->extent_tree, ordered_extent->file_offset, @@ -1829,6 +1842,8 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { logical = em->block_start; failrec->bio_flags = EXTENT_BIO_COMPRESSED; + extent_set_compress_type(&failrec->bio_flags, + em->compress_type); } failrec->logical = logical; free_extent_map(em); @@ -4930,8 +4945,10 @@ static noinline int uncompress_inline(struct btrfs_path *path, size_t max_size; unsigned long inline_size; unsigned long ptr; + int compress_type; WARN_ON(pg_offset != 0); + compress_type = btrfs_file_extent_compression(leaf, item); max_size = btrfs_file_extent_ram_bytes(leaf, item); inline_size = btrfs_file_extent_inline_item_len(leaf, btrfs_item_nr(leaf, path->slots[0])); @@ -4941,8 +4958,8 @@ static noinline int uncompress_inline(struct btrfs_path *path, read_extent_buffer(leaf, tmp, ptr, inline_size); max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); - ret = btrfs_zlib_decompress(tmp, page, extent_offset, - inline_size, max_size); + ret = btrfs_decompress(compress_type, tmp, page, + extent_offset, inline_size, max_size); if (ret) { char *kaddr = kmap_atomic(page, KM_USER0); unsigned long copy_size = min_t(u64, @@ -4984,7 +5001,7 @@ struct 
extent_map *btrfs_get_extent(struct inode *inode, struct page *page, struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_trans_handle *trans = NULL; - int compressed; + int compress_type; again: read_lock(&em_tree->lock); @@ -5043,7 +5060,7 @@ again: found_type = btrfs_file_extent_type(leaf, item); extent_start = found_key.offset; - compressed = btrfs_file_extent_compression(leaf, item); + compress_type = btrfs_file_extent_compression(leaf, item); if (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC) { extent_end = extent_start + @@ -5089,8 +5106,9 @@ again: em->block_start = EXTENT_MAP_HOLE; goto insert; } - if (compressed) { + if (compress_type != BTRFS_COMPRESS_NONE) { set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); + em->compress_type = compress_type; em->block_start = bytenr; em->block_len = btrfs_file_extent_disk_num_bytes(leaf, item); @@ -5124,12 +5142,14 @@ again: em->len = (copy_size + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); em->orig_start = EXTENT_MAP_INLINE; - if (compressed) + if (compress_type) { set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); + em->compress_type = compress_type; + } ptr = btrfs_file_extent_inline_start(item) + extent_offset; if (create == 0 && !PageUptodate(page)) { - if (btrfs_file_extent_compression(leaf, item) == - BTRFS_COMPRESS_ZLIB) { + if (btrfs_file_extent_compression(leaf, item) != + BTRFS_COMPRESS_NONE) { ret = uncompress_inline(path, inode, page, pg_offset, extent_offset, item); @@ -6479,7 +6499,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->ordered_data_close = 0; ei->orphan_meta_reserved = 0; ei->dummy_inode = 0; - ei->force_compress = 0; + ei->force_compress = BTRFS_COMPRESS_NONE; inode = &ei->vfs_inode; extent_map_tree_init(&ei->extent_tree, GFP_NOFS); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f87552a1d7ea..8cb86d4d763c 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -683,7 +683,7 @@ static int btrfs_defrag_file(struct file *file, total_read++; mutex_lock(&inode->i_mutex); if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) - BTRFS_I(inode)->force_compress = 1; + BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_ZLIB; ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); if (ret) @@ -781,7 +781,7 @@ loop_unlock: atomic_dec(&root->fs_info->async_submit_draining); mutex_lock(&inode->i_mutex); - BTRFS_I(inode)->force_compress = 0; + BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_NONE; mutex_unlock(&inode->i_mutex); } diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index ae7737e352c9..2b61e1ddcd99 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -172,7 +172,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, */ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, - int type, int dio) + int type, int dio, int compress_type) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; @@ -189,6 +189,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, entry->disk_len = disk_len; entry->bytes_left = len; entry->inode = inode; + entry->compress_type = compress_type; if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) set_bit(type, &entry->flags); @@ -220,14 +221,25 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type) { return __btrfs_add_ordered_extent(inode, 
file_offset, start, len, - disk_len, type, 0); + disk_len, type, 0, + BTRFS_COMPRESS_NONE); } int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type) { return __btrfs_add_ordered_extent(inode, file_offset, start, len, - disk_len, type, 1); + disk_len, type, 1, + BTRFS_COMPRESS_NONE); +} + +int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, + u64 start, u64 len, u64 disk_len, + int type, int compress_type) +{ + return __btrfs_add_ordered_extent(inode, file_offset, start, len, + disk_len, type, 0, + compress_type); } /* diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 61dca83119dd..ff1f69aa1883 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -68,7 +68,7 @@ struct btrfs_ordered_sum { #define BTRFS_ORDERED_NOCOW 2 /* set when we want to write in place */ -#define BTRFS_ORDERED_COMPRESSED 3 /* writing a compressed extent */ +#define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */ #define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */ @@ -93,6 +93,9 @@ struct btrfs_ordered_extent { /* flags (described above) */ unsigned long flags; + /* compression algorithm */ + int compress_type; + /* reference count */ atomic_t refs; @@ -148,6 +151,9 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type); int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, u64 start, u64 len, u64 disk_len, int type); +int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset, + u64 start, u64 len, u64 disk_len, + int type, int compress_type); int btrfs_add_ordered_sum(struct inode *inode, struct btrfs_ordered_extent *entry, struct btrfs_ordered_sum *sum); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 61bd79abb805..f348f2b93164 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -69,9 +69,9 @@ enum { Opt_degraded, Opt_subvol, Opt_subvolid, Opt_device, Opt_nodatasum, Opt_nodatacow, Opt_max_inline, Opt_alloc_start, Opt_nobarrier, Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, - Opt_compress_force, Opt_notreelog, Opt_ratio, Opt_flushoncommit, - Opt_discard, Opt_space_cache, Opt_clear_cache, Opt_err, - Opt_user_subvol_rm_allowed, + Opt_compress_type, Opt_compress_force, Opt_compress_force_type, + Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, + Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err, }; static match_table_t tokens = { @@ -86,7 +86,9 @@ static match_table_t tokens = { {Opt_alloc_start, "alloc_start=%s"}, {Opt_thread_pool, "thread_pool=%d"}, {Opt_compress, "compress"}, + {Opt_compress_type, "compress=%s"}, {Opt_compress_force, "compress-force"}, + {Opt_compress_force_type, "compress-force=%s"}, {Opt_ssd, "ssd"}, {Opt_ssd_spread, "ssd_spread"}, {Opt_nossd, "nossd"}, @@ -112,6 +114,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) char *p, *num, *orig; int intarg; int ret = 0; + char *compress_type; + bool compress_force = false; if (!options) return 0; @@ -154,14 +158,29 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) btrfs_set_opt(info->mount_opt, NODATACOW); btrfs_set_opt(info->mount_opt, NODATASUM); break; - case Opt_compress: - printk(KERN_INFO "btrfs: use compression\n"); - btrfs_set_opt(info->mount_opt, COMPRESS); - break; case Opt_compress_force: - printk(KERN_INFO "btrfs: forcing compression\n"); - btrfs_set_opt(info->mount_opt, FORCE_COMPRESS); + 
case Opt_compress_force_type: + compress_force = true; + case Opt_compress: + case Opt_compress_type: + if (token == Opt_compress || + token == Opt_compress_force || + strcmp(args[0].from, "zlib") == 0) { + compress_type = "zlib"; + info->compress_type = BTRFS_COMPRESS_ZLIB; + } else { + ret = -EINVAL; + goto out; + } + btrfs_set_opt(info->mount_opt, COMPRESS); + if (compress_force) { + btrfs_set_opt(info->mount_opt, FORCE_COMPRESS); + pr_info("btrfs: force %s compression\n", + compress_type); + } else + pr_info("btrfs: use %s compression\n", + compress_type); break; case Opt_ssd: printk(KERN_INFO "btrfs: use ssd allocation scheme\n"); @@ -898,10 +917,14 @@ static int __init init_btrfs_fs(void) if (err) return err; - err = btrfs_init_cachep(); + err = btrfs_init_compress(); if (err) goto free_sysfs; + err = btrfs_init_cachep(); + if (err) + goto free_compress; + err = extent_io_init(); if (err) goto free_cachep; @@ -929,6 +952,8 @@ free_extent_io: extent_io_exit(); free_cachep: btrfs_destroy_cachep(); +free_compress: + btrfs_exit_compress(); free_sysfs: btrfs_exit_sysfs(); return err; @@ -943,7 +968,7 @@ static void __exit exit_btrfs_fs(void) unregister_filesystem(&btrfs_fs_type); btrfs_exit_sysfs(); btrfs_cleanup_fs_uuids(); - btrfs_zlib_exit(); + btrfs_exit_compress(); } module_init(init_btrfs_fs) diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index b01558661e3b..9a3e693917f2 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -32,15 +32,6 @@ #include #include "compression.h" -/* Plan: call deflate() with avail_in == *sourcelen, - avail_out = *dstlen - 12 and flush == Z_FINISH. - If it doesn't manage to finish, call it again with - avail_in == 0 and avail_out set to the remaining 12 - bytes for it to clean up. - Q: Is 12 bytes sufficient? -*/ -#define STREAM_END_SPACE 12 - struct workspace { z_stream inf_strm; z_stream def_strm; @@ -48,155 +39,51 @@ struct workspace { struct list_head list; }; -static LIST_HEAD(idle_workspace); -static DEFINE_SPINLOCK(workspace_lock); -static unsigned long num_workspace; -static atomic_t alloc_workspace = ATOMIC_INIT(0); -static DECLARE_WAIT_QUEUE_HEAD(workspace_wait); +static void zlib_free_workspace(struct list_head *ws) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); -/* - * this finds an available zlib workspace or allocates a new one - * NULL or an ERR_PTR is returned if things go bad. 
- */ -static struct workspace *find_zlib_workspace(void) + vfree(workspace->def_strm.workspace); + vfree(workspace->inf_strm.workspace); + kfree(workspace->buf); + kfree(workspace); +} + +static struct list_head *zlib_alloc_workspace(void) { struct workspace *workspace; - int ret; - int cpus = num_online_cpus(); - -again: - spin_lock(&workspace_lock); - if (!list_empty(&idle_workspace)) { - workspace = list_entry(idle_workspace.next, struct workspace, - list); - list_del(&workspace->list); - num_workspace--; - spin_unlock(&workspace_lock); - return workspace; - - } - if (atomic_read(&alloc_workspace) > cpus) { - DEFINE_WAIT(wait); - - spin_unlock(&workspace_lock); - prepare_to_wait(&workspace_wait, &wait, TASK_UNINTERRUPTIBLE); - if (atomic_read(&alloc_workspace) > cpus && !num_workspace) - schedule(); - finish_wait(&workspace_wait, &wait); - goto again; - } - atomic_inc(&alloc_workspace); - spin_unlock(&workspace_lock); workspace = kzalloc(sizeof(*workspace), GFP_NOFS); - if (!workspace) { - ret = -ENOMEM; - goto fail; - } + if (!workspace) + return ERR_PTR(-ENOMEM); workspace->def_strm.workspace = vmalloc(zlib_deflate_workspacesize()); - if (!workspace->def_strm.workspace) { - ret = -ENOMEM; - goto fail; - } workspace->inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); - if (!workspace->inf_strm.workspace) { - ret = -ENOMEM; - goto fail_inflate; - } workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS); - if (!workspace->buf) { - ret = -ENOMEM; - goto fail_kmalloc; - } - return workspace; - -fail_kmalloc: - vfree(workspace->inf_strm.workspace); -fail_inflate: - vfree(workspace->def_strm.workspace); -fail: - kfree(workspace); - atomic_dec(&alloc_workspace); - wake_up(&workspace_wait); - return ERR_PTR(ret); -} - -/* - * put a workspace struct back on the list or free it if we have enough - * idle ones sitting around - */ -static int free_workspace(struct workspace *workspace) -{ - spin_lock(&workspace_lock); - if (num_workspace < num_online_cpus()) { - list_add_tail(&workspace->list, &idle_workspace); - num_workspace++; - spin_unlock(&workspace_lock); - if (waitqueue_active(&workspace_wait)) - wake_up(&workspace_wait); - return 0; - } - spin_unlock(&workspace_lock); - vfree(workspace->def_strm.workspace); - vfree(workspace->inf_strm.workspace); - kfree(workspace->buf); - kfree(workspace); + if (!workspace->def_strm.workspace || + !workspace->inf_strm.workspace || !workspace->buf) + goto fail; - atomic_dec(&alloc_workspace); - if (waitqueue_active(&workspace_wait)) - wake_up(&workspace_wait); - return 0; -} + INIT_LIST_HEAD(&workspace->list); -/* - * cleanup function for module exit - */ -static void free_workspaces(void) -{ - struct workspace *workspace; - while (!list_empty(&idle_workspace)) { - workspace = list_entry(idle_workspace.next, struct workspace, - list); - list_del(&workspace->list); - vfree(workspace->def_strm.workspace); - vfree(workspace->inf_strm.workspace); - kfree(workspace->buf); - kfree(workspace); - atomic_dec(&alloc_workspace); - } + return &workspace->list; +fail: + zlib_free_workspace(&workspace->list); + return ERR_PTR(-ENOMEM); } -/* - * given an address space and start/len, compress the bytes. - * - * pages are allocated to hold the compressed result and stored - * in 'pages' - * - * out_pages is used to return the number of pages allocated. There - * may be pages allocated even if we return an error - * - * total_in is used to return the number of bytes actually read. 
It - * may be smaller then len if we had to exit early because we - * ran out of room in the pages array or because we cross the - * max_out threshold. - * - * total_out is used to return the total number of compressed bytes - * - * max_out tells us the max number of bytes that we're allowed to - * stuff into pages - */ -int btrfs_zlib_compress_pages(struct address_space *mapping, - u64 start, unsigned long len, - struct page **pages, - unsigned long nr_dest_pages, - unsigned long *out_pages, - unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out) +static int zlib_compress_pages(struct list_head *ws, + struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out) { + struct workspace *workspace = list_entry(ws, struct workspace, list); int ret; - struct workspace *workspace; char *data_in; char *cpage_out; int nr_pages = 0; @@ -208,10 +95,6 @@ int btrfs_zlib_compress_pages(struct address_space *mapping, *total_out = 0; *total_in = 0; - workspace = find_zlib_workspace(); - if (IS_ERR(workspace)) - return -1; - if (Z_OK != zlib_deflateInit(&workspace->def_strm, 3)) { printk(KERN_WARNING "deflateInit failed\n"); ret = -1; @@ -325,35 +208,18 @@ out: kunmap(in_page); page_cache_release(in_page); } - free_workspace(workspace); return ret; } -/* - * pages_in is an array of pages with compressed data. - * - * disk_start is the starting logical offset of this array in the file - * - * bvec is a bio_vec of pages from the file that we want to decompress into - * - * vcnt is the count of pages in the biovec - * - * srclen is the number of bytes in pages_in - * - * The basic idea is that we have a bio that was created by readpages. - * The pages in the bio are for the uncompressed data, and they may not - * be contiguous. They all correspond to the range of bytes covered by - * the compressed extent. - */ -int btrfs_zlib_decompress_biovec(struct page **pages_in, - u64 disk_start, - struct bio_vec *bvec, - int vcnt, - size_t srclen) +static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, + u64 disk_start, + struct bio_vec *bvec, + int vcnt, + size_t srclen) { + struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0; int wbits = MAX_WBITS; - struct workspace *workspace; char *data_in; size_t total_out = 0; unsigned long page_bytes_left; @@ -371,10 +237,6 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in, unsigned long current_buf_start; char *kaddr; - workspace = find_zlib_workspace(); - if (IS_ERR(workspace)) - return -ENOMEM; - data_in = kmap(pages_in[page_in_index]); workspace->inf_strm.next_in = data_in; workspace->inf_strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE); @@ -400,8 +262,7 @@ int btrfs_zlib_decompress_biovec(struct page **pages_in, if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { printk(KERN_WARNING "inflateInit failed\n"); - ret = -1; - goto out; + return -1; } while (workspace->inf_strm.total_in < srclen) { ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH); @@ -527,35 +388,21 @@ done: zlib_inflateEnd(&workspace->inf_strm); if (data_in) kunmap(pages_in[page_in_index]); -out: - free_workspace(workspace); return ret; } -/* - * a less complex decompression routine. Our compressed data fits in a - * single page, and we want to read a single page out of it. 
- start_byte tells us the offset into the compressed data we're interested in - */ -int btrfs_zlib_decompress(unsigned char *data_in, - struct page *dest_page, - unsigned long start_byte, - size_t srclen, size_t destlen) +static int zlib_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, + unsigned long start_byte, + size_t srclen, size_t destlen) { + struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0; int wbits = MAX_WBITS; - struct workspace *workspace; unsigned long bytes_left = destlen; unsigned long total_out = 0; char *kaddr; - if (destlen > PAGE_CACHE_SIZE) - return -ENOMEM; - - workspace = find_zlib_workspace(); - if (IS_ERR(workspace)) - return -ENOMEM; - workspace->inf_strm.next_in = data_in; workspace->inf_strm.avail_in = srclen; workspace->inf_strm.total_in = 0; @@ -576,8 +423,7 @@ int btrfs_zlib_decompress(unsigned char *data_in, if (Z_OK != zlib_inflateInit2(&workspace->inf_strm, wbits)) { printk(KERN_WARNING "inflateInit failed\n"); - ret = -1; - goto out; + return -1; } while (bytes_left > 0) { @@ -627,12 +473,13 @@ next: ret = 0; zlib_inflateEnd(&workspace->inf_strm); -out: - free_workspace(workspace); return ret; } -void btrfs_zlib_exit(void) -{ - free_workspaces(); -} +struct btrfs_compress_op btrfs_zlib_compress = { + .alloc_workspace = zlib_alloc_workspace, + .free_workspace = zlib_free_workspace, + .compress_pages = zlib_compress_pages, + .decompress_biovec = zlib_decompress_biovec, + .decompress = zlib_decompress, +}; -- cgit v1.2.2 From a6fa6fae40ec336c7df6155255ae64ebef43a8bc Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 25 Oct 2010 15:12:26 +0800 Subject: btrfs: Add lzo compression support Lzo is a much faster compression algorithm than gzip, so it would allow more users to enable transparent compression, and some users can choose between compression ratio and speed for different applications. Usage: # mount -t btrfs -o compress[=<type>] dev /mnt or # mount -t btrfs -o compress-force[=<type>] dev /mnt "-o compress" without argument is still allowed for compatibility. Compatibility: If we mount a filesystem with lzo compression, it will not be able to be mounted in old kernels. One reason is that otherwise btrfs would dump compressed data, which sits in an inline extent, directly to the user. Performance: The test copied a linux source tarball (~400M) from an ext4 partition to the btrfs partition, and then extracted it. (time in seconds) lzo zlib nocompress copy: 10.6 21.7 14.9 extract: 70.1 94.4 66.6 (data size in MB) lzo zlib nocompress copy: 185.87 108.69 394.49 extract: 193.80 132.36 381.21 Changelog: v1 -> v2: - Select LZO_COMPRESS and LZO_DECOMPRESS in btrfs Kconfig. - Add incompatibility flag. - Fix error handling in compress code. Signed-off-by: Li Zefan --- fs/btrfs/Kconfig | 2 + fs/btrfs/Makefile | 2 +- fs/btrfs/compression.c | 1 + fs/btrfs/compression.h | 1 + fs/btrfs/ctree.h | 9 +- fs/btrfs/disk-io.c | 8 +- fs/btrfs/lzo.c | 509 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/super.c | 3 + 8 files changed, 527 insertions(+), 8 deletions(-) create mode 100644 fs/btrfs/lzo.c (limited to 'fs') diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig index 7bb3c020e570..ecb9fd3be143 100644 --- a/fs/btrfs/Kconfig +++ b/fs/btrfs/Kconfig @@ -4,6 +4,8 @@ config BTRFS_FS select LIBCRC32C select ZLIB_INFLATE select ZLIB_DEFLATE + select LZO_COMPRESS + select LZO_DECOMPRESS help Btrfs is a new filesystem with extents, writable snapshotting, support for multiple devices and many more features.
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index a35eb36b32fd..31610ea73aec 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile @@ -6,5 +6,5 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ transaction.o inode.o file.o tree-defrag.o \ extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ - export.o tree-log.o acl.o free-space-cache.o zlib.o \ + export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \ compression.o delayed-ref.o relocation.o diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 6638c9877720..8faa2df9e719 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -691,6 +691,7 @@ static wait_queue_head_t comp_workspace_wait[BTRFS_COMPRESS_TYPES]; struct btrfs_compress_op *btrfs_compress_op[] = { &btrfs_zlib_compress, + &btrfs_lzo_compress, }; int __init btrfs_init_compress(void) diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 9b5f2f365b79..f7ce217113fa 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -73,5 +73,6 @@ struct btrfs_compress_op { }; extern struct btrfs_compress_op btrfs_zlib_compress; +extern struct btrfs_compress_op btrfs_lzo_compress; #endif diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index e06534438592..53b984623983 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -398,13 +398,15 @@ struct btrfs_super_block { #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0) #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1) #define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2) +#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3) #define BTRFS_FEATURE_COMPAT_SUPP 0ULL #define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL #define BTRFS_FEATURE_INCOMPAT_SUPP \ (BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \ BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \ - BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) + BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \ + BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO) /* * A leaf is full of items. offset and size tell us where to find @@ -553,8 +555,9 @@ struct btrfs_timespec { enum btrfs_compression_type { BTRFS_COMPRESS_NONE = 0, BTRFS_COMPRESS_ZLIB = 1, - BTRFS_COMPRESS_TYPES = 1, - BTRFS_COMPRESS_LAST = 2, + BTRFS_COMPRESS_LZO = 2, + BTRFS_COMPRESS_TYPES = 2, + BTRFS_COMPRESS_LAST = 3, }; struct btrfs_inode_item { diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a5d2249e6da5..f88eb2ce7919 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1744,10 +1744,10 @@ struct btrfs_root *open_ctree(struct super_block *sb, } features = btrfs_super_incompat_flags(disk_super); - if (!(features & BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF)) { - features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; - btrfs_set_super_incompat_flags(disk_super, features); - } + features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; + if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO) + features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; + btrfs_set_super_incompat_flags(disk_super, features); features = btrfs_super_compat_ro_flags(disk_super) & ~BTRFS_FEATURE_COMPAT_RO_SUPP; diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c new file mode 100644 index 000000000000..523b144e2aec --- /dev/null +++ b/fs/btrfs/lzo.c @@ -0,0 +1,509 @@ +/* + * Copyright (C) 2008 Oracle. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public + * License v2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this program; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 021110-1307, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "compression.h" + +#define LZO_LEN 4 + +struct workspace { + void *mem; + void *buf; /* where compressed data goes */ + void *cbuf; /* where decompressed data goes */ + struct list_head list; +}; + +static void lzo_free_workspace(struct list_head *ws) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); + + vfree(workspace->buf); + vfree(workspace->cbuf); + vfree(workspace->mem); + kfree(workspace); +} + +static struct list_head *lzo_alloc_workspace(void) +{ + struct workspace *workspace; + + workspace = kzalloc(sizeof(*workspace), GFP_NOFS); + if (!workspace) + return ERR_PTR(-ENOMEM); + + workspace->mem = vmalloc(LZO1X_MEM_COMPRESS); + workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); + workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); + if (!workspace->mem || !workspace->buf || !workspace->cbuf) + goto fail; + + INIT_LIST_HEAD(&workspace->list); + + return &workspace->list; +fail: + lzo_free_workspace(&workspace->list); + return ERR_PTR(-ENOMEM); +} + +static inline void write_compress_length(char *buf, size_t len) +{ + __le32 dlen; + + dlen = cpu_to_le32(len); + memcpy(buf, &dlen, LZO_LEN); +} + +static inline size_t read_compress_length(char *buf) +{ + __le32 dlen; + + memcpy(&dlen, buf, LZO_LEN); + return le32_to_cpu(dlen); +} + +static int lzo_compress_pages(struct list_head *ws, + struct address_space *mapping, + u64 start, unsigned long len, + struct page **pages, + unsigned long nr_dest_pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out, + unsigned long max_out) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); + int ret = 0; + char *data_in; + char *cpage_out; + int nr_pages = 0; + struct page *in_page = NULL; + struct page *out_page = NULL; + unsigned long bytes_left; + + size_t in_len; + size_t out_len; + char *buf; + unsigned long tot_in = 0; + unsigned long tot_out = 0; + unsigned long pg_bytes_left; + unsigned long out_offset; + unsigned long bytes; + + *out_pages = 0; + *total_out = 0; + *total_in = 0; + + in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + data_in = kmap(in_page); + + /* + * store the size of all chunks of compressed data in + * the first 4 bytes + */ + out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (out_page == NULL) { + ret = -ENOMEM; + goto out; + } + cpage_out = kmap(out_page); + out_offset = LZO_LEN; + tot_out = LZO_LEN; + pages[0] = out_page; + nr_pages = 1; + pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; + + /* compress at most one page of data each time */ + in_len = min(len, PAGE_CACHE_SIZE); + while (tot_in < len) { + ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf, + &out_len, workspace->mem); + if (ret != LZO_E_OK) { + printk(KERN_DEBUG "btrfs deflate in loop returned %d\n", + ret); + ret = -1; + goto out; + } + + /* store the size of this chunk of compressed data */ + write_compress_length(cpage_out + out_offset, out_len); + tot_out += 
LZO_LEN; + out_offset += LZO_LEN; + pg_bytes_left -= LZO_LEN; + + tot_in += in_len; + tot_out += out_len; + + /* copy bytes from the working buffer into the pages */ + buf = workspace->cbuf; + while (out_len) { + bytes = min_t(unsigned long, pg_bytes_left, out_len); + + memcpy(cpage_out + out_offset, buf, bytes); + + out_len -= bytes; + pg_bytes_left -= bytes; + buf += bytes; + out_offset += bytes; + + /* + * we need another page for writing out. + * + * Note if there's less than 4 bytes left, we just + * skip to a new page. + */ + if ((out_len == 0 && pg_bytes_left < LZO_LEN) || + pg_bytes_left == 0) { + if (pg_bytes_left) { + memset(cpage_out + out_offset, 0, + pg_bytes_left); + tot_out += pg_bytes_left; + } + + /* we're done, don't allocate new page */ + if (out_len == 0 && tot_in >= len) + break; + + kunmap(out_page); + if (nr_pages == nr_dest_pages) { + out_page = NULL; + ret = -1; + goto out; + } + + out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (out_page == NULL) { + ret = -ENOMEM; + goto out; + } + cpage_out = kmap(out_page); + pages[nr_pages++] = out_page; + + pg_bytes_left = PAGE_CACHE_SIZE; + out_offset = 0; + } + } + + /* we're making it bigger, give up */ + if (tot_in > 8192 && tot_in < tot_out) + goto out; + + /* we're all done */ + if (tot_in >= len) + break; + + if (tot_out > max_out) + break; + + bytes_left = len - tot_in; + kunmap(in_page); + page_cache_release(in_page); + + start += PAGE_CACHE_SIZE; + in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + data_in = kmap(in_page); + in_len = min(bytes_left, PAGE_CACHE_SIZE); + } + + if (tot_out > tot_in) + goto out; + + /* store the size of all chunks of compressed data */ + cpage_out = kmap(pages[0]); + write_compress_length(cpage_out, tot_out); + + kunmap(pages[0]); + + ret = 0; + *total_out = tot_out; + *total_in = tot_in; +out: + *out_pages = nr_pages; + if (out_page) + kunmap(out_page); + + if (in_page) { + kunmap(in_page); + page_cache_release(in_page); + } + + return ret; +} + +static int lzo_decompress_biovec(struct list_head *ws, + struct page **pages_in, + u64 disk_start, + struct bio_vec *bvec, + int vcnt, + size_t srclen) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); + int ret = 0; + char *data_in; + unsigned long page_bytes_left; + unsigned long page_in_index = 0; + unsigned long page_out_index = 0; + struct page *page_out; + unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) / + PAGE_CACHE_SIZE; + unsigned long buf_start; + unsigned long buf_offset = 0; + unsigned long bytes; + unsigned long working_bytes; + unsigned long pg_offset; + unsigned long start_byte; + unsigned long current_buf_start; + char *kaddr; + + size_t in_len; + size_t out_len; + unsigned long in_offset; + unsigned long in_page_bytes_left; + unsigned long tot_in; + unsigned long tot_out; + unsigned long tot_len; + char *buf; + + data_in = kmap(pages_in[0]); + tot_len = read_compress_length(data_in); + + tot_in = LZO_LEN; + in_offset = LZO_LEN; + tot_len = min_t(size_t, srclen, tot_len); + in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; + + tot_out = 0; + page_out = bvec[0].bv_page; + page_bytes_left = PAGE_CACHE_SIZE; + pg_offset = 0; + + while (tot_in < tot_len) { + in_len = read_compress_length(data_in + in_offset); + in_page_bytes_left -= LZO_LEN; + in_offset += LZO_LEN; + tot_in += LZO_LEN; + + tot_in += in_len; + working_bytes = in_len; + + /* fast path: avoid using the working buffer */ + if (in_page_bytes_left >= in_len) { + buf = data_in + in_offset; + bytes = in_len; + goto 
cont; + } + + /* copy bytes from the pages into the working buffer */ + buf = workspace->cbuf; + buf_offset = 0; + while (working_bytes) { + bytes = min(working_bytes, in_page_bytes_left); + + memcpy(buf + buf_offset, data_in + in_offset, bytes); + buf_offset += bytes; +cont: + working_bytes -= bytes; + in_page_bytes_left -= bytes; + in_offset += bytes; + + /* check if we need to pick another page */ + if ((working_bytes == 0 && in_page_bytes_left < LZO_LEN) + || in_page_bytes_left == 0) { + tot_in += in_page_bytes_left; + + if (working_bytes == 0 && tot_in >= tot_len) + break; + + kunmap(pages_in[page_in_index]); + page_in_index++; + if (page_in_index >= total_pages_in) { + ret = -1; + data_in = NULL; + goto done; + } + data_in = kmap(pages_in[page_in_index]); + + in_page_bytes_left = PAGE_CACHE_SIZE; + in_offset = 0; + } + } + + out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); + ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, + &out_len); + if (ret != LZO_E_OK) { + printk(KERN_WARNING "btrfs decompress failed\n"); + ret = -1; + break; + } + + /* + * buf start is the byte offset we're of the start of + * our workspace buffer + */ + buf_start = tot_out; + + /* tot_out is the last byte of the workspace buffer */ + tot_out += out_len; + + working_bytes = tot_out - buf_start; + + /* + * start_byte is the first byte of the page we're currently + * copying into relative to the start of the compressed data. + */ + start_byte = page_offset(page_out) - disk_start; + + if (working_bytes == 0) { + /* we didn't make progress in this inflate + * call, we're done + */ + break; + } + + /* we haven't yet hit data corresponding to this page */ + if (tot_out <= start_byte) + continue; + + /* + * the start of the data we care about is offset into + * the middle of our working buffer + */ + if (tot_out > start_byte && buf_start < start_byte) { + buf_offset = start_byte - buf_start; + working_bytes -= buf_offset; + } else { + buf_offset = 0; + } + current_buf_start = buf_start; + + /* copy bytes from the working buffer into the pages */ + while (working_bytes > 0) { + bytes = min(PAGE_CACHE_SIZE - pg_offset, + PAGE_CACHE_SIZE - buf_offset); + bytes = min(bytes, working_bytes); + kaddr = kmap_atomic(page_out, KM_USER0); + memcpy(kaddr + pg_offset, workspace->buf + buf_offset, + bytes); + kunmap_atomic(kaddr, KM_USER0); + flush_dcache_page(page_out); + + pg_offset += bytes; + page_bytes_left -= bytes; + buf_offset += bytes; + working_bytes -= bytes; + current_buf_start += bytes; + + /* check if we need to pick another page */ + if (page_bytes_left == 0) { + page_out_index++; + if (page_out_index >= vcnt) { + ret = 0; + goto done; + } + + page_out = bvec[page_out_index].bv_page; + pg_offset = 0; + page_bytes_left = PAGE_CACHE_SIZE; + start_byte = page_offset(page_out) - disk_start; + + /* + * make sure our new page is covered by this + * working buffer + */ + if (tot_out <= start_byte) + break; + + /* the next page in the biovec might not + * be adjacent to the last page, but it + * might still be found inside this working + * buffer. 
bump our offset pointer + */ + if (tot_out > start_byte && + current_buf_start < start_byte) { + buf_offset = start_byte - buf_start; + working_bytes = tot_out - start_byte; + current_buf_start = buf_start + + buf_offset; + } + } + } + } +done: + if (data_in) + kunmap(pages_in[page_in_index]); + return ret; +} + +static int lzo_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, + unsigned long start_byte, + size_t srclen, size_t destlen) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); + size_t in_len; + size_t out_len; + size_t tot_len; + int ret = 0; + char *kaddr; + unsigned long bytes; + + BUG_ON(srclen < LZO_LEN); + + tot_len = read_compress_length(data_in); + data_in += LZO_LEN; + + in_len = read_compress_length(data_in); + data_in += LZO_LEN; + + out_len = PAGE_CACHE_SIZE; + ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len); + if (ret != LZO_E_OK) { + printk(KERN_WARNING "btrfs decompress failed!\n"); + ret = -1; + goto out; + } + + if (out_len < start_byte) { + ret = -1; + goto out; + } + + bytes = min_t(unsigned long, destlen, out_len - start_byte); + + kaddr = kmap_atomic(dest_page, KM_USER0); + memcpy(kaddr, workspace->buf + start_byte, bytes); + kunmap_atomic(kaddr, KM_USER0); +out: + return ret; +} + +struct btrfs_compress_op btrfs_lzo_compress = { + .alloc_workspace = lzo_alloc_workspace, + .free_workspace = lzo_free_workspace, + .compress_pages = lzo_compress_pages, + .decompress_biovec = lzo_decompress_biovec, + .decompress = lzo_decompress, +}; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f348f2b93164..a1a76b2a61f9 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -168,6 +168,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) strcmp(args[0].from, "zlib") == 0) { compress_type = "zlib"; info->compress_type = BTRFS_COMPRESS_ZLIB; + } else if (strcmp(args[0].from, "lzo") == 0) { + compress_type = "lzo"; + info->compress_type = BTRFS_COMPRESS_LZO; } else { ret = -EINVAL; goto out; -- cgit v1.2.2 From 1a419d85a76853d7d04e9b6280a80e96770bf3e3 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 25 Oct 2010 15:12:50 +0800 Subject: btrfs: Allow to specify compress method when defrag Update defrag ioctl, so one can choose lzo or zlib when turning on compression in defrag operation. Changelog: v1 -> v2 - Add incompability flag. - Fix to check invalid compress type. 
Signed-off-by: Li Zefan --- fs/btrfs/ioctl.c | 19 ++++++++++++++++++- fs/btrfs/ioctl.h | 9 ++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 8cb86d4d763c..b6985d33eede 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -638,9 +638,11 @@ static int btrfs_defrag_file(struct file *file, struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_ordered_extent *ordered; struct page *page; + struct btrfs_super_block *disk_super; unsigned long last_index; unsigned long ra_pages = root->fs_info->bdi.ra_pages; unsigned long total_read = 0; + u64 features; u64 page_start; u64 page_end; u64 last_len = 0; @@ -648,6 +650,14 @@ static int btrfs_defrag_file(struct file *file, u64 defrag_end = 0; unsigned long i; int ret; + int compress_type = BTRFS_COMPRESS_ZLIB; + + if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) { + if (range->compress_type > BTRFS_COMPRESS_TYPES) + return -EINVAL; + if (range->compress_type) + compress_type = range->compress_type; + } if (inode->i_size == 0) return 0; @@ -683,7 +693,7 @@ static int btrfs_defrag_file(struct file *file, total_read++; mutex_lock(&inode->i_mutex); if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) - BTRFS_I(inode)->force_compress = BTRFS_COMPRESS_ZLIB; + BTRFS_I(inode)->force_compress = compress_type; ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); if (ret) @@ -785,6 +795,13 @@ loop_unlock: mutex_unlock(&inode->i_mutex); } + disk_super = &root->fs_info->super_copy; + features = btrfs_super_incompat_flags(disk_super); + if (range->compress_type == BTRFS_COMPRESS_LZO) { + features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; + btrfs_set_super_incompat_flags(disk_super, features); + } + return 0; err_reservations: diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index c344d12c646b..24d0f4628240 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -133,8 +133,15 @@ struct btrfs_ioctl_defrag_range_args { */ __u32 extent_thresh; + /* + * which compression method to use if turning on compression + * for this defrag operation. If unspecified, zlib will + * be used + */ + __u32 compress_type; + /* spare for later */ - __u32 unused[5]; + __u32 unused[4]; }; struct btrfs_ioctl_space_info { -- cgit v1.2.2 From 3a39c18d63fec35f49df577d4b2a4e29c2212f22 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 8 Nov 2010 15:22:19 +0800 Subject: btrfs: Extract duplicate decompress code Add a common function to copy decompressed data from working buffer to bio pages. Signed-off-by: Li Zefan --- fs/btrfs/compression.c | 92 ++++++++++++++++++++++++++++++++++++++++ fs/btrfs/compression.h | 5 +++ fs/btrfs/lzo.c | 101 +++----------------------------------------- fs/btrfs/zlib.c | 111 ++++++------------------------------------------- 4 files changed, 115 insertions(+), 194 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 8faa2df9e719..f745287fbf2e 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -904,3 +904,95 @@ void __exit btrfs_exit_compress(void) { free_workspaces(); } + +/* + * Copy uncompressed data from working buffer to pages. + * + * buf_start is the byte offset we're of the start of our workspace buffer. 
+ * + * total_out is the last byte of the buffer + */ +int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, + unsigned long total_out, u64 disk_start, + struct bio_vec *bvec, int vcnt, + unsigned long *page_index, + unsigned long *pg_offset) +{ + unsigned long buf_offset; + unsigned long current_buf_start; + unsigned long start_byte; + unsigned long working_bytes = total_out - buf_start; + unsigned long bytes; + char *kaddr; + struct page *page_out = bvec[*page_index].bv_page; + + /* + * start byte is the first byte of the page we're currently + * copying into relative to the start of the compressed data. + */ + start_byte = page_offset(page_out) - disk_start; + + /* we haven't yet hit data corresponding to this page */ + if (total_out <= start_byte) + return 1; + + /* + * the start of the data we care about is offset into + * the middle of our working buffer + */ + if (total_out > start_byte && buf_start < start_byte) { + buf_offset = start_byte - buf_start; + working_bytes -= buf_offset; + } else { + buf_offset = 0; + } + current_buf_start = buf_start; + + /* copy bytes from the working buffer into the pages */ + while (working_bytes > 0) { + bytes = min(PAGE_CACHE_SIZE - *pg_offset, + PAGE_CACHE_SIZE - buf_offset); + bytes = min(bytes, working_bytes); + kaddr = kmap_atomic(page_out, KM_USER0); + memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); + kunmap_atomic(kaddr, KM_USER0); + flush_dcache_page(page_out); + + *pg_offset += bytes; + buf_offset += bytes; + working_bytes -= bytes; + current_buf_start += bytes; + + /* check if we need to pick another page */ + if (*pg_offset == PAGE_CACHE_SIZE) { + (*page_index)++; + if (*page_index >= vcnt) + return 0; + + page_out = bvec[*page_index].bv_page; + *pg_offset = 0; + start_byte = page_offset(page_out) - disk_start; + + /* + * make sure our new page is covered by this + * working buffer + */ + if (total_out <= start_byte) + return 1; + + /* + * the next page in the biovec might not be adjacent + * to the last page, but it might still be found + * inside this working buffer. 
bump our offset pointer + */ + if (total_out > start_byte && + current_buf_start < start_byte) { + buf_offset = start_byte - buf_start; + working_bytes = total_out - start_byte; + current_buf_start = buf_start + buf_offset; + } + } + } + + return 1; +} diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index f7ce217113fa..51000174b9d7 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -34,6 +34,11 @@ int btrfs_decompress_biovec(int type, struct page **pages_in, u64 disk_start, struct bio_vec *bvec, int vcnt, size_t srclen); int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, unsigned long start_byte, size_t srclen, size_t destlen); +int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, + unsigned long total_out, u64 disk_start, + struct bio_vec *bvec, int vcnt, + unsigned long *page_index, + unsigned long *pg_offset); int btrfs_submit_compressed_write(struct inode *inode, u64 start, unsigned long len, u64 disk_start, diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 523b144e2aec..cc9b450399df 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -260,12 +260,10 @@ static int lzo_decompress_biovec(struct list_head *ws, size_t srclen) { struct workspace *workspace = list_entry(ws, struct workspace, list); - int ret = 0; + int ret = 0, ret2; char *data_in; - unsigned long page_bytes_left; unsigned long page_in_index = 0; unsigned long page_out_index = 0; - struct page *page_out; unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE; unsigned long buf_start; @@ -273,9 +271,6 @@ static int lzo_decompress_biovec(struct list_head *ws, unsigned long bytes; unsigned long working_bytes; unsigned long pg_offset; - unsigned long start_byte; - unsigned long current_buf_start; - char *kaddr; size_t in_len; size_t out_len; @@ -295,8 +290,6 @@ static int lzo_decompress_biovec(struct list_head *ws, in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; tot_out = 0; - page_out = bvec[0].bv_page; - page_bytes_left = PAGE_CACHE_SIZE; pg_offset = 0; while (tot_in < tot_len) { @@ -359,97 +352,15 @@ cont: break; } - /* - * buf start is the byte offset we're of the start of - * our workspace buffer - */ buf_start = tot_out; - - /* tot_out is the last byte of the workspace buffer */ tot_out += out_len; - working_bytes = tot_out - buf_start; - - /* - * start_byte is the first byte of the page we're currently - * copying into relative to the start of the compressed data. 
- */ - start_byte = page_offset(page_out) - disk_start; - - if (working_bytes == 0) { - /* we didn't make progress in this inflate - * call, we're done - */ + ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start, + tot_out, disk_start, + bvec, vcnt, + &page_out_index, &pg_offset); + if (ret2 == 0) break; - } - - /* we haven't yet hit data corresponding to this page */ - if (tot_out <= start_byte) - continue; - - /* - * the start of the data we care about is offset into - * the middle of our working buffer - */ - if (tot_out > start_byte && buf_start < start_byte) { - buf_offset = start_byte - buf_start; - working_bytes -= buf_offset; - } else { - buf_offset = 0; - } - current_buf_start = buf_start; - - /* copy bytes from the working buffer into the pages */ - while (working_bytes > 0) { - bytes = min(PAGE_CACHE_SIZE - pg_offset, - PAGE_CACHE_SIZE - buf_offset); - bytes = min(bytes, working_bytes); - kaddr = kmap_atomic(page_out, KM_USER0); - memcpy(kaddr + pg_offset, workspace->buf + buf_offset, - bytes); - kunmap_atomic(kaddr, KM_USER0); - flush_dcache_page(page_out); - - pg_offset += bytes; - page_bytes_left -= bytes; - buf_offset += bytes; - working_bytes -= bytes; - current_buf_start += bytes; - - /* check if we need to pick another page */ - if (page_bytes_left == 0) { - page_out_index++; - if (page_out_index >= vcnt) { - ret = 0; - goto done; - } - - page_out = bvec[page_out_index].bv_page; - pg_offset = 0; - page_bytes_left = PAGE_CACHE_SIZE; - start_byte = page_offset(page_out) - disk_start; - - /* - * make sure our new page is covered by this - * working buffer - */ - if (tot_out <= start_byte) - break; - - /* the next page in the biovec might not - * be adjacent to the last page, but it - * might still be found inside this working - * buffer. 
bump our offset pointer - */ - if (tot_out > start_byte && - current_buf_start < start_byte) { - buf_offset = start_byte - buf_start; - working_bytes = tot_out - start_byte; - current_buf_start = buf_start + - buf_offset; - } - } - } } done: if (data_in) diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 9a3e693917f2..f5ec2d44150d 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -218,24 +218,16 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, size_t srclen) { struct workspace *workspace = list_entry(ws, struct workspace, list); - int ret = 0; + int ret = 0, ret2; int wbits = MAX_WBITS; char *data_in; size_t total_out = 0; - unsigned long page_bytes_left; unsigned long page_in_index = 0; unsigned long page_out_index = 0; - struct page *page_out; unsigned long total_pages_in = (srclen + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE; unsigned long buf_start; - unsigned long buf_offset; - unsigned long bytes; - unsigned long working_bytes; unsigned long pg_offset; - unsigned long start_byte; - unsigned long current_buf_start; - char *kaddr; data_in = kmap(pages_in[page_in_index]); workspace->inf_strm.next_in = data_in; @@ -245,8 +237,6 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, workspace->inf_strm.total_out = 0; workspace->inf_strm.next_out = workspace->buf; workspace->inf_strm.avail_out = PAGE_CACHE_SIZE; - page_out = bvec[page_out_index].bv_page; - page_bytes_left = PAGE_CACHE_SIZE; pg_offset = 0; /* If it's deflate, and it's got no preset dictionary, then @@ -268,100 +258,23 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, ret = zlib_inflate(&workspace->inf_strm, Z_NO_FLUSH); if (ret != Z_OK && ret != Z_STREAM_END) break; - /* - * buf start is the byte offset we're of the start of - * our workspace buffer - */ - buf_start = total_out; - /* total_out is the last byte of the workspace buffer */ + buf_start = total_out; total_out = workspace->inf_strm.total_out; - working_bytes = total_out - buf_start; - - /* - * start byte is the first byte of the page we're currently - * copying into relative to the start of the compressed data. 
- */ - start_byte = page_offset(page_out) - disk_start; - - if (working_bytes == 0) { - /* we didn't make progress in this inflate - * call, we're done - */ - if (ret != Z_STREAM_END) - ret = -1; + /* we didn't make progress in this inflate call, we're done */ + if (buf_start == total_out) break; - } - /* we haven't yet hit data corresponding to this page */ - if (total_out <= start_byte) - goto next; - - /* - * the start of the data we care about is offset into - * the middle of our working buffer - */ - if (total_out > start_byte && buf_start < start_byte) { - buf_offset = start_byte - buf_start; - working_bytes -= buf_offset; - } else { - buf_offset = 0; - } - current_buf_start = buf_start; - - /* copy bytes from the working buffer into the pages */ - while (working_bytes > 0) { - bytes = min(PAGE_CACHE_SIZE - pg_offset, - PAGE_CACHE_SIZE - buf_offset); - bytes = min(bytes, working_bytes); - kaddr = kmap_atomic(page_out, KM_USER0); - memcpy(kaddr + pg_offset, workspace->buf + buf_offset, - bytes); - kunmap_atomic(kaddr, KM_USER0); - flush_dcache_page(page_out); - - pg_offset += bytes; - page_bytes_left -= bytes; - buf_offset += bytes; - working_bytes -= bytes; - current_buf_start += bytes; - - /* check if we need to pick another page */ - if (page_bytes_left == 0) { - page_out_index++; - if (page_out_index >= vcnt) { - ret = 0; - goto done; - } - - page_out = bvec[page_out_index].bv_page; - pg_offset = 0; - page_bytes_left = PAGE_CACHE_SIZE; - start_byte = page_offset(page_out) - disk_start; - - /* - * make sure our new page is covered by this - * working buffer - */ - if (total_out <= start_byte) - goto next; - - /* the next page in the biovec might not - * be adjacent to the last page, but it - * might still be found inside this working - * buffer. bump our offset pointer - */ - if (total_out > start_byte && - current_buf_start < start_byte) { - buf_offset = start_byte - buf_start; - working_bytes = total_out - start_byte; - current_buf_start = buf_start + - buf_offset; - } - } + ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start, + total_out, disk_start, + bvec, vcnt, + &page_out_index, &pg_offset); + if (ret2 == 0) { + ret = 0; + goto done; } -next: + workspace->inf_strm.next_out = workspace->buf; workspace->inf_strm.avail_out = PAGE_CACHE_SIZE; -- cgit v1.2.2 From fa0d2b9bd717340e0bc4850a80ac0eb344e9a7fb Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 20 Dec 2010 15:53:28 +0800 Subject: Btrfs: Refactor btrfs_ioctl_snap_create() Split it into two functions for two different ioctls, since they share no common code. 
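For illustration, a minimal user-space sketch of how the two resulting ioctls are driven (assumes the definitions from fs/btrfs/ioctl.h are visible to user space; destdir_fd and src_fd are hypothetical descriptors, error handling omitted):

	struct btrfs_ioctl_vol_args args = { .fd = src_fd };
	struct btrfs_ioctl_vol_args_v2 args2 = { .fd = src_fd, .flags = 0 };

	strncpy(args.name, "snap-v1", BTRFS_PATH_NAME_MAX);
	ioctl(destdir_fd, BTRFS_IOC_SNAP_CREATE, &args);      /* -> btrfs_ioctl_snap_create() */

	strncpy(args2.name, "snap-v2", BTRFS_SUBVOL_NAME_MAX);
	ioctl(destdir_fd, BTRFS_IOC_SNAP_CREATE_V2, &args2);  /* -> btrfs_ioctl_snap_create_v2() */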
Signed-off-by: Li Zefan --- fs/btrfs/ioctl.c | 84 +++++++++++++++++++++++++++----------------------------- 1 file changed, 40 insertions(+), 44 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f87552a1d7ea..02554e19d974 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -946,58 +946,54 @@ out: } static noinline int btrfs_ioctl_snap_create(struct file *file, - void __user *arg, int subvol, - int v2) + void __user *arg, int subvol) { - struct btrfs_ioctl_vol_args *vol_args = NULL; - struct btrfs_ioctl_vol_args_v2 *vol_args_v2 = NULL; - char *name; - u64 fd; + struct btrfs_ioctl_vol_args *vol_args; int ret; - if (v2) { - u64 transid = 0; - u64 *ptr = NULL; + vol_args = memdup_user(arg, sizeof(*vol_args)); + if (IS_ERR(vol_args)) + return PTR_ERR(vol_args); + vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; - vol_args_v2 = memdup_user(arg, sizeof(*vol_args_v2)); - if (IS_ERR(vol_args_v2)) - return PTR_ERR(vol_args_v2); + ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, + vol_args->fd, subvol, NULL); - if (vol_args_v2->flags & ~BTRFS_SUBVOL_CREATE_ASYNC) { - ret = -EINVAL; - goto out; - } - - name = vol_args_v2->name; - fd = vol_args_v2->fd; - vol_args_v2->name[BTRFS_SUBVOL_NAME_MAX] = '\0'; + kfree(vol_args); + return ret; +} - if (vol_args_v2->flags & BTRFS_SUBVOL_CREATE_ASYNC) - ptr = &transid; +static noinline int btrfs_ioctl_snap_create_v2(struct file *file, + void __user *arg, int subvol) +{ + struct btrfs_ioctl_vol_args_v2 *vol_args; + int ret; + u64 transid = 0; + u64 *ptr = NULL; - ret = btrfs_ioctl_snap_create_transid(file, name, fd, - subvol, ptr); + vol_args = memdup_user(arg, sizeof(*vol_args)); + if (IS_ERR(vol_args)) + return PTR_ERR(vol_args); + vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0'; - if (ret == 0 && ptr && - copy_to_user(arg + - offsetof(struct btrfs_ioctl_vol_args_v2, - transid), ptr, sizeof(*ptr))) - ret = -EFAULT; - } else { - vol_args = memdup_user(arg, sizeof(*vol_args)); - if (IS_ERR(vol_args)) - return PTR_ERR(vol_args); - name = vol_args->name; - fd = vol_args->fd; - vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; - - ret = btrfs_ioctl_snap_create_transid(file, name, fd, - subvol, NULL); + if (vol_args->flags & ~BTRFS_SUBVOL_CREATE_ASYNC) { + ret = -EINVAL; + goto out; } + + if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC) + ptr = &transid; + + ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, + vol_args->fd, subvol, ptr); + + if (ret == 0 && ptr && + copy_to_user(arg + + offsetof(struct btrfs_ioctl_vol_args_v2, + transid), ptr, sizeof(*ptr))) + ret = -EFAULT; out: kfree(vol_args); - kfree(vol_args_v2); - return ret; } @@ -2257,11 +2253,11 @@ long btrfs_ioctl(struct file *file, unsigned int case FS_IOC_GETVERSION: return btrfs_ioctl_getversion(file, argp); case BTRFS_IOC_SNAP_CREATE: - return btrfs_ioctl_snap_create(file, argp, 0, 0); + return btrfs_ioctl_snap_create(file, argp, 0); case BTRFS_IOC_SNAP_CREATE_V2: - return btrfs_ioctl_snap_create(file, argp, 0, 1); + return btrfs_ioctl_snap_create_v2(file, argp, 0); case BTRFS_IOC_SUBVOL_CREATE: - return btrfs_ioctl_snap_create(file, argp, 1, 0); + return btrfs_ioctl_snap_create(file, argp, 1); case BTRFS_IOC_SNAP_DESTROY: return btrfs_ioctl_snap_destroy(file, argp); case BTRFS_IOC_DEFAULT_SUBVOL: -- cgit v1.2.2 From b83cc9693f39689490970c19f6c5b866f6719a70 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 20 Dec 2010 16:04:08 +0800 Subject: Btrfs: Add readonly snapshots support Usage: Set BTRFS_SUBVOL_RDONLY of btrfs_ioctl_vol_arg_v2->flags, and call 
ioctl(BTRFS_I0CTL_SNAP_CREATE_V2). Implementation: - Set readonly bit of btrfs_root_item->flags. - Add readonly checks in btrfs_permission (inode_permission), btrfs_setattr, btrfs_set/remove_xattr and some ioctls. Changelog for v3: - Eliminate btrfs_root->readonly, but check btrfs_root->root_item.flags. - Rename BTRFS_ROOT_SNAP_RDONLY to BTRFS_ROOT_SUBVOL_RDONLY. Signed-off-by: Li Zefan --- fs/btrfs/ctree.h | 7 +++++++ fs/btrfs/inode.c | 8 ++++++++ fs/btrfs/ioctl.c | 42 ++++++++++++++++++++++++++++++++---------- fs/btrfs/ioctl.h | 1 + fs/btrfs/transaction.c | 8 ++++++++ fs/btrfs/transaction.h | 1 + fs/btrfs/xattr.c | 18 ++++++++++++++++++ 7 files changed, 75 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index af52f6d7a4d8..4403e5643d43 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -597,6 +597,8 @@ struct btrfs_dir_item { u8 type; } __attribute__ ((__packed__)); +#define BTRFS_ROOT_SUBVOL_RDONLY (1ULL << 0) + struct btrfs_root_item { struct btrfs_inode_item inode; __le64 generation; @@ -1893,6 +1895,11 @@ BTRFS_SETGET_STACK_FUNCS(root_limit, struct btrfs_root_item, byte_limit, 64); BTRFS_SETGET_STACK_FUNCS(root_last_snapshot, struct btrfs_root_item, last_snapshot, 64); +static inline bool btrfs_root_readonly(struct btrfs_root *root) +{ + return root->root_item.flags & BTRFS_ROOT_SUBVOL_RDONLY; +} + /* struct btrfs_super_block */ BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5f9194438f7c..956f1eb913b1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3671,8 +3671,12 @@ static int btrfs_setattr_size(struct inode *inode, struct iattr *attr) static int btrfs_setattr(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; + struct btrfs_root *root = BTRFS_I(inode)->root; int err; + if (btrfs_root_readonly(root)) + return -EROFS; + err = inode_change_ok(inode, attr); if (err) return err; @@ -7206,6 +7210,10 @@ static int btrfs_set_page_dirty(struct page *page) static int btrfs_permission(struct inode *inode, int mask) { + struct btrfs_root *root = BTRFS_I(inode)->root; + + if (btrfs_root_readonly(root) && (mask & MAY_WRITE)) + return -EROFS; if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE)) return -EACCES; return generic_permission(inode, mask, btrfs_check_acl); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 02554e19d974..f066ccb5dddf 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -147,6 +147,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) unsigned int flags, oldflags; int ret; + if (btrfs_root_readonly(root)) + return -EROFS; + if (copy_from_user(&flags, arg, sizeof(flags))) return -EFAULT; @@ -360,7 +363,8 @@ fail: } static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, - char *name, int namelen, u64 *async_transid) + char *name, int namelen, u64 *async_transid, + bool readonly) { struct inode *inode; struct dentry *parent; @@ -378,6 +382,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry, btrfs_init_block_rsv(&pending_snapshot->block_rsv); pending_snapshot->dentry = dentry; pending_snapshot->root = root; + pending_snapshot->readonly = readonly; trans = btrfs_start_transaction(root->fs_info->extent_root, 5); if (IS_ERR(trans)) { @@ -509,7 +514,7 @@ static inline int btrfs_may_create(struct inode *dir, struct dentry *child) static noinline int btrfs_mksubvol(struct path *parent, char *name, int 
namelen, struct btrfs_root *snap_src, - u64 *async_transid) + u64 *async_transid, bool readonly) { struct inode *dir = parent->dentry->d_inode; struct dentry *dentry; @@ -541,7 +546,7 @@ static noinline int btrfs_mksubvol(struct path *parent, if (snap_src) { error = create_snapshot(snap_src, dentry, - name, namelen, async_transid); + name, namelen, async_transid, readonly); } else { error = create_subvol(BTRFS_I(dir)->root, dentry, name, namelen, async_transid); @@ -901,7 +906,8 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, char *name, unsigned long fd, int subvol, - u64 *transid) + u64 *transid, + bool readonly) { struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root; struct file *src_file; @@ -919,7 +925,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, if (subvol) { ret = btrfs_mksubvol(&file->f_path, name, namelen, - NULL, transid); + NULL, transid, readonly); } else { struct inode *src_inode; src_file = fget(fd); @@ -938,7 +944,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, } ret = btrfs_mksubvol(&file->f_path, name, namelen, BTRFS_I(src_inode)->root, - transid); + transid, readonly); fput(src_file); } out: @@ -957,7 +963,8 @@ static noinline int btrfs_ioctl_snap_create(struct file *file, vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, - vol_args->fd, subvol, NULL); + vol_args->fd, subvol, + NULL, false); kfree(vol_args); return ret; @@ -970,22 +977,27 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, int ret; u64 transid = 0; u64 *ptr = NULL; + bool readonly = false; vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0'; - if (vol_args->flags & ~BTRFS_SUBVOL_CREATE_ASYNC) { - ret = -EINVAL; + if (vol_args->flags & + ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY)) { + ret = -EOPNOTSUPP; goto out; } if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC) ptr = &transid; + if (vol_args->flags & BTRFS_SUBVOL_RDONLY) + readonly = true; ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, - vol_args->fd, subvol, ptr); + vol_args->fd, subvol, + ptr, readonly); if (ret == 0 && ptr && copy_to_user(arg + @@ -1505,6 +1517,9 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp) struct btrfs_ioctl_defrag_range_args *range; int ret; + if (btrfs_root_readonly(root)) + return -EROFS; + ret = mnt_want_write(file->f_path.mnt); if (ret) return ret; @@ -1633,6 +1648,9 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) return -EINVAL; + if (btrfs_root_readonly(root)) + return -EROFS; + ret = mnt_want_write(file->f_path.mnt); if (ret) return ret; @@ -1954,6 +1972,10 @@ static long btrfs_ioctl_trans_start(struct file *file) if (file->private_data) goto out; + ret = -EROFS; + if (btrfs_root_readonly(root)) + goto out; + ret = mnt_want_write(file->f_path.mnt); if (ret) goto out; diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index c344d12c646b..52ae489974be 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -31,6 +31,7 @@ struct btrfs_ioctl_vol_args { }; #define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0) +#define BTRFS_SUBVOL_RDONLY (1ULL << 1) #define BTRFS_SUBVOL_NAME_MAX 4039 struct btrfs_ioctl_vol_args_v2 { diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f50e931fc217..29e30d832ec9 100644 --- a/fs/btrfs/transaction.c 
+++ b/fs/btrfs/transaction.c @@ -910,6 +910,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, u64 to_reserve = 0; u64 index = 0; u64 objectid; + u64 root_flags; new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS); if (!new_root_item) { @@ -967,6 +968,13 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); + root_flags = btrfs_root_flags(new_root_item); + if (pending->readonly) + root_flags |= BTRFS_ROOT_SUBVOL_RDONLY; + else + root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY; + btrfs_set_root_flags(new_root_item, root_flags); + old = btrfs_lock_root_node(root); btrfs_cow_block(trans, root, old, NULL, 0, &old); btrfs_set_lock_blocking(old); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index f104b57ad4ef..229a594cacd5 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -62,6 +62,7 @@ struct btrfs_pending_snapshot { struct btrfs_block_rsv block_rsv; /* extra metadata reseration for relocation */ int error; + bool readonly; struct list_head list; }; diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 698fdd2c739c..a5776531dc2b 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -316,6 +316,15 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name, int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { + struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; + + /* + * The permission on security.* and system.* is not checked + * in permission(). + */ + if (btrfs_root_readonly(root)) + return -EROFS; + /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler @@ -336,6 +345,15 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, int btrfs_removexattr(struct dentry *dentry, const char *name) { + struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root; + + /* + * The permission on security.* and system.* is not checked + * in permission(). + */ + if (btrfs_root_readonly(root)) + return -EROFS; + /* * If this is a request for a synthetic attribute in the system.* * namespace use the generic infrastructure to resolve a handler -- cgit v1.2.2 From 0caa102da82799efaba88e234484786a9591c797 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 20 Dec 2010 16:30:25 +0800 Subject: Btrfs: Add BTRFS_IOC_SUBVOL_GETFLAGS/SETFLAGS ioctls This allows us to set a snapshot or a subvolume readonly or writable on the fly. Usage: Set BTRFS_SUBVOL_RDONLY of btrfs_ioctl_vol_args_v2->flags, and then call ioctl(BTRFS_IOC_SUBVOL_SETFLAGS); Changelog for v3: - Change to pass __u64 as ioctl parameter. Changelog for v2: - Add _GETFLAGS ioctl. - Check if the passed fd is the root of a subvolume. - Change the name from _SNAP_SETFLAGS to _SUBVOL_SETFLAGS.
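For illustration, a minimal user-space sketch of the final (v3) interface (subvol_fd is a hypothetical descriptor opened on the subvolume root; error handling omitted):

	__u64 flags = 0;

	if (ioctl(subvol_fd, BTRFS_IOC_SUBVOL_GETFLAGS, &flags) == 0) {
		flags |= BTRFS_SUBVOL_RDONLY;   /* or &= ~BTRFS_SUBVOL_RDONLY to make it writable again */
		ioctl(subvol_fd, BTRFS_IOC_SUBVOL_SETFLAGS, &flags);
	}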
Signed-off-by: Li Zefan --- fs/btrfs/ioctl.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/ioctl.h | 2 ++ 2 files changed, 85 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f066ccb5dddf..ad1983524f97 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1009,6 +1009,85 @@ out: return ret; } +static noinline int btrfs_ioctl_subvol_getflags(struct file *file, + void __user *arg) +{ + struct inode *inode = fdentry(file)->d_inode; + struct btrfs_root *root = BTRFS_I(inode)->root; + int ret = 0; + u64 flags = 0; + + if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) + return -EINVAL; + + down_read(&root->fs_info->subvol_sem); + if (btrfs_root_readonly(root)) + flags |= BTRFS_SUBVOL_RDONLY; + up_read(&root->fs_info->subvol_sem); + + if (copy_to_user(arg, &flags, sizeof(flags))) + ret = -EFAULT; + + return ret; +} + +static noinline int btrfs_ioctl_subvol_setflags(struct file *file, + void __user *arg) +{ + struct inode *inode = fdentry(file)->d_inode; + struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_trans_handle *trans; + u64 root_flags; + u64 flags; + int ret = 0; + + if (root->fs_info->sb->s_flags & MS_RDONLY) + return -EROFS; + + if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) + return -EINVAL; + + if (copy_from_user(&flags, arg, sizeof(flags))) + return -EFAULT; + + if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC) + return -EINVAL; + + if (flags & ~BTRFS_SUBVOL_RDONLY) + return -EOPNOTSUPP; + + down_write(&root->fs_info->subvol_sem); + + /* nothing to do */ + if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root)) + goto out; + + root_flags = btrfs_root_flags(&root->root_item); + if (flags & BTRFS_SUBVOL_RDONLY) + btrfs_set_root_flags(&root->root_item, + root_flags | BTRFS_ROOT_SUBVOL_RDONLY); + else + btrfs_set_root_flags(&root->root_item, + root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY); + + trans = btrfs_start_transaction(root, 1); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out_reset; + } + + ret = btrfs_update_root(trans, root, + &root->root_key, &root->root_item); + + btrfs_commit_transaction(trans, root); +out_reset: + if (ret) + btrfs_set_root_flags(&root->root_item, root_flags); +out: + up_write(&root->fs_info->subvol_sem); + return ret; +} + /* * helper to check if the subvolume references other subvolumes */ @@ -2282,6 +2361,10 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_snap_create(file, argp, 1); case BTRFS_IOC_SNAP_DESTROY: return btrfs_ioctl_snap_destroy(file, argp); + case BTRFS_IOC_SUBVOL_GETFLAGS: + return btrfs_ioctl_subvol_getflags(file, argp); + case BTRFS_IOC_SUBVOL_SETFLAGS: + return btrfs_ioctl_subvol_setflags(file, argp); case BTRFS_IOC_DEFAULT_SUBVOL: return btrfs_ioctl_default_subvol(file, argp); case BTRFS_IOC_DEFRAG: diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 52ae489974be..1223223351fa 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -194,4 +194,6 @@ struct btrfs_ioctl_space_args { #define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64) #define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \ struct btrfs_ioctl_vol_args_v2) +#define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64) +#define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64) #endif -- cgit v1.2.2 From 1e6d9153df27923649976554d034a69ac7b28f95 Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Mon, 20 Dec 2010 16:21:11 +0800 Subject: ocfs2: Release buffer_head in case of error in ocfs2_double_lock. 
In ocfs2_double_lock, when ocfs2_inode_lock for inode1 fails, we just unlock inode2 and return without releasing buffer we get from inode_lock(inode2). The good thing is that it is freed by the only caller ocfs2_rename when it exits. But I don't think this is a right way for error handling. We should free the buffer_head we get in ocfs2_double_lock before exit so that the caller doesn't need to take care of it. Signed-off-by: Tao Ma Signed-off-by: Joel Becker --- fs/ocfs2/namei.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index ff5744e1e36f..ca35f81a13bb 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -1017,8 +1017,11 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, * An error return must mean that no cluster locks * were held on function exit. */ - if (oi1->ip_blkno != oi2->ip_blkno) + if (oi1->ip_blkno != oi2->ip_blkno) { ocfs2_inode_unlock(inode2, 1); + brelse(*bh2); + *bh2 = NULL; + } if (status != -ENOENT) mlog_errno(status); -- cgit v1.2.2 From 02bd9c394ef64a16a313eb4d968a94b7000c5d00 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Mon, 20 Dec 2010 16:34:59 -0800 Subject: ocfs2/dlm: Cleanup dlmdebug.c Remove struct debug_buffer in dlmdebug.c/h. Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/dlm/dlmdebug.c | 178 ++++++++++++++++++------------------------------ fs/ocfs2/dlm/dlmdebug.h | 5 -- 2 files changed, 66 insertions(+), 117 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 272ec8631a51..77199ca4409a 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -370,92 +370,46 @@ static void dlm_debug_get(struct dlm_debug_ctxt *dc) kref_get(&dc->debug_refcnt); } -static struct debug_buffer *debug_buffer_allocate(void) +static int debug_release(struct inode *inode, struct file *file) { - struct debug_buffer *db = NULL; - - db = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL); - if (!db) - goto bail; - - db->len = PAGE_SIZE; - db->buf = kmalloc(db->len, GFP_KERNEL); - if (!db->buf) - goto bail; - - return db; -bail: - kfree(db); - return NULL; -} - -static ssize_t debug_buffer_read(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) -{ - struct debug_buffer *db = file->private_data; - - return simple_read_from_buffer(buf, nbytes, ppos, db->buf, db->len); -} - -static loff_t debug_buffer_llseek(struct file *file, loff_t off, int whence) -{ - struct debug_buffer *db = file->private_data; - loff_t new = -1; - - switch (whence) { - case 0: - new = off; - break; - case 1: - new = file->f_pos + off; - break; - } - - if (new < 0 || new > db->len) - return -EINVAL; - - return (file->f_pos = new); + free_page((unsigned long)file->private_data); + return 0; } -static int debug_buffer_release(struct inode *inode, struct file *file) +static ssize_t debug_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) { - struct debug_buffer *db = file->private_data; - - if (db) - kfree(db->buf); - kfree(db); - - return 0; + return simple_read_from_buffer(buf, nbytes, ppos, file->private_data, + i_size_read(file->f_mapping->host)); } /* end - util funcs */ /* begin - purge list funcs */ -static int debug_purgelist_print(struct dlm_ctxt *dlm, struct debug_buffer *db) +static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len) { struct dlm_lock_resource *res; int out = 0; unsigned long total = 0; - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - 
out, "Dumping Purgelist for Domain: %s\n", dlm->name); spin_lock(&dlm->spinlock); list_for_each_entry(res, &dlm->purge_list, purge) { ++total; - if (db->len - out < 100) + if (len - out < 100) continue; spin_lock(&res->spinlock); out += stringify_lockname(res->lockname.name, res->lockname.len, - db->buf + out, db->len - out); - out += snprintf(db->buf + out, db->len - out, "\t%ld\n", + buf + out, len - out); + out += snprintf(buf + out, len - out, "\t%ld\n", (jiffies - res->last_used)/HZ); spin_unlock(&res->spinlock); } spin_unlock(&dlm->spinlock); - out += snprintf(db->buf + out, db->len - out, - "Total on list: %ld\n", total); + out += snprintf(buf + out, len - out, "Total on list: %ld\n", total); return out; } @@ -463,15 +417,15 @@ static int debug_purgelist_print(struct dlm_ctxt *dlm, struct debug_buffer *db) static int debug_purgelist_open(struct inode *inode, struct file *file) { struct dlm_ctxt *dlm = inode->i_private; - struct debug_buffer *db; + char *buf = NULL; - db = debug_buffer_allocate(); - if (!db) + buf = (char *) get_zeroed_page(GFP_NOFS); + if (!buf) goto bail; - db->len = debug_purgelist_print(dlm, db); + i_size_write(inode, debug_purgelist_print(dlm, buf, PAGE_SIZE - 1)); - file->private_data = db; + file->private_data = buf; return 0; bail: @@ -480,14 +434,14 @@ bail: static const struct file_operations debug_purgelist_fops = { .open = debug_purgelist_open, - .release = debug_buffer_release, - .read = debug_buffer_read, - .llseek = debug_buffer_llseek, + .release = debug_release, + .read = debug_read, + .llseek = generic_file_llseek, }; /* end - purge list funcs */ /* begin - debug mle funcs */ -static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db) +static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len) { struct dlm_master_list_entry *mle; struct hlist_head *bucket; @@ -495,7 +449,7 @@ static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db) int i, out = 0; unsigned long total = 0, longest = 0, bucket_count = 0; - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, "Dumping MLEs for Domain: %s\n", dlm->name); spin_lock(&dlm->master_lock); @@ -506,16 +460,16 @@ static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db) master_hash_node); ++total; ++bucket_count; - if (db->len - out < 200) + if (len - out < 200) continue; - out += dump_mle(mle, db->buf + out, db->len - out); + out += dump_mle(mle, buf + out, len - out); } longest = max(longest, bucket_count); bucket_count = 0; } spin_unlock(&dlm->master_lock); - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, "Total: %ld, Longest: %ld\n", total, longest); return out; } @@ -523,15 +477,15 @@ static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db) static int debug_mle_open(struct inode *inode, struct file *file) { struct dlm_ctxt *dlm = inode->i_private; - struct debug_buffer *db; + char *buf = NULL; - db = debug_buffer_allocate(); - if (!db) + buf = (char *) get_zeroed_page(GFP_NOFS); + if (!buf) goto bail; - db->len = debug_mle_print(dlm, db); + i_size_write(inode, debug_mle_print(dlm, buf, PAGE_SIZE - 1)); - file->private_data = db; + file->private_data = buf; return 0; bail: @@ -540,9 +494,9 @@ bail: static const struct file_operations debug_mle_fops = { .open = debug_mle_open, - .release = debug_buffer_release, - .read = debug_buffer_read, - .llseek = debug_buffer_llseek, + .release = debug_release, + .read = debug_read, + .llseek = generic_file_llseek, }; /* end 
- debug mle funcs */ @@ -757,7 +711,7 @@ static const struct file_operations debug_lockres_fops = { /* end - debug lockres funcs */ /* begin - debug state funcs */ -static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) +static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len) { int out = 0; struct dlm_reco_node_data *node; @@ -781,35 +735,35 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) } /* Domain: xxxxxxxxxx Key: 0xdfbac769 */ - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, "Domain: %s Key: 0x%08x Protocol: %d.%d\n", dlm->name, dlm->key, dlm->dlm_locking_proto.pv_major, dlm->dlm_locking_proto.pv_minor); /* Thread Pid: xxx Node: xxx State: xxxxx */ - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, "Thread Pid: %d Node: %d State: %s\n", dlm->dlm_thread_task->pid, dlm->node_num, state); /* Number of Joins: xxx Joining Node: xxx */ - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, "Number of Joins: %d Joining Node: %d\n", dlm->num_joins, dlm->joining_node); /* Domain Map: xx xx xx */ - out += snprintf(db->buf + out, db->len - out, "Domain Map: "); + out += snprintf(buf + out, len - out, "Domain Map: "); out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES, - db->buf + out, db->len - out); - out += snprintf(db->buf + out, db->len - out, "\n"); + buf + out, len - out); + out += snprintf(buf + out, len - out, "\n"); /* Live Map: xx xx xx */ - out += snprintf(db->buf + out, db->len - out, "Live Map: "); + out += snprintf(buf + out, len - out, "Live Map: "); out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES, - db->buf + out, db->len - out); - out += snprintf(db->buf + out, db->len - out, "\n"); + buf + out, len - out); + out += snprintf(buf + out, len - out, "\n"); /* Lock Resources: xxx (xxx) */ - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, "Lock Resources: %d (%d)\n", atomic_read(&dlm->res_cur_count), atomic_read(&dlm->res_tot_count)); @@ -821,29 +775,29 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) cur_mles += atomic_read(&dlm->mle_cur_count[i]); /* MLEs: xxx (xxx) */ - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, "MLEs: %d (%d)\n", cur_mles, tot_mles); /* Blocking: xxx (xxx) */ - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, " Blocking: %d (%d)\n", atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]), atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK])); /* Mastery: xxx (xxx) */ - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, " Mastery: %d (%d)\n", atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]), atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER])); /* Migration: xxx (xxx) */ - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, " Migration: %d (%d)\n", atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]), atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION])); /* Lists: Dirty=Empty Purge=InUse PendingASTs=Empty ... */ - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, "Lists: Dirty=%s Purge=%s PendingASTs=%s " "PendingBASTs=%s\n", (list_empty(&dlm->dirty_list) ? "Empty" : "InUse"), @@ -852,12 +806,12 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) (list_empty(&dlm->pending_basts) ? 
"Empty" : "InUse")); /* Purge Count: xxx Refs: xxx */ - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, "Purge Count: %d Refs: %d\n", dlm->purge_count, atomic_read(&dlm->dlm_refs.refcount)); /* Dead Node: xxx */ - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, "Dead Node: %d\n", dlm->reco.dead_node); /* What about DLM_RECO_STATE_FINALIZE? */ @@ -867,19 +821,19 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) state = "INACTIVE"; /* Recovery Pid: xxxx Master: xxx State: xxxx */ - out += snprintf(db->buf + out, db->len - out, + out += snprintf(buf + out, len - out, "Recovery Pid: %d Master: %d State: %s\n", dlm->dlm_reco_thread_task->pid, dlm->reco.new_master, state); /* Recovery Map: xx xx */ - out += snprintf(db->buf + out, db->len - out, "Recovery Map: "); + out += snprintf(buf + out, len - out, "Recovery Map: "); out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES, - db->buf + out, db->len - out); - out += snprintf(db->buf + out, db->len - out, "\n"); + buf + out, len - out); + out += snprintf(buf + out, len - out, "\n"); /* Recovery Node State: */ - out += snprintf(db->buf + out, db->len - out, "Recovery Node State:\n"); + out += snprintf(buf + out, len - out, "Recovery Node State:\n"); list_for_each_entry(node, &dlm->reco.node_data, list) { switch (node->state) { case DLM_RECO_NODE_DATA_INIT: @@ -907,7 +861,7 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) state = "BAD"; break; } - out += snprintf(db->buf + out, db->len - out, "\t%u - %s\n", + out += snprintf(buf + out, len - out, "\t%u - %s\n", node->node_num, state); } @@ -919,15 +873,15 @@ static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db) static int debug_state_open(struct inode *inode, struct file *file) { struct dlm_ctxt *dlm = inode->i_private; - struct debug_buffer *db = NULL; + char *buf = NULL; - db = debug_buffer_allocate(); - if (!db) + buf = (char *) get_zeroed_page(GFP_NOFS); + if (!buf) goto bail; - db->len = debug_state_print(dlm, db); + i_size_write(inode, debug_state_print(dlm, buf, PAGE_SIZE - 1)); - file->private_data = db; + file->private_data = buf; return 0; bail: @@ -936,9 +890,9 @@ bail: static const struct file_operations debug_state_fops = { .open = debug_state_open, - .release = debug_buffer_release, - .read = debug_buffer_read, - .llseek = debug_buffer_llseek, + .release = debug_release, + .read = debug_read, + .llseek = generic_file_llseek, }; /* end - debug state funcs */ diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h index 8c686d22f9c7..1f27c4812d1a 100644 --- a/fs/ocfs2/dlm/dlmdebug.h +++ b/fs/ocfs2/dlm/dlmdebug.h @@ -37,11 +37,6 @@ struct dlm_debug_ctxt { struct dentry *debug_purgelist_dentry; }; -struct debug_buffer { - int len; - char *buf; -}; - struct debug_lockres { int dl_len; char *dl_buf; -- cgit v1.2.2 From 37096a7927decb0b1d3c2514b8adb4583a834112 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Mon, 20 Dec 2010 16:35:00 -0800 Subject: ocfs2/dlm: Minor cleanup Patch makes use of task_pid_nr(). Also removes the null check before calling debugfs_remove(). 
Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/cluster/netdebug.c | 20 +++++++------------- fs/ocfs2/dlm/dlmdebug.c | 22 ++++++++-------------- 2 files changed, 15 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index a3f150e52b02..0edf836b42d5 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c @@ -141,7 +141,7 @@ static int nst_seq_show(struct seq_file *seq, void *v) " sock acquiry: %lu.%ld\n" " send start: %lu.%ld\n" " wait start: %lu.%ld\n", - nst, (unsigned long)nst->st_task->pid, + nst, (unsigned long)task_pid_nr(nst->st_task), (unsigned long)nst->st_task->tgid, nst->st_task->comm, nst->st_node, nst->st_sc, nst->st_id, nst->st_msg_type, @@ -421,23 +421,17 @@ int o2net_debugfs_init(void) return 0; bail: - if (sc_dentry) - debugfs_remove(sc_dentry); - if (nst_dentry) - debugfs_remove(nst_dentry); - if (o2net_dentry) - debugfs_remove(o2net_dentry); + debugfs_remove(sc_dentry); + debugfs_remove(nst_dentry); + debugfs_remove(o2net_dentry); return -ENOMEM; } void o2net_debugfs_exit(void) { - if (sc_dentry) - debugfs_remove(sc_dentry); - if (nst_dentry) - debugfs_remove(nst_dentry); - if (o2net_dentry) - debugfs_remove(o2net_dentry); + debugfs_remove(sc_dentry); + debugfs_remove(nst_dentry); + debugfs_remove(o2net_dentry); } #endif /* CONFIG_DEBUG_FS */ diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 77199ca4409a..04a32be0aeb9 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -743,7 +743,7 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len) /* Thread Pid: xxx Node: xxx State: xxxxx */ out += snprintf(buf + out, len - out, "Thread Pid: %d Node: %d State: %s\n", - dlm->dlm_thread_task->pid, dlm->node_num, state); + task_pid_nr(dlm->dlm_thread_task), dlm->node_num, state); /* Number of Joins: xxx Joining Node: xxx */ out += snprintf(buf + out, len - out, @@ -823,7 +823,7 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len) /* Recovery Pid: xxxx Master: xxx State: xxxx */ out += snprintf(buf + out, len - out, "Recovery Pid: %d Master: %d State: %s\n", - dlm->dlm_reco_thread_task->pid, + task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.new_master, state); /* Recovery Map: xx xx */ @@ -956,14 +956,10 @@ void dlm_debug_shutdown(struct dlm_ctxt *dlm) struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt; if (dc) { - if (dc->debug_purgelist_dentry) - debugfs_remove(dc->debug_purgelist_dentry); - if (dc->debug_mle_dentry) - debugfs_remove(dc->debug_mle_dentry); - if (dc->debug_lockres_dentry) - debugfs_remove(dc->debug_lockres_dentry); - if (dc->debug_state_dentry) - debugfs_remove(dc->debug_state_dentry); + debugfs_remove(dc->debug_purgelist_dentry); + debugfs_remove(dc->debug_mle_dentry); + debugfs_remove(dc->debug_lockres_dentry); + debugfs_remove(dc->debug_state_dentry); dlm_debug_put(dc); } } @@ -994,8 +990,7 @@ bail: void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm) { - if (dlm->dlm_debugfs_subroot) - debugfs_remove(dlm->dlm_debugfs_subroot); + debugfs_remove(dlm->dlm_debugfs_subroot); } /* debugfs root */ @@ -1011,7 +1006,6 @@ int dlm_create_debugfs_root(void) void dlm_destroy_debugfs_root(void) { - if (dlm_debugfs_root) - debugfs_remove(dlm_debugfs_root); + debugfs_remove(dlm_debugfs_root); } #endif /* CONFIG_DEBUG_FS */ -- cgit v1.2.2 From 079ffb743c622fe2189b75614427c56e8391498b Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Mon, 20 Dec 2010 16:35:01 -0800 Subject: ocfs2/dlm: Hard code the 
values for enums In o2dlm, the enumerated message values are part of the protocol. The patch hard codes each value so as to reduce the chance of an editing error causing a protocol mismatch. Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/dlm/dlmcommon.h | 86 ++++++++++++++++++++++++------------------------ 1 file changed, 43 insertions(+), 43 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index b36d0bf77a5a..4bdf7baee344 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h @@ -50,10 +50,10 @@ #define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l) enum dlm_mle_type { - DLM_MLE_BLOCK, - DLM_MLE_MASTER, - DLM_MLE_MIGRATION, - DLM_MLE_NUM_TYPES + DLM_MLE_BLOCK = 0, + DLM_MLE_MASTER = 1, + DLM_MLE_MIGRATION = 2, + DLM_MLE_NUM_TYPES = 3, }; struct dlm_master_list_entry { @@ -82,8 +82,8 @@ struct dlm_master_list_entry { enum dlm_ast_type { DLM_AST = 0, - DLM_BAST, - DLM_ASTUNLOCK + DLM_BAST = 1, + DLM_ASTUNLOCK = 2, }; @@ -119,9 +119,9 @@ struct dlm_recovery_ctxt enum dlm_ctxt_state { DLM_CTXT_NEW = 0, - DLM_CTXT_JOINED, - DLM_CTXT_IN_SHUTDOWN, - DLM_CTXT_LEAVING, + DLM_CTXT_JOINED = 1, + DLM_CTXT_IN_SHUTDOWN = 2, + DLM_CTXT_LEAVING = 3, }; struct dlm_ctxt @@ -388,8 +388,8 @@ struct dlm_lock enum dlm_lockres_list { DLM_GRANTED_LIST = 0, - DLM_CONVERTING_LIST, - DLM_BLOCKED_LIST + DLM_CONVERTING_LIST = 1, + DLM_BLOCKED_LIST = 2, }; static inline int dlm_lvb_is_empty(char *lvb) @@ -427,27 +427,27 @@ struct dlm_node_iter enum { - DLM_MASTER_REQUEST_MSG = 500, - DLM_UNUSED_MSG1, /* 501 */ - DLM_ASSERT_MASTER_MSG, /* 502 */ - DLM_CREATE_LOCK_MSG, /* 503 */ - DLM_CONVERT_LOCK_MSG, /* 504 */ - DLM_PROXY_AST_MSG, /* 505 */ - DLM_UNLOCK_LOCK_MSG, /* 506 */ - DLM_DEREF_LOCKRES_MSG, /* 507 */ - DLM_MIGRATE_REQUEST_MSG, /* 508 */ - DLM_MIG_LOCKRES_MSG, /* 509 */ - DLM_QUERY_JOIN_MSG, /* 510 */ - DLM_ASSERT_JOINED_MSG, /* 511 */ - DLM_CANCEL_JOIN_MSG, /* 512 */ - DLM_EXIT_DOMAIN_MSG, /* 513 */ - DLM_MASTER_REQUERY_MSG, /* 514 */ - DLM_LOCK_REQUEST_MSG, /* 515 */ - DLM_RECO_DATA_DONE_MSG, /* 516 */ - DLM_BEGIN_RECO_MSG, /* 517 */ - DLM_FINALIZE_RECO_MSG, /* 518 */ - DLM_QUERY_REGION, /* 519 */ - DLM_QUERY_NODEINFO, /* 520 */ + DLM_MASTER_REQUEST_MSG = 500, + DLM_UNUSED_MSG1 = 501, + DLM_ASSERT_MASTER_MSG = 502, + DLM_CREATE_LOCK_MSG = 503, + DLM_CONVERT_LOCK_MSG = 504, + DLM_PROXY_AST_MSG = 505, + DLM_UNLOCK_LOCK_MSG = 506, + DLM_DEREF_LOCKRES_MSG = 507, + DLM_MIGRATE_REQUEST_MSG = 508, + DLM_MIG_LOCKRES_MSG = 509, + DLM_QUERY_JOIN_MSG = 510, + DLM_ASSERT_JOINED_MSG = 511, + DLM_CANCEL_JOIN_MSG = 512, + DLM_EXIT_DOMAIN_MSG = 513, + DLM_MASTER_REQUERY_MSG = 514, + DLM_LOCK_REQUEST_MSG = 515, + DLM_RECO_DATA_DONE_MSG = 516, + DLM_BEGIN_RECO_MSG = 517, + DLM_FINALIZE_RECO_MSG = 518, + DLM_QUERY_REGION = 519, + DLM_QUERY_NODEINFO = 520, }; struct dlm_reco_node_data @@ -460,19 +460,19 @@ struct dlm_reco_node_data enum { DLM_RECO_NODE_DATA_DEAD = -1, DLM_RECO_NODE_DATA_INIT = 0, - DLM_RECO_NODE_DATA_REQUESTING, - DLM_RECO_NODE_DATA_REQUESTED, - DLM_RECO_NODE_DATA_RECEIVING, - DLM_RECO_NODE_DATA_DONE, - DLM_RECO_NODE_DATA_FINALIZE_SENT, + DLM_RECO_NODE_DATA_REQUESTING = 1, + DLM_RECO_NODE_DATA_REQUESTED = 2, + DLM_RECO_NODE_DATA_RECEIVING = 3, + DLM_RECO_NODE_DATA_DONE = 4, + DLM_RECO_NODE_DATA_FINALIZE_SENT = 5, }; enum { DLM_MASTER_RESP_NO = 0, - DLM_MASTER_RESP_YES, - DLM_MASTER_RESP_MAYBE, - DLM_MASTER_RESP_ERROR + DLM_MASTER_RESP_YES = 1, + DLM_MASTER_RESP_MAYBE = 2, + DLM_MASTER_RESP_ERROR = 3, }; @@ -649,9 +649,9 @@ struct 
dlm_proxy_ast #define DLM_MOD_KEY (0x666c6172) enum dlm_query_join_response_code { JOIN_DISALLOW = 0, - JOIN_OK, - JOIN_OK_NO_MAP, - JOIN_PROTOCOL_MISMATCH, + JOIN_OK = 1, + JOIN_OK_NO_MAP = 2, + JOIN_PROTOCOL_MISMATCH = 3, }; struct dlm_query_join_packet { -- cgit v1.2.2 From 8757241e32a295a2aa836e8f8b32912204d11fda Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Wed, 22 Dec 2010 12:39:37 -0800 Subject: ocfs2: Add DEBUG_FS dependency Make OCFS2_FS_STATS depend on DEBUG_FS. Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig index 0d840669698e..ab152c00cd3a 100644 --- a/fs/ocfs2/Kconfig +++ b/fs/ocfs2/Kconfig @@ -51,7 +51,7 @@ config OCFS2_FS_USERSPACE_CLUSTER config OCFS2_FS_STATS bool "OCFS2 statistics" - depends on OCFS2_FS + depends on OCFS2_FS && DEBUG_FS default y help This option allows some fs statistics to be captured. Enabling -- cgit v1.2.2 From 3f9c14fab0a2e90af9995f261a123f59e0b41141 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Wed, 22 Dec 2010 12:39:38 -0800 Subject: ocfs2/cluster: Replace timeval with ktime in struct o2net_send_tracking Replace time trackers in struct o2net_send_tracking from struct timeval to union ktime. Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/cluster/netdebug.c | 22 +++++++++++++--------- fs/ocfs2/cluster/tcp.c | 6 +++--- fs/ocfs2/cluster/tcp_internal.h | 6 +++--- 3 files changed, 19 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index 0edf836b42d5..2b986aa82299 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c @@ -123,10 +123,17 @@ static void *nst_seq_next(struct seq_file *seq, void *v, loff_t *pos) static int nst_seq_show(struct seq_file *seq, void *v) { struct o2net_send_tracking *nst, *dummy_nst = seq->private; + ktime_t now; + s64 sock, send, status; spin_lock(&o2net_debug_lock); nst = next_nst(dummy_nst); + now = ktime_get(); + sock = ktime_to_us(ktime_sub(now, nst->st_sock_time)); + send = ktime_to_us(ktime_sub(now, nst->st_send_time)); + status = ktime_to_us(ktime_sub(now, nst->st_status_time)); + if (nst != NULL) { /* get_task_comm isn't exported. oh well. 
*/ seq_printf(seq, "%p:\n" @@ -138,20 +145,17 @@ static int nst_seq_show(struct seq_file *seq, void *v) " message id: %d\n" " message type: %u\n" " message key: 0x%08x\n" - " sock acquiry: %lu.%ld\n" - " send start: %lu.%ld\n" - " wait start: %lu.%ld\n", + " sock acquiry: %lld usecs ago\n" + " send start: %lld usecs ago\n" + " wait start: %lld usecs ago\n", nst, (unsigned long)task_pid_nr(nst->st_task), (unsigned long)nst->st_task->tgid, nst->st_task->comm, nst->st_node, nst->st_sc, nst->st_id, nst->st_msg_type, nst->st_msg_key, - nst->st_sock_time.tv_sec, - (long)nst->st_sock_time.tv_usec, - nst->st_send_time.tv_sec, - (long)nst->st_send_time.tv_usec, - nst->st_status_time.tv_sec, - (long)nst->st_status_time.tv_usec); + (long long)sock, + (long long)send, + (long long)status); } spin_unlock(&o2net_debug_lock); diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 92de96cd247d..49c1a95e352e 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -155,17 +155,17 @@ static void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype, static void o2net_set_nst_sock_time(struct o2net_send_tracking *nst) { - do_gettimeofday(&nst->st_sock_time); + nst->st_sock_time = ktime_get(); } static void o2net_set_nst_send_time(struct o2net_send_tracking *nst) { - do_gettimeofday(&nst->st_send_time); + nst->st_send_time = ktime_get(); } static void o2net_set_nst_status_time(struct o2net_send_tracking *nst) { - do_gettimeofday(&nst->st_status_time); + nst->st_status_time = ktime_get(); } static void o2net_set_nst_sock_container(struct o2net_send_tracking *nst, diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index 15fdbdf9eb4b..b613aaaf3e5c 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h @@ -220,9 +220,9 @@ struct o2net_send_tracking { u32 st_msg_type; u32 st_msg_key; u8 st_node; - struct timeval st_sock_time; - struct timeval st_send_time; - struct timeval st_status_time; + ktime_t st_sock_time; + ktime_t st_send_time; + ktime_t st_status_time; }; #else struct o2net_send_tracking { -- cgit v1.2.2 From ff1becbf85bf4d4d4652915b7ab27db949585f6b Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Wed, 22 Dec 2010 12:39:39 -0800 Subject: ocfs2/cluster: Use ktime instead of timeval in struct o2net_sock_container Replace time trackers in struct o2net_sock_container from struct timeval to union ktime. 
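In sketch form, the conversion pattern used by this pair of patches (illustrative variable names only, not code from the patches):

	ktime_t then, now;
	s64 elapsed_us;

	then = ktime_get();                     /* was: do_gettimeofday(&tv_then) */
	/* ... the event being timed ... */
	now = ktime_get();
	elapsed_us = ktime_to_us(ktime_sub(now, then));
	                                        /* was: open-coded tv_sec/tv_usec arithmetic */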
Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/cluster/netdebug.c | 28 ++++++------- fs/ocfs2/cluster/tcp.c | 91 ++++++++++++++++++++++++----------------- fs/ocfs2/cluster/tcp_internal.h | 19 +++++---- 3 files changed, 76 insertions(+), 62 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index 2b986aa82299..536a93d13a06 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c @@ -280,8 +280,6 @@ static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos) return sc; /* unused, just needs to be null when done */ } -#define TV_SEC_USEC(TV) TV.tv_sec, (long)TV.tv_usec - static int sc_seq_show(struct seq_file *seq, void *v) { struct o2net_sock_container *sc, *dummy_sc = seq->private; @@ -313,13 +311,13 @@ static int sc_seq_show(struct seq_file *seq, void *v) " remote node: %s\n" " page off: %zu\n" " handshake ok: %u\n" - " timer: %lu.%ld\n" - " data ready: %lu.%ld\n" - " advance start: %lu.%ld\n" - " advance stop: %lu.%ld\n" - " func start: %lu.%ld\n" - " func stop: %lu.%ld\n" - " func key: %u\n" + " timer: %lld usecs\n" + " data ready: %lld usecs\n" + " advance start: %lld usecs\n" + " advance stop: %lld usecs\n" + " func start: %lld usecs\n" + " func stop: %lld usecs\n" + " func key: 0x%08x\n" " func type: %u\n", sc, atomic_read(&sc->sc_kref.refcount), @@ -328,12 +326,12 @@ static int sc_seq_show(struct seq_file *seq, void *v) sc->sc_node->nd_name, sc->sc_page_off, sc->sc_handshake_ok, - TV_SEC_USEC(sc->sc_tv_timer), - TV_SEC_USEC(sc->sc_tv_data_ready), - TV_SEC_USEC(sc->sc_tv_advance_start), - TV_SEC_USEC(sc->sc_tv_advance_stop), - TV_SEC_USEC(sc->sc_tv_func_start), - TV_SEC_USEC(sc->sc_tv_func_stop), + (long long)ktime_to_us(sc->sc_tv_timer), + (long long)ktime_to_us(sc->sc_tv_data_ready), + (long long)ktime_to_us(sc->sc_tv_advance_start), + (long long)ktime_to_us(sc->sc_tv_advance_stop), + (long long)ktime_to_us(sc->sc_tv_func_start), + (long long)ktime_to_us(sc->sc_tv_func_stop), sc->sc_msg_key, sc->sc_msg_type); } diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 49c1a95e352e..efd848334052 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -153,61 +153,75 @@ static void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype, nst->st_node = node; } -static void o2net_set_nst_sock_time(struct o2net_send_tracking *nst) +static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst) { nst->st_sock_time = ktime_get(); } -static void o2net_set_nst_send_time(struct o2net_send_tracking *nst) +static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst) { nst->st_send_time = ktime_get(); } -static void o2net_set_nst_status_time(struct o2net_send_tracking *nst) +static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst) { nst->st_status_time = ktime_get(); } -static void o2net_set_nst_sock_container(struct o2net_send_tracking *nst, - struct o2net_sock_container *sc) +static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst, + struct o2net_sock_container *sc) { nst->st_sc = sc; } -static void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, u32 msg_id) +static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, + u32 msg_id) { nst->st_id = msg_id; } -#else /* CONFIG_DEBUG_FS */ - -static inline void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype, - u32 msgkey, struct task_struct *task, u8 node) +static inline void o2net_set_sock_timer(struct 
o2net_sock_container *sc) { + sc->sc_tv_timer = ktime_get(); } -static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst) +static inline void o2net_set_data_ready_time(struct o2net_sock_container *sc) { + sc->sc_tv_data_ready = ktime_get(); } -static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst) +static inline void o2net_set_advance_start_time(struct o2net_sock_container *sc) { + sc->sc_tv_advance_start = ktime_get(); } -static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst) +static inline void o2net_set_advance_stop_time(struct o2net_sock_container *sc) { + sc->sc_tv_advance_stop = ktime_get(); } -static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst, - struct o2net_sock_container *sc) +static inline void o2net_set_func_start_time(struct o2net_sock_container *sc) { + sc->sc_tv_func_start = ktime_get(); } -static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, - u32 msg_id) +static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc) { + sc->sc_tv_func_stop = ktime_get(); } - +#else /* CONFIG_DEBUG_FS */ +# define o2net_init_nst(a, b, c, d, e) +# define o2net_set_nst_sock_time(a) +# define o2net_set_nst_send_time(a) +# define o2net_set_nst_status_time(a) +# define o2net_set_nst_sock_container(a, b) +# define o2net_set_nst_msg_id(a, b) +# define o2net_set_sock_timer(a) +# define o2net_set_data_ready_time(a) +# define o2net_set_advance_start_time(a) +# define o2net_set_advance_stop_time(a) +# define o2net_set_func_start_time(a) +# define o2net_set_func_stop_time(a) #endif /* CONFIG_DEBUG_FS */ static inline int o2net_reconnect_delay(void) @@ -555,7 +569,7 @@ static void o2net_data_ready(struct sock *sk, int bytes) if (sk->sk_user_data) { struct o2net_sock_container *sc = sk->sk_user_data; sclog(sc, "data_ready hit\n"); - do_gettimeofday(&sc->sc_tv_data_ready); + o2net_set_data_ready_time(sc); o2net_sc_queue_work(sc, &sc->sc_rx_work); ready = sc->sc_data_ready; } else { @@ -1192,13 +1206,13 @@ static int o2net_process_message(struct o2net_sock_container *sc, if (syserr != O2NET_ERR_NONE) goto out_respond; - do_gettimeofday(&sc->sc_tv_func_start); + o2net_set_func_start_time(sc); sc->sc_msg_key = be32_to_cpu(hdr->key); sc->sc_msg_type = be16_to_cpu(hdr->msg_type); handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) + be16_to_cpu(hdr->data_len), nmh->nh_func_data, &ret_data); - do_gettimeofday(&sc->sc_tv_func_stop); + o2net_set_func_stop_time(sc); out_respond: /* this destroys the hdr, so don't use it after this */ @@ -1309,7 +1323,7 @@ static int o2net_advance_rx(struct o2net_sock_container *sc) size_t datalen; sclog(sc, "receiving\n"); - do_gettimeofday(&sc->sc_tv_advance_start); + o2net_set_advance_start_time(sc); if (unlikely(sc->sc_handshake_ok == 0)) { if(sc->sc_page_off < sizeof(struct o2net_handshake)) { @@ -1384,7 +1398,7 @@ static int o2net_advance_rx(struct o2net_sock_container *sc) out: sclog(sc, "ret = %d\n", ret); - do_gettimeofday(&sc->sc_tv_advance_stop); + o2net_set_advance_stop_time(sc); return ret; } @@ -1484,27 +1498,28 @@ static void o2net_idle_timer(unsigned long data) { struct o2net_sock_container *sc = (struct o2net_sock_container *)data; struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num); - struct timeval now; - do_gettimeofday(&now); +#ifdef CONFIG_DEBUG_FS + ktime_t now = ktime_get(); +#endif printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u " "seconds, shutting it down.\n", 
SC_NODEF_ARGS(sc), o2net_idle_timeout() / 1000, o2net_idle_timeout() % 1000); - mlog(ML_NOTICE, "here are some times that might help debug the " - "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv " - "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n", - sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec, - now.tv_sec, (long) now.tv_usec, - sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec, - sc->sc_tv_advance_start.tv_sec, - (long) sc->sc_tv_advance_start.tv_usec, - sc->sc_tv_advance_stop.tv_sec, - (long) sc->sc_tv_advance_stop.tv_usec, + +#ifdef CONFIG_DEBUG_FS + mlog(ML_NOTICE, "Here are some times that might help debug the " + "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, " + "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n", + (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now), + (long long)ktime_to_us(sc->sc_tv_data_ready), + (long long)ktime_to_us(sc->sc_tv_advance_start), + (long long)ktime_to_us(sc->sc_tv_advance_stop), sc->sc_msg_key, sc->sc_msg_type, - sc->sc_tv_func_start.tv_sec, (long) sc->sc_tv_func_start.tv_usec, - sc->sc_tv_func_stop.tv_sec, (long) sc->sc_tv_func_stop.tv_usec); + (long long)ktime_to_us(sc->sc_tv_func_start), + (long long)ktime_to_us(sc->sc_tv_func_stop)); +#endif /* * Initialize the nn_timeout so that the next connection attempt @@ -1520,7 +1535,7 @@ static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc) o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work); o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work, msecs_to_jiffies(o2net_keepalive_delay())); - do_gettimeofday(&sc->sc_tv_timer); + o2net_set_sock_timer(sc); mod_timer(&sc->sc_idle_timeout, jiffies + msecs_to_jiffies(o2net_idle_timeout())); } diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index b613aaaf3e5c..f81576333911 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h @@ -166,18 +166,19 @@ struct o2net_sock_container { /* original handlers for the sockets */ void (*sc_state_change)(struct sock *sk); void (*sc_data_ready)(struct sock *sk, int bytes); -#ifdef CONFIG_DEBUG_FS - struct list_head sc_net_debug_item; -#endif - struct timeval sc_tv_timer; - struct timeval sc_tv_data_ready; - struct timeval sc_tv_advance_start; - struct timeval sc_tv_advance_stop; - struct timeval sc_tv_func_start; - struct timeval sc_tv_func_stop; + u32 sc_msg_key; u16 sc_msg_type; +#ifdef CONFIG_DEBUG_FS + struct list_head sc_net_debug_item; + ktime_t sc_tv_timer; + ktime_t sc_tv_data_ready; + ktime_t sc_tv_advance_start; + ktime_t sc_tv_advance_stop; + ktime_t sc_tv_func_start; + ktime_t sc_tv_func_stop; +#endif struct mutex sc_send_lock; }; -- cgit v1.2.2 From 3c193b3807e933cf2a16d55a38debbe549195847 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Wed, 22 Dec 2010 12:39:40 -0800 Subject: ocfs2/cluster: Track send message timing stats for each socket Tracks total send and status times for all messages sent on a socket. 
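Concretely, each completed send updates three per-socket running totals plus a count, as implemented in the hunk below:

	acquiry total += send start - sock acquiry   (time waiting to get the socket)
	send total    += wait start - send start     (time spent transmitting)
	status total  += now        - wait start     (time waiting for the remote status)
	send count    += 1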
Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/cluster/tcp.c | 24 ++++++++++++++++++++++++ fs/ocfs2/cluster/tcp_internal.h | 6 ++++++ 2 files changed, 30 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index efd848334052..4d61e19d6146 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -224,6 +224,28 @@ static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc) # define o2net_set_func_stop_time(a) #endif /* CONFIG_DEBUG_FS */ +#ifdef CONFIG_OCFS2_FS_STATS +static void o2net_update_send_stats(struct o2net_send_tracking *nst, + struct o2net_sock_container *sc) +{ + sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total, + ktime_sub(ktime_get(), + nst->st_status_time)); + sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total, + ktime_sub(nst->st_status_time, + nst->st_send_time)); + sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total, + ktime_sub(nst->st_send_time, + nst->st_sock_time)); + sc->sc_send_count++; +} + +#else + +# define o2net_update_send_stats(a, b) + +#endif /* CONFIG_OCFS2_FS_STATS */ + static inline int o2net_reconnect_delay(void) { return o2nm_single_cluster->cl_reconnect_delay_ms; @@ -1093,6 +1115,8 @@ int o2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec, o2net_set_nst_status_time(&nst); wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw)); + o2net_update_send_stats(&nst, sc); + /* Note that we avoid overwriting the callers status return * variable if a system error was reported on the other * side. Callers beware. */ diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index f81576333911..640c6fcef720 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h @@ -178,6 +178,12 @@ struct o2net_sock_container { ktime_t sc_tv_advance_stop; ktime_t sc_tv_func_start; ktime_t sc_tv_func_stop; +#endif +#ifdef CONFIG_OCFS2_FS_STATS + ktime_t sc_tv_acquiry_total; + ktime_t sc_tv_send_total; + ktime_t sc_tv_status_total; + u32 sc_send_count; #endif struct mutex sc_send_lock; }; -- cgit v1.2.2 From e453039f8bf44abf82f3ecfb34177e0cb04bce12 Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Wed, 22 Dec 2010 12:39:41 -0800 Subject: ocfs2/cluster: Track process message timing stats for each socket Tracks total time taken to process messages received on a socket. 
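One interaction with CONFIG_DEBUG_FS is worth spelling out: o2net_get_func_run_time() below is stubbed to a zero ktime when debugfs support is compiled out, so with CONFIG_OCFS2_FS_STATS=y but CONFIG_DEBUG_FS=n the receive count still increments while the processing total only ever accumulates zeros. The accumulation itself is just the handler start/stop delta; a minimal sketch with plain nanosecond counters in place of ktime_t (illustrative only):

#include <stdint.h>

struct recv_stats {
	int64_t  process_total_ns;
	uint32_t recv_count;
};

/* Rough analogue of o2net_update_recv_stats(): charge the message handler's
 * run time (func_stop - func_start) to the per-socket total. */
static inline void update_recv_stats(struct recv_stats *st,
				     int64_t func_start_ns, int64_t func_stop_ns)
{
	st->process_total_ns += func_stop_ns - func_start_ns;
	st->recv_count++;
}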
Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/cluster/tcp.c | 15 +++++++++++++++ fs/ocfs2/cluster/tcp_internal.h | 2 ++ 2 files changed, 17 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 4d61e19d6146..bc2309554d0b 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -209,6 +209,11 @@ static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc) { sc->sc_tv_func_stop = ktime_get(); } + +static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc) +{ + return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start); +} #else /* CONFIG_DEBUG_FS */ # define o2net_init_nst(a, b, c, d, e) # define o2net_set_nst_sock_time(a) @@ -222,6 +227,7 @@ static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc) # define o2net_set_advance_stop_time(a) # define o2net_set_func_start_time(a) # define o2net_set_func_stop_time(a) +# define o2net_get_func_run_time(a) (ktime_t)0 #endif /* CONFIG_DEBUG_FS */ #ifdef CONFIG_OCFS2_FS_STATS @@ -240,6 +246,13 @@ static void o2net_update_send_stats(struct o2net_send_tracking *nst, sc->sc_send_count++; } +static void o2net_update_recv_stats(struct o2net_sock_container *sc) +{ + sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total, + o2net_get_func_run_time(sc)); + sc->sc_recv_count++; +} + #else # define o2net_update_send_stats(a, b) @@ -1238,6 +1251,8 @@ static int o2net_process_message(struct o2net_sock_container *sc, nmh->nh_func_data, &ret_data); o2net_set_func_stop_time(sc); + o2net_update_recv_stats(sc); + out_respond: /* this destroys the hdr, so don't use it after this */ mutex_lock(&sc->sc_send_lock); diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h index 640c6fcef720..4cbcb65784a3 100644 --- a/fs/ocfs2/cluster/tcp_internal.h +++ b/fs/ocfs2/cluster/tcp_internal.h @@ -184,6 +184,8 @@ struct o2net_sock_container { ktime_t sc_tv_send_total; ktime_t sc_tv_status_total; u32 sc_send_count; + u32 sc_recv_count; + ktime_t sc_tv_process_total; #endif struct mutex sc_send_lock; }; -- cgit v1.2.2 From db02754c8a1205b24beac70562c45ca5d671151f Mon Sep 17 00:00:00 2001 From: Sunil Mushran Date: Wed, 22 Dec 2010 12:39:42 -0800 Subject: ocfs2/cluster: Show o2net timing statistics Adds debugfs dentry o2net/stats to show the o2net timing statistics. 
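Each record in the new stats file is a single comma-separated line; following the seq_printf() in sc_show_sock_stats() below, the fields are: format version, node number, send count, socket-acquiry time (ns), send time (ns), status-wait time (ns), receive count, and processing time (ns). A small user-space sketch of consuming one such line (format version 1; sample values are made up):

#include <stdio.h>

int main(void)
{
	const char *line = "1,0,42,12345,67890,13579,40,24680";
	int ver;
	unsigned int node;
	unsigned long sends, recvs;
	long long acquiry_ns, send_ns, status_ns, process_ns;

	if (sscanf(line, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld",
		   &ver, &node, &sends, &acquiry_ns, &send_ns,
		   &status_ns, &recvs, &process_ns) == 8)
		printf("node %u: %lu sends, %lu recvs, %lld ns processing\n",
		       node, sends, recvs, process_ns);
	return 0;
}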
Signed-off-by: Sunil Mushran Signed-off-by: Joel Becker --- fs/ocfs2/cluster/netdebug.c | 211 ++++++++++++++++++++++++++++++++------------ 1 file changed, 157 insertions(+), 54 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index 536a93d13a06..61df89cedded 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c @@ -46,10 +46,15 @@ #define O2NET_DEBUG_DIR "o2net" #define SC_DEBUG_NAME "sock_containers" #define NST_DEBUG_NAME "send_tracking" +#define STATS_DEBUG_NAME "stats" + +#define SHOW_SOCK_CONTAINERS 0 +#define SHOW_SOCK_STATS 1 static struct dentry *o2net_dentry; static struct dentry *sc_dentry; static struct dentry *nst_dentry; +static struct dentry *stats_dentry; static DEFINE_SPINLOCK(o2net_debug_lock); @@ -232,6 +237,11 @@ void o2net_debug_del_sc(struct o2net_sock_container *sc) spin_unlock(&o2net_debug_lock); } +struct o2net_sock_debug { + int dbg_ctxt; + struct o2net_sock_container *dbg_sock; +}; + static struct o2net_sock_container *next_sc(struct o2net_sock_container *sc_start) { @@ -257,7 +267,8 @@ static struct o2net_sock_container static void *sc_seq_start(struct seq_file *seq, loff_t *pos) { - struct o2net_sock_container *sc, *dummy_sc = seq->private; + struct o2net_sock_debug *sd = seq->private; + struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock; spin_lock(&o2net_debug_lock); sc = next_sc(dummy_sc); @@ -268,7 +279,8 @@ static void *sc_seq_start(struct seq_file *seq, loff_t *pos) static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - struct o2net_sock_container *sc, *dummy_sc = seq->private; + struct o2net_sock_debug *sd = seq->private; + struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock; spin_lock(&o2net_debug_lock); sc = next_sc(dummy_sc); @@ -280,63 +292,107 @@ static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos) return sc; /* unused, just needs to be null when done */ } -static int sc_seq_show(struct seq_file *seq, void *v) +#ifdef CONFIG_OCFS2_FS_STATS +# define sc_send_count(_s) ((_s)->sc_send_count) +# define sc_recv_count(_s) ((_s)->sc_recv_count) +# define sc_tv_acquiry_total_ns(_s) (ktime_to_ns((_s)->sc_tv_acquiry_total)) +# define sc_tv_send_total_ns(_s) (ktime_to_ns((_s)->sc_tv_send_total)) +# define sc_tv_status_total_ns(_s) (ktime_to_ns((_s)->sc_tv_status_total)) +# define sc_tv_process_total_ns(_s) (ktime_to_ns((_s)->sc_tv_process_total)) +#else +# define sc_send_count(_s) (0U) +# define sc_recv_count(_s) (0U) +# define sc_tv_acquiry_total_ns(_s) (0LL) +# define sc_tv_send_total_ns(_s) (0LL) +# define sc_tv_status_total_ns(_s) (0LL) +# define sc_tv_process_total_ns(_s) (0LL) +#endif + +/* So that debugfs.ocfs2 can determine which format is being used */ +#define O2NET_STATS_STR_VERSION 1 +static void sc_show_sock_stats(struct seq_file *seq, + struct o2net_sock_container *sc) { - struct o2net_sock_container *sc, *dummy_sc = seq->private; + if (!sc) + return; + + seq_printf(seq, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld\n", O2NET_STATS_STR_VERSION, + sc->sc_node->nd_num, (unsigned long)sc_send_count(sc), + (long long)sc_tv_acquiry_total_ns(sc), + (long long)sc_tv_send_total_ns(sc), + (long long)sc_tv_status_total_ns(sc), + (unsigned long)sc_recv_count(sc), + (long long)sc_tv_process_total_ns(sc)); +} - spin_lock(&o2net_debug_lock); - sc = next_sc(dummy_sc); +static void sc_show_sock_container(struct seq_file *seq, + struct o2net_sock_container *sc) +{ + struct inet_sock *inet = NULL; + __be32 saddr = 0, daddr = 0; + __be16 sport = 0, dport = 0; 
+ + if (!sc) + return; + + if (sc->sc_sock) { + inet = inet_sk(sc->sc_sock->sk); + /* the stack's structs aren't sparse endian clean */ + saddr = (__force __be32)inet->inet_saddr; + daddr = (__force __be32)inet->inet_daddr; + sport = (__force __be16)inet->inet_sport; + dport = (__force __be16)inet->inet_dport; + } - if (sc != NULL) { - struct inet_sock *inet = NULL; + /* XXX sigh, inet-> doesn't have sparse annotation so any + * use of it here generates a warning with -Wbitwise */ + seq_printf(seq, "%p:\n" + " krefs: %d\n" + " sock: %pI4:%u -> " + "%pI4:%u\n" + " remote node: %s\n" + " page off: %zu\n" + " handshake ok: %u\n" + " timer: %lld usecs\n" + " data ready: %lld usecs\n" + " advance start: %lld usecs\n" + " advance stop: %lld usecs\n" + " func start: %lld usecs\n" + " func stop: %lld usecs\n" + " func key: 0x%08x\n" + " func type: %u\n", + sc, + atomic_read(&sc->sc_kref.refcount), + &saddr, inet ? ntohs(sport) : 0, + &daddr, inet ? ntohs(dport) : 0, + sc->sc_node->nd_name, + sc->sc_page_off, + sc->sc_handshake_ok, + (long long)ktime_to_us(sc->sc_tv_timer), + (long long)ktime_to_us(sc->sc_tv_data_ready), + (long long)ktime_to_us(sc->sc_tv_advance_start), + (long long)ktime_to_us(sc->sc_tv_advance_stop), + (long long)ktime_to_us(sc->sc_tv_func_start), + (long long)ktime_to_us(sc->sc_tv_func_stop), + sc->sc_msg_key, + sc->sc_msg_type); +} - __be32 saddr = 0, daddr = 0; - __be16 sport = 0, dport = 0; +static int sc_seq_show(struct seq_file *seq, void *v) +{ + struct o2net_sock_debug *sd = seq->private; + struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock; - if (sc->sc_sock) { - inet = inet_sk(sc->sc_sock->sk); - /* the stack's structs aren't sparse endian clean */ - saddr = (__force __be32)inet->inet_saddr; - daddr = (__force __be32)inet->inet_daddr; - sport = (__force __be16)inet->inet_sport; - dport = (__force __be16)inet->inet_dport; - } + spin_lock(&o2net_debug_lock); + sc = next_sc(dummy_sc); - /* XXX sigh, inet-> doesn't have sparse annotation so any - * use of it here generates a warning with -Wbitwise */ - seq_printf(seq, "%p:\n" - " krefs: %d\n" - " sock: %pI4:%u -> " - "%pI4:%u\n" - " remote node: %s\n" - " page off: %zu\n" - " handshake ok: %u\n" - " timer: %lld usecs\n" - " data ready: %lld usecs\n" - " advance start: %lld usecs\n" - " advance stop: %lld usecs\n" - " func start: %lld usecs\n" - " func stop: %lld usecs\n" - " func key: 0x%08x\n" - " func type: %u\n", - sc, - atomic_read(&sc->sc_kref.refcount), - &saddr, inet ? ntohs(sport) : 0, - &daddr, inet ? 
ntohs(dport) : 0, - sc->sc_node->nd_name, - sc->sc_page_off, - sc->sc_handshake_ok, - (long long)ktime_to_us(sc->sc_tv_timer), - (long long)ktime_to_us(sc->sc_tv_data_ready), - (long long)ktime_to_us(sc->sc_tv_advance_start), - (long long)ktime_to_us(sc->sc_tv_advance_stop), - (long long)ktime_to_us(sc->sc_tv_func_start), - (long long)ktime_to_us(sc->sc_tv_func_stop), - sc->sc_msg_key, - sc->sc_msg_type); + if (sc) { + if (sd->dbg_ctxt == SHOW_SOCK_CONTAINERS) + sc_show_sock_container(seq, sc); + else + sc_show_sock_stats(seq, sc); } - spin_unlock(&o2net_debug_lock); return 0; @@ -353,7 +409,7 @@ static const struct seq_operations sc_seq_ops = { .show = sc_seq_show, }; -static int sc_fop_open(struct inode *inode, struct file *file) +static int sc_common_open(struct file *file, struct o2net_sock_debug *sd) { struct o2net_sock_container *dummy_sc; struct seq_file *seq; @@ -371,7 +427,8 @@ static int sc_fop_open(struct inode *inode, struct file *file) goto out; seq = file->private_data; - seq->private = dummy_sc; + seq->private = sd; + sd->dbg_sock = dummy_sc; o2net_debug_add_sc(dummy_sc); dummy_sc = NULL; @@ -384,12 +441,48 @@ out: static int sc_fop_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; - struct o2net_sock_container *dummy_sc = seq->private; + struct o2net_sock_debug *sd = seq->private; + struct o2net_sock_container *dummy_sc = sd->dbg_sock; o2net_debug_del_sc(dummy_sc); return seq_release_private(inode, file); } +static int stats_fop_open(struct inode *inode, struct file *file) +{ + struct o2net_sock_debug *sd; + + sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL); + if (sd == NULL) + return -ENOMEM; + + sd->dbg_ctxt = SHOW_SOCK_STATS; + sd->dbg_sock = NULL; + + return sc_common_open(file, sd); +} + +static const struct file_operations stats_seq_fops = { + .open = stats_fop_open, + .read = seq_read, + .llseek = seq_lseek, + .release = sc_fop_release, +}; + +static int sc_fop_open(struct inode *inode, struct file *file) +{ + struct o2net_sock_debug *sd; + + sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL); + if (sd == NULL) + return -ENOMEM; + + sd->dbg_ctxt = SHOW_SOCK_CONTAINERS; + sd->dbg_sock = NULL; + + return sc_common_open(file, sd); +} + static const struct file_operations sc_seq_fops = { .open = sc_fop_open, .read = seq_read, @@ -421,8 +514,17 @@ int o2net_debugfs_init(void) goto bail; } + stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR, + o2net_dentry, NULL, + &stats_seq_fops); + if (!stats_dentry) { + mlog_errno(-ENOMEM); + goto bail; + } + return 0; bail: + debugfs_remove(stats_dentry); debugfs_remove(sc_dentry); debugfs_remove(nst_dentry); debugfs_remove(o2net_dentry); @@ -431,6 +533,7 @@ bail: void o2net_debugfs_exit(void) { + debugfs_remove(stats_dentry); debugfs_remove(sc_dentry); debugfs_remove(nst_dentry); debugfs_remove(o2net_dentry); -- cgit v1.2.2 From 22b6dee842c6341b49bc09cc5728eb2f8f2b3766 Mon Sep 17 00:00:00 2001 From: Mi Jinlong Date: Mon, 27 Dec 2010 14:29:57 +0800 Subject: nfsd4: fix oops on secinfo_no_name result encoding The secinfo_no_name code oopses on encoding with BUG: unable to handle kernel NULL pointer dereference at 00000044 IP: [] nfsd4_encode_secinfo+0x1c/0x1c1 [nfsd] We should implement a nfsd4_encode_secinfo_no_name() instead using nfsd4_encode_secinfo(). Signed-off-by: Mi Jinlong Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4xdr.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index b543b2410b54..437b4623cb02 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2845,11 +2845,10 @@ nfsd4_encode_rename(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_ } static __be32 -nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr, - struct nfsd4_secinfo *secinfo) +nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp, + __be32 nfserr,struct svc_export *exp) { int i = 0; - struct svc_export *exp = secinfo->si_exp; u32 nflavs; struct exp_flavor_info *flavs; struct exp_flavor_info def_flavs[2]; @@ -2911,6 +2910,20 @@ out: return nfserr; } +static __be32 +nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_secinfo *secinfo) +{ + return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->si_exp); +} + +static __be32 +nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_secinfo_no_name *secinfo) +{ + return nfsd4_do_encode_secinfo(resp, nfserr, secinfo->sin_exp); +} + /* * The SETATTR encode routine is special -- it always encodes a bitmap, * regardless of the error status. @@ -3173,7 +3186,7 @@ static nfsd4_enc nfsd4_enc_ops[] = { [OP_LAYOUTCOMMIT] = (nfsd4_enc)nfsd4_encode_noop, [OP_LAYOUTGET] = (nfsd4_enc)nfsd4_encode_noop, [OP_LAYOUTRETURN] = (nfsd4_enc)nfsd4_encode_noop, - [OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_secinfo, + [OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_secinfo_no_name, [OP_SEQUENCE] = (nfsd4_enc)nfsd4_encode_sequence, [OP_SET_SSV] = (nfsd4_enc)nfsd4_encode_noop, [OP_TEST_STATEID] = (nfsd4_enc)nfsd4_encode_noop, -- cgit v1.2.2 From a6e8dc46ff0b7defbfa4f29a71aee263377ec573 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Jan 2011 15:01:48 +0100 Subject: bio-integrity: mark kintegrityd_wq highpri and CPU intensive Work items processed by kintegrityd_wq won't block much, may burn a lot of CPU cycles and affect IO latency. Use alloc_workqueue() to mark it highpri and CPU intensive with max concurrency of 1. Signed-off-by: Tejun Heo Cc: Martin K. Petersen Signed-off-by: Jens Axboe --- fs/bio-integrity.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 4d0ff5ee27b8..e49cce234c65 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -782,7 +782,12 @@ void __init bio_integrity_init(void) { unsigned int i; - kintegrityd_wq = create_workqueue("kintegrityd"); + /* + * kintegrityd won't block much but may burn a lot of CPU cycles. + * Make it highpri CPU intensive wq with max concurrency of 1. + */ + kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM | + WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1); if (!kintegrityd_wq) panic("Failed to create kintegrityd\n"); -- cgit v1.2.2 From 878215feb8b2417c4700090b4335739858cf7b5a Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Fri, 24 Dec 2010 22:22:37 +0000 Subject: NFS: Don't leak in nfs_proc_symlink() Hi, In fs/nfs/proc.c::nfs_proc_symlink() we will leak memory if either nfs_alloc_fhandle() or nfs_alloc_fattr() returns NULL but the other one doesn't. This patch ensures memory allocated by one when the other fails is always released (this is safe since nfs_free_fattr() and nfs_free_fhandle() both call kfree which deals gracefully with NULL pointers). 
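Since both helpers accept NULL, a single out_free label suffices regardless of which allocation failed. A stripped-down user-space analogue of the pattern in the hunk below (free() gives the identical NULL guarantee; names and values are only for illustration):

#include <stdlib.h>
#include <stdio.h>

static int alloc_pair(void)
{
	void *fh = malloc(32);
	void *fattr = malloc(64);
	int status = -12;		/* -ENOMEM */

	if (fh == NULL || fattr == NULL)
		goto out_free;		/* release whichever allocation succeeded */

	/* ... issue the request and instantiate the result here ... */
	status = 0;

out_free:
	free(fattr);			/* freeing NULL is a no-op */
	free(fh);
	return status;
}

int main(void)
{
	printf("status=%d\n", alloc_pair());
	return 0;
}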
Signed-off-by: Jesper Juhl Signed-off-by: Trond Myklebust --- fs/nfs/proc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 00df60523aac..77d5e21c4ad6 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -458,7 +458,7 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page, fattr = nfs_alloc_fattr(); status = -ENOMEM; if (fh == NULL || fattr == NULL) - goto out; + goto out_free; status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0); nfs_mark_for_revalidate(dir); @@ -471,6 +471,7 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page, if (status == 0) status = nfs_instantiate(dentry, fh, fattr); +out_free: nfs_free_fattr(fattr); nfs_free_fhandle(fh); out: -- cgit v1.2.2 From 51f128ea1c9224c1e3cf6c6a1498431d97699668 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sun, 2 Jan 2011 20:20:42 +0000 Subject: lockd: double unlock in next_host_state() We unlock again after we goto out. Signed-off-by: Dan Carpenter Signed-off-by: Trond Myklebust --- fs/lockd/host.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/lockd/host.c b/fs/lockd/host.c index c106d6a93e5d..5f1bcb2f06f3 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -532,7 +532,6 @@ static struct nlm_host *next_host_state(struct hlist_head *cache, host->h_state++; nlm_get_host(host); - mutex_unlock(&nlm_host_mutex); goto out; } } -- cgit v1.2.2 From 5f3e97c9ee6290befb5a2e78baf95ff951a8a34a Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 21 Dec 2010 23:49:34 +0000 Subject: nfs: fix mispelling of idmap CONFIG symbol Trivial, but confusing when you're trying to grep through this code.... Signed-off-by: J. Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/idmap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index 4e2d9b6b1380..18696882f1c6 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -238,7 +238,7 @@ int nfs_map_gid_to_group(struct nfs_client *clp, __u32 gid, char *buf, size_t bu return nfs_idmap_lookup_name(gid, "group", buf, buflen); } -#else /* CONFIG_NFS_USE_IDMAPPER not defined */ +#else /* CONFIG_NFS_USE_NEW_IDMAPPER not defined */ #include #include -- cgit v1.2.2 From bf0c84f1614bffc59565d04f09b9ac6b1aa269a9 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 28 Dec 2010 17:02:46 +0000 Subject: NFS: use ERR_CAST() Use ERR_CAST() intead of wierd-looking cast. Signed-off-by: Namhyung Kim Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 3e2123fe79f5..831d61c4449b 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1218,7 +1218,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru goto out_unblock_sillyrename; } inode = nfs_fhget(dentry->d_sb, fhandle, fattr); - res = (struct dentry *)inode; + res = ERR_CAST(inode); if (IS_ERR(res)) goto out_unblock_sillyrename; -- cgit v1.2.2 From a8a5da996df7d2d91b5aef2752da9adcefea4bc6 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Dec 2010 11:35:14 +0000 Subject: nfs: Set MS_POSIXACL always We want to skip VFS applying mode for NFS. So set MS_POSIXACL always and selectively use umask. Ideally we would want to use umask only when we don't have inheritable ACEs set. But NFS currently don't allow to send umask to the server. 
So this is best what we can do and this is consistent with NFSv3 Signed-off-by: Aneesh Kumar K.V Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 3 +-- fs/nfs/nfs4proc.c | 5 +++++ fs/nfs/super.c | 10 ++++++++++ 3 files changed, 16 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 831d61c4449b..6ceedc7b98da 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1352,8 +1352,7 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry if (nd->flags & LOOKUP_CREATE) { attr.ia_mode = nd->intent.open.create_mode; attr.ia_valid = ATTR_MODE; - if (!IS_POSIXACL(dir)) - attr.ia_mode &= ~current_umask(); + attr.ia_mode &= ~current_umask(); } else { open_flags &= ~(O_EXCL | O_CREAT); attr.ia_valid = 0; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 78b08993a38b..ca88f294f0af 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2486,6 +2486,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, path = &ctx->path; fmode = ctx->mode; } + sattr->ia_mode &= ~current_umask(); state = nfs4_do_open(dir, path, fmode, flags, sattr, cred); d_drop(dentry); if (IS_ERR(state)) { @@ -2816,6 +2817,8 @@ static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, { struct nfs4_exception exception = { }; int err; + + sattr->ia_mode &= ~current_umask(); do { err = nfs4_handle_exception(NFS_SERVER(dir), _nfs4_proc_mkdir(dir, dentry, sattr), @@ -2916,6 +2919,8 @@ static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, { struct nfs4_exception exception = { }; int err; + + sattr->ia_mode &= ~current_umask(); do { err = nfs4_handle_exception(NFS_SERVER(dir), _nfs4_proc_mknod(dir, dentry, sattr, rdev), diff --git a/fs/nfs/super.c b/fs/nfs/super.c index dd56eec16eac..001f9cb2804b 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2496,6 +2496,11 @@ static void nfs4_clone_super(struct super_block *sb, sb->s_maxbytes = old_sb->s_maxbytes; sb->s_time_gran = 1; sb->s_op = old_sb->s_op; + /* + * The VFS shouldn't apply the umask to mode bits. We will do + * so ourselves when necessary. + */ + sb->s_flags |= MS_POSIXACL; nfs_initialise_sb(sb); } @@ -2506,6 +2511,11 @@ static void nfs4_fill_super(struct super_block *sb) { sb->s_time_gran = 1; sb->s_op = &nfs4_sops; + /* + * The VFS shouldn't apply the umask to mode bits. We will do + * so ourselves when necessary. + */ + sb->s_flags |= MS_POSIXACL; nfs_initialise_sb(sb); } -- cgit v1.2.2 From 64c2ce8b72eceec4030b04bca32d098b3d1431bb Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Dec 2010 11:35:25 +0000 Subject: nfsv4: Switch to generic xattr handling code This patch make nfsv4 use the generic xattr handling code to get the nfsv4 acl. 
This will help us to add richacl support to nfsv4 in later patches Signed-off-by: Aneesh Kumar K.V Signed-off-by: Trond Myklebust --- fs/nfs/dir.c | 8 ++++--- fs/nfs/nfs4_fs.h | 7 +------ fs/nfs/nfs4proc.c | 62 +++++++++++++++++++++++++++++++------------------------ fs/nfs/super.c | 4 +++- 4 files changed, 44 insertions(+), 37 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 6ceedc7b98da..65d5cb4f70b1 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "delegation.h" #include "iostat.h" @@ -125,9 +126,10 @@ const struct inode_operations nfs4_dir_inode_operations = { .permission = nfs_permission, .getattr = nfs_getattr, .setattr = nfs_setattr, - .getxattr = nfs4_getxattr, - .setxattr = nfs4_setxattr, - .listxattr = nfs4_listxattr, + .getxattr = generic_getxattr, + .setxattr = generic_setxattr, + .listxattr = generic_listxattr, + .removexattr = generic_removexattr, }; #endif /* CONFIG_NFS_V4 */ diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 7a6eecffcaeb..3b3829c3098f 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -227,12 +227,6 @@ struct nfs4_state_maintenance_ops { extern const struct dentry_operations nfs4_dentry_operations; extern const struct inode_operations nfs4_dir_inode_operations; -/* inode.c */ -extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t); -extern int nfs4_setxattr(struct dentry *, const char *, const void *, size_t, int); -extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t); - - /* nfs4proc.c */ extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *); extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *); @@ -246,6 +240,7 @@ extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fh extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, struct nfs4_fs_locations *fs_locations, struct page *page); extern void nfs4_release_lockowner(const struct nfs4_lock_state *); +extern const struct xattr_handler *nfs4_xattr_handlers[]; #if defined(CONFIG_NFS_V4_1) static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index ca88f294f0af..82f3a82b7115 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -49,6 +49,7 @@ #include #include #include +#include #include "nfs4_fs.h" #include "delegation.h" @@ -4403,42 +4404,36 @@ void nfs4_release_lockowner(const struct nfs4_lock_state *lsp) #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl" -int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf, - size_t buflen, int flags) +static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key, + const void *buf, size_t buflen, + int flags, int type) { - struct inode *inode = dentry->d_inode; - - if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0) - return -EOPNOTSUPP; + if (strcmp(key, "") != 0) + return -EINVAL; - return nfs4_proc_set_acl(inode, buf, buflen); + return nfs4_proc_set_acl(dentry->d_inode, buf, buflen); } -/* The getxattr man page suggests returning -ENODATA for unknown attributes, - * and that's what we'll do for e.g. user attributes that haven't been set. - * But we'll follow ext2/ext3's lead by returning -EOPNOTSUPP for unsupported - * attributes in kernel-managed attribute namespaces. 
*/ -ssize_t nfs4_getxattr(struct dentry *dentry, const char *key, void *buf, - size_t buflen) +static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key, + void *buf, size_t buflen, int type) { - struct inode *inode = dentry->d_inode; - - if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0) - return -EOPNOTSUPP; + if (strcmp(key, "") != 0) + return -EINVAL; - return nfs4_proc_get_acl(inode, buf, buflen); + return nfs4_proc_get_acl(dentry->d_inode, buf, buflen); } -ssize_t nfs4_listxattr(struct dentry *dentry, char *buf, size_t buflen) +static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list, + size_t list_len, const char *name, + size_t name_len, int type) { - size_t len = strlen(XATTR_NAME_NFSV4_ACL) + 1; + size_t len = sizeof(XATTR_NAME_NFSV4_ACL); if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode))) return 0; - if (buf && buflen < len) - return -ERANGE; - if (buf) - memcpy(buf, XATTR_NAME_NFSV4_ACL, len); + + if (list && len <= list_len) + memcpy(list, XATTR_NAME_NFSV4_ACL, len); return len; } @@ -5509,9 +5504,10 @@ static const struct inode_operations nfs4_file_inode_operations = { .permission = nfs_permission, .getattr = nfs_getattr, .setattr = nfs_setattr, - .getxattr = nfs4_getxattr, - .setxattr = nfs4_setxattr, - .listxattr = nfs4_listxattr, + .getxattr = generic_getxattr, + .setxattr = generic_setxattr, + .listxattr = generic_listxattr, + .removexattr = generic_removexattr, }; const struct nfs_rpc_ops nfs_v4_clientops = { @@ -5556,6 +5552,18 @@ const struct nfs_rpc_ops nfs_v4_clientops = { .open_context = nfs4_atomic_open, }; +static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { + .prefix = XATTR_NAME_NFSV4_ACL, + .list = nfs4_xattr_list_nfs4_acl, + .get = nfs4_xattr_get_nfs4_acl, + .set = nfs4_xattr_set_nfs4_acl, +}; + +const struct xattr_handler *nfs4_xattr_handlers[] = { + &nfs4_xattr_nfs4_acl_handler, + NULL +}; + /* * Local variables: * c-basic-offset: 8 diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 001f9cb2804b..0f9ea73e7789 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2501,7 +2501,8 @@ static void nfs4_clone_super(struct super_block *sb, * so ourselves when necessary. */ sb->s_flags |= MS_POSIXACL; - nfs_initialise_sb(sb); + sb->s_xattr = old_sb->s_xattr; + nfs_initialise_sb(sb); } /* @@ -2516,6 +2517,7 @@ static void nfs4_fill_super(struct super_block *sb) * so ourselves when necessary. */ sb->s_flags |= MS_POSIXACL; + sb->s_xattr = nfs4_xattr_handlers; nfs_initialise_sb(sb); } -- cgit v1.2.2 From 65e5341b9a0c39767ae1fecc727d70eda0dd6d83 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Fri, 24 Dec 2010 06:41:52 -0500 Subject: Btrfs: fix off by one while setting block groups readonly When we read in block groups, we'll set non-redundant groups readonly if we find a raid1, DUP or raid10 group. But the ro code has an off by one bug in the math around testing to make sure out accounting doesn't go wrong. 
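In other words, the exact-fit case was being rejected: when the bytes already accounted against the group plus num_bytes add up to exactly sinfo->total_bytes, the group can still go read-only, which is why the comparison needs to be <= rather than <. A toy illustration with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total_bytes = 1024;
	uint64_t used = 768, reserved = 128, num_bytes = 128;	/* sums to exactly 1024 */
	uint64_t accounted = used + reserved + num_bytes;

	printf("strict  '<' : %s\n", accounted <  total_bytes ? "ro allowed" : "ro refused");
	printf("relaxed '<=': %s\n", accounted <= total_bytes ? "ro allowed" : "ro refused");
	return 0;
}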
Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7e5162e5c411..b180efdc8b68 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7971,13 +7971,14 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache) if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned + sinfo->bytes_may_use + sinfo->bytes_readonly + - cache->reserved_pinned + num_bytes < sinfo->total_bytes) { + cache->reserved_pinned + num_bytes <= sinfo->total_bytes) { sinfo->bytes_readonly += num_bytes; sinfo->bytes_reserved += cache->reserved_pinned; cache->reserved_pinned = 0; cache->ro = 1; ret = 0; } + spin_unlock(&cache->lock); spin_unlock(&sinfo->lock); return ret; -- cgit v1.2.2 From 65e4c8945575abca4e368e05ca3e9f77df030290 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 16 Dec 2010 15:25:54 +0200 Subject: nfsd: declare several functions of nfs4callback as static setup_callback_client(), nfsd4_release_cb() and nfsd4_process_cb_update() do not have users outside the translation unit. Let's declare it as static. Signed-off-by: Kirill A. Shutemov Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 143da2eecd7b..a08580553fda 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -473,7 +473,8 @@ static int max_cb_time(void) /* Reference counting, callback cleanup, etc., all look racy as heck. * And why is cl_cb_set an atomic? */ -int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn) +static int setup_callback_client(struct nfs4_client *clp, + struct nfs4_cb_conn *conn) { struct rpc_timeout timeparms = { .to_initval = max_cb_time(), @@ -748,13 +749,13 @@ void nfsd4_shutdown_callback(struct nfs4_client *clp) flush_workqueue(callback_wq); } -void nfsd4_release_cb(struct nfsd4_callback *cb) +static void nfsd4_release_cb(struct nfsd4_callback *cb) { if (cb->cb_ops->rpc_release) cb->cb_ops->rpc_release(cb); } -void nfsd4_process_cb_update(struct nfsd4_callback *cb) +static void nfsd4_process_cb_update(struct nfsd4_callback *cb) { struct nfs4_cb_conn conn; struct nfs4_client *clp = cb->cb_clp; -- cgit v1.2.2 From 3beb6cd1d448e7ded938bbd676493e6a08e9a6cd Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Sat, 1 Jan 2011 15:43:50 -0500 Subject: nfsd: don't drop requests on -ENOMEM We never want to drop a request if we could return a JUKEBOX/DELAY error instead; so, convert to nfserr_jukebox and let nfsd_dispatch() convert that to a dropit error as a last resort if JUKEBOX/DELAY is unavailable (as in the NFSv2 case). Signed-off-by: J. Bruce Fields --- fs/nfsd/nfsproc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 08e17264784b..dc9c2e3fd1b8 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -736,7 +736,7 @@ nfserrno (int errno) { nfserr_jukebox, -ETIMEDOUT }, { nfserr_jukebox, -ERESTARTSYS }, { nfserr_dropit, -EAGAIN }, - { nfserr_dropit, -ENOMEM }, + { nfserr_jukebox, -ENOMEM }, { nfserr_badname, -ESRCH }, { nfserr_io, -ETXTBSY }, { nfserr_notsupp, -EOPNOTSUPP }, -- cgit v1.2.2 From 9e701c610923aaeac8b38b9202a686d1cc9ee35d Mon Sep 17 00:00:00 2001 From: "J. 
Bruce Fields" Date: Sun, 2 Jan 2011 21:56:36 -0500 Subject: svcrpc: simpler request dropping Currently we use -EAGAIN returns to determine when to drop a deferred request. On its own, that is error-prone, as it makes us treat -EAGAIN returns from other functions specially to prevent inadvertent dropping. So, use a flag on the request instead. Returning an error on request deferral is still required, to prevent further processing, but we no longer need worry that an error return on its own could result in a drop. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfssvc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 2bae1d86f5f2..18743c4d8bca 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -608,7 +608,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp) /* Now call the procedure handler, and encode NFS status. */ nfserr = proc->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp); nfserr = map_new_errors(rqstp->rq_vers, nfserr); - if (nfserr == nfserr_dropit) { + if (nfserr == nfserr_dropit || rqstp->rq_dropme) { dprintk("nfsd: Dropping request; may be revisited later\n"); nfsd_cache_update(rqstp, RC_NOCACHE, NULL); return 0; -- cgit v1.2.2 From 062304a815fe10068c478a4a3f28cf091c55cb82 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Sun, 2 Jan 2011 22:05:33 -0500 Subject: nfsd: stop translating EAGAIN to nfserr_dropit We no longer need this. Also, EWOULDBLOCK is generally a synonym for EAGAIN, but that may not be true on all architectures, so map it as well. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfsproc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index dc9c2e3fd1b8..fd608a27a8d5 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -735,7 +735,8 @@ nfserrno (int errno) { nfserr_stale, -ESTALE }, { nfserr_jukebox, -ETIMEDOUT }, { nfserr_jukebox, -ERESTARTSYS }, - { nfserr_dropit, -EAGAIN }, + { nfserr_jukebox, -EAGAIN }, + { nfserr_jukebox, -EWOULDBLOCK }, { nfserr_jukebox, -ENOMEM }, { nfserr_badname, -ESRCH }, { nfserr_io, -ETXTBSY }, -- cgit v1.2.2 From da165dd60e136d0609e0a2c0c2a9b9a5372200d6 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Sun, 2 Jan 2011 22:13:18 -0500 Subject: nfsd: remove some unnecessary dropit handling We no longer need a few of these special cases. Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4proc.c | 4 ---- fs/nfsd/nfs4state.c | 2 -- fs/nfsd/nfs4xdr.c | 2 -- fs/nfsd/vfs.c | 4 ---- 4 files changed, 12 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index f80c3997d24c..fd6694b49e1c 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1156,10 +1156,6 @@ encode_op: nfsd4_increment_op_stats(op->opnum); } - if (!rqstp->rq_usedeferral && status == nfserr_dropit) { - dprintk("%s Dropit - send NFS4ERR_DELAY\n", __func__); - status = nfserr_jukebox; - } resp->cstate.status = status; fh_put(&resp->cstate.current_fh); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 73adcfb2dc17..b82e36862044 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2509,8 +2509,6 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file if (!fp->fi_fds[oflag]) { status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &fp->fi_fds[oflag]); - if (status == nfserr_dropit) - status = nfserr_jukebox; if (status) return status; } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 437b4623cb02..364aae7d5998 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2328,8 +2328,6 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, case nfserr_resource: nfserr = nfserr_toosmall; goto fail; - case nfserr_dropit: - goto fail; case nfserr_noent: goto skip_entry; default: diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 184938fcff04..6a3af2ff3afe 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -380,8 +380,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, * we need to break all leases. */ host_err = break_lease(inode, O_WRONLY | O_NONBLOCK); - if (host_err == -EWOULDBLOCK) - host_err = -ETIMEDOUT; if (host_err) /* ENOMEM or EWOULDBLOCK */ goto out_nfserr; @@ -752,8 +750,6 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, */ if (!(access & NFSD_MAY_NOT_BREAK_LEASE)) host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0)); - if (host_err == -EWOULDBLOCK) - host_err = -ETIMEDOUT; if (host_err) /* NOMEM or WOULDBLOCK */ goto out_nfserr; -- cgit v1.2.2 From e63eb9375089f9d2041305d04c3f33a194e0e014 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Sat, 30 Oct 2010 17:41:26 -0400 Subject: nfsd4: eliminate lease delete callback nfsd controls the lifetime of the lease, not the lock code, so there's no need for this callback on lease destruction. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 18 ------------------ 1 file changed, 18 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index b82e36862044..2e44ad2539ab 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2295,23 +2295,6 @@ void nfsd_break_deleg_cb(struct file_lock *fl) nfsd4_cb_recall(dp); } -/* - * The file_lock is being reapd. - * - * Called by locks_free_lock() with lock_flocks() held. 
- */ -static -void nfsd_release_deleg_cb(struct file_lock *fl) -{ - struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; - - dprintk("NFSD nfsd_release_deleg_cb: fl %p dp %p dl_count %d\n", fl,dp, atomic_read(&dp->dl_count)); - - if (!(fl->fl_flags & FL_LEASE) || !dp) - return; - dp->dl_flock = NULL; -} - /* * Called from setlease() with lock_flocks() held */ @@ -2341,7 +2324,6 @@ int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) static const struct lock_manager_operations nfsd_lease_mng_ops = { .fl_break = nfsd_break_deleg_cb, - .fl_release_private = nfsd_release_deleg_cb, .fl_mylease = nfsd_same_client_deleg_cb, .fl_change = nfsd_change_deleg_cb, }; -- cgit v1.2.2 From c84d500bc41658165ceb0dd04dc6a75249940fba Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Sat, 30 Oct 2010 23:35:04 -0400 Subject: nfsd4: use a single struct file for delegations When we converted to sharing struct filess between nfs4 opens I went too far and also used the same mechanism for delegations. But keeping a reference to the struct file ensures it will outlast the lease, and allows us to remove the lease with the same file as we added it. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 10 +++++----- fs/nfsd/state.h | 1 + 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 2e44ad2539ab..cbe1b81c147d 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -230,7 +230,8 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f dp->dl_client = clp; get_nfs4_file(fp); dp->dl_file = fp; - nfs4_file_get_access(fp, O_RDONLY); + dp->dl_vfs_file = find_readable_file(fp); + get_file(dp->dl_vfs_file); dp->dl_flock = NULL; dp->dl_type = type; dp->dl_stateid.si_boot = boot_time; @@ -252,6 +253,7 @@ nfs4_put_delegation(struct nfs4_delegation *dp) if (atomic_dec_and_test(&dp->dl_count)) { dprintk("NFSD: freeing dp %p\n",dp); put_nfs4_file(dp->dl_file); + fput(dp->dl_vfs_file); kmem_cache_free(deleg_slab, dp); num_delegations--; } @@ -265,12 +267,10 @@ nfs4_put_delegation(struct nfs4_delegation *dp) static void nfs4_close_delegation(struct nfs4_delegation *dp) { - struct file *filp = find_readable_file(dp->dl_file); - dprintk("NFSD: close_delegation dp %p\n",dp); + /* XXX: do we even need this check?: */ if (dp->dl_flock) - vfs_setlease(filp, F_UNLCK, &dp->dl_flock); - nfs4_file_put_access(dp->dl_file, O_RDONLY); + vfs_setlease(dp->dl_vfs_file, F_UNLCK, &dp->dl_flock); } /* Called under the state lock. */ diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 39adc27b0685..84b230217b1b 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -81,6 +81,7 @@ struct nfs4_delegation { atomic_t dl_count; /* ref count */ struct nfs4_client *dl_client; struct nfs4_file *dl_file; + struct file *dl_vfs_file; struct file_lock *dl_flock; u32 dl_type; time_t dl_time; -- cgit v1.2.2 From c45821d263a8a5109d69a9e8942b8d65bcd5f31a Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Sun, 31 Oct 2010 00:04:44 -0400 Subject: locks: eliminate fl_mylease callback The nfs server only supports read delegations for now, so we don't care how conflicts are determined. All we care is that unlocks are recognized as matching the leases they are meant to remove. After the last patch, a comparison of struct files will work for that purpose. So we no longer need this callback. Signed-off-by: J. 
Bruce Fields --- fs/locks.c | 8 +------- fs/nfsd/nfs4state.c | 21 +-------------------- 2 files changed, 2 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 8729347bcd1a..5cb65062281a 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -444,15 +444,9 @@ static void lease_release_private_callback(struct file_lock *fl) fl->fl_file->f_owner.signum = 0; } -static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try) -{ - return fl->fl_file == try->fl_file; -} - static const struct lock_manager_operations lease_manager_ops = { .fl_break = lease_break_callback, .fl_release_private = lease_release_private_callback, - .fl_mylease = lease_mylease_callback, .fl_change = lease_modify, }; @@ -1405,7 +1399,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp) for (before = &inode->i_flock; ((fl = *before) != NULL) && IS_LEASE(fl); before = &fl->fl_next) { - if (lease->fl_lmops->fl_mylease(fl, lease)) + if (fl->fl_file == lease->fl_file) my_before = before; else if (fl->fl_type == (F_INPROGRESS | F_UNLCK)) /* diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index cbe1b81c147d..87d4c48b6069 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2295,24 +2295,6 @@ void nfsd_break_deleg_cb(struct file_lock *fl) nfsd4_cb_recall(dp); } -/* - * Called from setlease() with lock_flocks() held - */ -static -int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try) -{ - struct nfs4_delegation *onlistd = - (struct nfs4_delegation *)onlist->fl_owner; - struct nfs4_delegation *tryd = - (struct nfs4_delegation *)try->fl_owner; - - if (onlist->fl_lmops != try->fl_lmops) - return 0; - - return onlistd->dl_client == tryd->dl_client; -} - - static int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) { @@ -2324,7 +2306,6 @@ int nfsd_change_deleg_cb(struct file_lock **onlist, int arg) static const struct lock_manager_operations nfsd_lease_mng_ops = { .fl_break = nfsd_break_deleg_cb, - .fl_mylease = nfsd_same_client_deleg_cb, .fl_change = nfsd_change_deleg_cb, }; @@ -2630,7 +2611,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta dp->dl_flock = fl; /* vfs_setlease checks to see if delegation should be handed out. - * the lock_manager callbacks fl_mylease and fl_change are used + * the lock_manager callback fl_change is used */ if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) { dprintk("NFSD: setlease failed [%d], no delegation\n", status); -- cgit v1.2.2 From 255c7cf810e4776ae8f1023332060459f30d8a2a Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Sun, 31 Oct 2010 12:35:48 -0400 Subject: locks: minor setlease cleanup Signed-off-by: J. Bruce Fields --- fs/locks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/locks.c b/fs/locks.c index 5cb65062281a..feaac634d0da 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1399,7 +1399,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp) for (before = &inode->i_flock; ((fl = *before) != NULL) && IS_LEASE(fl); before = &fl->fl_next) { - if (fl->fl_file == lease->fl_file) + if (fl->fl_file == filp) my_before = before; else if (fl->fl_type == (F_INPROGRESS | F_UNLCK)) /* -- cgit v1.2.2 From f6af99ec1b261e21219d5eba99e3af48fc6c32d4 Mon Sep 17 00:00:00 2001 From: "J. 
Bruce Fields" Date: Tue, 4 Jan 2011 18:02:15 -0500 Subject: nfsd4: name->id mapping should fail with BADOWNER not BADNAME According to rfc 3530 BADNAME is for strings that represent paths; BADOWNER is for user/group names that don't map. And the too-long name should probably be BADOWNER as well; it's effectively the same as if we couldn't map it. Cc: stable@kernel.org Reported-by: Trond Myklebust Reported-by: Simon Kirby Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4idmap.c | 4 ++-- fs/nfsd/nfsd.h | 1 + fs/nfsd/nfsproc.c | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c index f0695e815f0e..844960fd0395 100644 --- a/fs/nfsd/nfs4idmap.c +++ b/fs/nfsd/nfs4idmap.c @@ -524,13 +524,13 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen int ret; if (namelen + 1 > sizeof(key.name)) - return -EINVAL; + return -ESRCH; /* nfserr_badowner */ memcpy(key.name, name, namelen); key.name[namelen] = '\0'; strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname)); ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item); if (ret == -ENOENT) - ret = -ESRCH; /* nfserr_badname */ + ret = -ESRCH; /* nfserr_badowner */ if (ret) return ret; *id = item->id; diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 6b641cf2c19a..7ecfa2420307 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -158,6 +158,7 @@ void nfsd_lockd_shutdown(void); #define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP) #define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR) #define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE) +#define nfserr_badowner cpu_to_be32(NFSERR_BADOWNER) #define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD) #define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL) #define nfserr_grace cpu_to_be32(NFSERR_GRACE) diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index fd608a27a8d5..8f05dcd0bf85 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -738,7 +738,7 @@ nfserrno (int errno) { nfserr_jukebox, -EAGAIN }, { nfserr_jukebox, -EWOULDBLOCK }, { nfserr_jukebox, -ENOMEM }, - { nfserr_badname, -ESRCH }, + { nfserr_badowner, -ESRCH }, { nfserr_io, -ETXTBSY }, { nfserr_notsupp, -EOPNOTSUPP }, { nfserr_toosmall, -ETOOSMALL }, -- cgit v1.2.2 From 2ca72e17e5acb1052c35c9faba609c2289ce7a92 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 4 Jan 2011 17:37:15 -0500 Subject: nfsd4: move idmap and acl header files into fs/nfsd These are internal nfsd interfaces. Signed-off-by: J. Bruce Fields --- fs/nfsd/acl.h | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++ fs/nfsd/idmap.h | 64 +++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/nfsd/nfs4acl.c | 2 +- fs/nfsd/nfs4idmap.c | 2 +- fs/nfsd/nfs4xdr.c | 5 +++-- fs/nfsd/nfsctl.c | 2 +- fs/nfsd/vfs.c | 4 ++-- 7 files changed, 133 insertions(+), 7 deletions(-) create mode 100644 fs/nfsd/acl.h create mode 100644 fs/nfsd/idmap.h (limited to 'fs') diff --git a/fs/nfsd/acl.h b/fs/nfsd/acl.h new file mode 100644 index 000000000000..c9c05a78e9bb --- /dev/null +++ b/fs/nfsd/acl.h @@ -0,0 +1,61 @@ +/* + * include/linux/nfs4_acl.c + * + * Common NFSv4 ACL handling definitions. + * + * Copyright (c) 2002 The Regents of the University of Michigan. + * All rights reserved. + * + * Marius Aamodt Eriksen + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef LINUX_NFS4_ACL_H +#define LINUX_NFS4_ACL_H + +#include + +/* Maximum ACL we'll accept from client; chosen (somewhat arbitrarily) to + * fit in a page: */ +#define NFS4_ACL_MAX 170 + +struct nfs4_acl *nfs4_acl_new(int); +int nfs4_acl_get_whotype(char *, u32); +int nfs4_acl_write_who(int who, char *p); +int nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group, + uid_t who, u32 mask); + +#define NFS4_ACL_TYPE_DEFAULT 0x01 +#define NFS4_ACL_DIR 0x02 +#define NFS4_ACL_OWNER 0x04 + +struct nfs4_acl *nfs4_acl_posix_to_nfsv4(struct posix_acl *, + struct posix_acl *, unsigned int flags); +int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *, struct posix_acl **, + struct posix_acl **, unsigned int flags); + +#endif /* LINUX_NFS4_ACL_H */ diff --git a/fs/nfsd/idmap.h b/fs/nfsd/idmap.h new file mode 100644 index 000000000000..d4a2ac18bd4c --- /dev/null +++ b/fs/nfsd/idmap.h @@ -0,0 +1,64 @@ +/* + * include/linux/nfsd_idmap.h + * + * Mapping of UID to name and vice versa. + * + * Copyright (c) 2002, 2003 The Regents of the University of + * Michigan. All rights reserved. +> * + * Marius Aamodt Eriksen + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef LINUX_NFSD_IDMAP_H +#define LINUX_NFSD_IDMAP_H + +#include +#include + +/* XXX from linux/nfs_idmap.h */ +#define IDMAP_NAMESZ 128 + +#ifdef CONFIG_NFSD_V4 +int nfsd_idmap_init(void); +void nfsd_idmap_shutdown(void); +#else +static inline int nfsd_idmap_init(void) +{ + return 0; +} +static inline void nfsd_idmap_shutdown(void) +{ +} +#endif + +int nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *); +int nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *); +int nfsd_map_uid_to_name(struct svc_rqst *, __u32, char *); +int nfsd_map_gid_to_name(struct svc_rqst *, __u32, char *); + +#endif /* LINUX_NFSD_IDMAP_H */ diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c index e48052615159..ad88f1c0a4c3 100644 --- a/fs/nfsd/nfs4acl.c +++ b/fs/nfsd/nfs4acl.c @@ -36,7 +36,7 @@ #include #include -#include +#include "acl.h" /* mode bit translations: */ diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c index 844960fd0395..cbd599732765 100644 --- a/fs/nfsd/nfs4idmap.c +++ b/fs/nfsd/nfs4idmap.c @@ -33,10 +33,10 @@ */ #include -#include #include #include #include +#include "idmap.h" /* * Cache entry diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 364aae7d5998..2a0814d0ab1a 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -44,13 +44,14 @@ #include #include #include -#include -#include #include +#include "idmap.h" +#include "acl.h" #include "xdr4.h" #include "vfs.h" + #define NFSDDBG_FACILITY NFSDDBG_XDR /* diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 6840ec3ceecf..33b3e2b06779 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -8,12 +8,12 @@ #include #include -#include #include #include #include #include +#include "idmap.h" #include "nfsd.h" #include "cache.h" diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 6a3af2ff3afe..b991125ce4a5 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -35,8 +35,8 @@ #endif /* CONFIG_NFSD_V3 */ #ifdef CONFIG_NFSD_V4 -#include -#include +#include "acl.h" +#include "idmap.h" #endif /* CONFIG_NFSD_V4 */ #include "nfsd.h" -- cgit v1.2.2 From 775a1905e1e042e830eae31e70efec9387eb3e1d Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 4 Jan 2011 17:38:41 -0500 Subject: nfsd4: remove outdated pathname-comments Signed-off-by: J. Bruce Fields --- fs/nfsd/acl.h | 2 -- fs/nfsd/idmap.h | 2 -- 2 files changed, 4 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/acl.h b/fs/nfsd/acl.h index c9c05a78e9bb..34e5c40af5ef 100644 --- a/fs/nfsd/acl.h +++ b/fs/nfsd/acl.h @@ -1,6 +1,4 @@ /* - * include/linux/nfs4_acl.c - * * Common NFSv4 ACL handling definitions. * * Copyright (c) 2002 The Regents of the University of Michigan. diff --git a/fs/nfsd/idmap.h b/fs/nfsd/idmap.h index d4a2ac18bd4c..514758994763 100644 --- a/fs/nfsd/idmap.h +++ b/fs/nfsd/idmap.h @@ -1,6 +1,4 @@ /* - * include/linux/nfsd_idmap.h - * * Mapping of UID to name and vice versa. 
* * Copyright (c) 2002, 2003 The Regents of the University of -- cgit v1.2.2 From 3c726023402a2f3b28f49b9d90ebf9e71151157d Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 4 Jan 2011 17:53:52 -0500 Subject: nfsd4: return nfs errno from name_to_id functions This avoids the need for the confusing ESRCH mapping. Signed-off-by: J. Bruce Fields --- fs/nfsd/idmap.h | 4 ++-- fs/nfsd/nfs4idmap.c | 13 +++++++------ fs/nfsd/nfs4xdr.c | 10 +++++----- fs/nfsd/nfsproc.c | 1 - 4 files changed, 14 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/idmap.h b/fs/nfsd/idmap.h index 514758994763..2f3be1321534 100644 --- a/fs/nfsd/idmap.h +++ b/fs/nfsd/idmap.h @@ -54,8 +54,8 @@ static inline void nfsd_idmap_shutdown(void) } #endif -int nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *); -int nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *); +__be32 nfsd_map_name_to_uid(struct svc_rqst *, const char *, size_t, __u32 *); +__be32 nfsd_map_name_to_gid(struct svc_rqst *, const char *, size_t, __u32 *); int nfsd_map_uid_to_name(struct svc_rqst *, __u32, char *); int nfsd_map_gid_to_name(struct svc_rqst *, __u32, char *); diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c index cbd599732765..6d2c397d458b 100644 --- a/fs/nfsd/nfs4idmap.c +++ b/fs/nfsd/nfs4idmap.c @@ -37,6 +37,7 @@ #include #include #include "idmap.h" +#include "nfsd.h" /* * Cache entry @@ -514,7 +515,7 @@ rqst_authname(struct svc_rqst *rqstp) return clp->name; } -static int +static __be32 idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id) { @@ -524,15 +525,15 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen int ret; if (namelen + 1 > sizeof(key.name)) - return -ESRCH; /* nfserr_badowner */ + return nfserr_badowner; memcpy(key.name, name, namelen); key.name[namelen] = '\0'; strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname)); ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item); if (ret == -ENOENT) - ret = -ESRCH; /* nfserr_badowner */ + return nfserr_badowner; if (ret) - return ret; + return nfserrno(ret); *id = item->id; cache_put(&item->h, &nametoid_cache); return 0; @@ -560,14 +561,14 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name) return ret; } -int +__be32 nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen, __u32 *id) { return idmap_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id); } -int +__be32 nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen, __u32 *id) { diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 2a0814d0ab1a..ca3786905dec 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -289,17 +289,17 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, len += XDR_QUADLEN(dummy32) << 2; READMEM(buf, dummy32); ace->whotype = nfs4_acl_get_whotype(buf, dummy32); - host_err = 0; + status = nfs_ok; if (ace->whotype != NFS4_ACL_WHO_NAMED) ace->who = 0; else if (ace->flag & NFS4_ACE_IDENTIFIER_GROUP) - host_err = nfsd_map_name_to_gid(argp->rqstp, + status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &ace->who); else - host_err = nfsd_map_name_to_uid(argp->rqstp, + status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &ace->who); - if (host_err) - goto out_nfserr; + if (status) + return status; } } else *acl = NULL; diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 8f05dcd0bf85..e15dc45fc5ec 100644 --- a/fs/nfsd/nfsproc.c +++ 
b/fs/nfsd/nfsproc.c @@ -738,7 +738,6 @@ nfserrno (int errno) { nfserr_jukebox, -EAGAIN }, { nfserr_jukebox, -EWOULDBLOCK }, { nfserr_jukebox, -ENOMEM }, - { nfserr_badowner, -ESRCH }, { nfserr_io, -ETXTBSY }, { nfserr_notsupp, -EOPNOTSUPP }, { nfserr_toosmall, -ETOOSMALL }, -- cgit v1.2.2 From 6f3d772fb8a039de8f21d725f5e38c252b4c0efd Mon Sep 17 00:00:00 2001 From: Takuma Umeya Date: Wed, 15 Dec 2010 14:09:01 +0900 Subject: nfs4: set source address when callback is generated when callback is generated in NFSv4 server, it doesn't set the source address. When an alias IP is utilized on NFSv4 server and suppose the client is accessing via that alias IP (e.g. eth0:0), the client invokes the callback to the IP address that is set on the original device (e.g. eth0). This behavior results in timeout of xprt. The patch sets the IP address that the client should invoke callback to. Signed-off-by: Takuma Umeya [bfields@redhat.com: Simplify gen_callback arguments, use helper function] Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 1 + fs/nfsd/nfs4state.c | 22 +++++++++++++++++++--- fs/nfsd/state.h | 1 + 3 files changed, 21 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index a08580553fda..dd183af24fe6 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -484,6 +484,7 @@ static int setup_callback_client(struct nfs4_client *clp, .net = &init_net, .address = (struct sockaddr *) &conn->cb_addr, .addrsize = conn->cb_addrlen, + .saddress = (struct sockaddr *) &conn->cb_saddr, .timeout = &timeparms, .program = &cb_program, .version = 0, diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 87d4c48b6069..b583e4e800ab 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1163,10 +1163,26 @@ find_unconfirmed_client_by_str(const char *dname, unsigned int hashval) return NULL; } +static void rpc_svcaddr2sockaddr(struct sockaddr *sa, unsigned short family, union svc_addr_u *svcaddr) +{ + switch (family) { + case AF_INET: + ((struct sockaddr_in *)sa)->sin_family = AF_INET; + ((struct sockaddr_in *)sa)->sin_addr = svcaddr->addr; + return; + case AF_INET6: + ((struct sockaddr_in6 *)sa)->sin6_family = AF_INET6; + ((struct sockaddr_in6 *)sa)->sin6_addr = svcaddr->addr6; + return; + } +} + static void -gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid) +gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) { struct nfs4_cb_conn *conn = &clp->cl_cb_conn; + struct sockaddr *sa = svc_addr(rqstp); + u32 scopeid = rpc_get_scope_id(sa); unsigned short expected_family; /* Currently, we only support tcp and tcp6 for the callback channel */ @@ -1192,6 +1208,7 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, u32 scopeid) conn->cb_prog = se->se_callback_prog; conn->cb_ident = se->se_callback_ident; + rpc_svcaddr2sockaddr((struct sockaddr *)&conn->cb_saddr, expected_family, &rqstp->rq_daddr); return; out_err: conn->cb_addr.ss_family = AF_UNSPEC; @@ -1768,7 +1785,6 @@ __be32 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_setclientid *setclid) { - struct sockaddr *sa = svc_addr(rqstp); struct xdr_netobj clname = { .len = setclid->se_namelen, .data = setclid->se_name, @@ -1871,7 +1887,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, * for consistent minorversion use throughout: */ new->cl_minorversion = 0; - gen_callback(new, setclid, rpc_get_scope_id(sa)); + 
gen_callback(new, setclid, rqstp); add_to_unconfirmed(new, strhashval); setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; setclid->se_clientid.cl_id = new->cl_clientid.cl_id; diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 84b230217b1b..cf6dc83fd545 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -96,6 +96,7 @@ struct nfs4_delegation { struct nfs4_cb_conn { /* SETCLIENTID info */ struct sockaddr_storage cb_addr; + struct sockaddr_storage cb_saddr; size_t cb_addrlen; u32 cb_prog; /* used only in 4.0 case; per-session otherwise */ -- cgit v1.2.2 From 09e099d4bafea3b15be003d548bdf94b4b6e0e17 Mon Sep 17 00:00:00 2001 From: Jerome Marchand Date: Wed, 5 Jan 2011 16:57:38 +0100 Subject: block: fix accounting bug on cross partition merges /proc/diskstats would display a strange output as follows. $ cat /proc/diskstats |grep sda 8 0 sda 90524 7579 102154 20464 0 0 0 0 0 14096 20089 8 1 sda1 19085 1352 21841 4209 0 0 0 0 4294967064 15689 4293424691 ~~~~~~~~~~ 8 2 sda2 71252 3624 74891 15950 0 0 0 0 232 23995 1562390 8 3 sda3 54 487 2188 92 0 0 0 0 0 88 92 8 4 sda4 4 0 8 0 0 0 0 0 0 0 0 8 5 sda5 81 2027 2130 138 0 0 0 0 0 87 137 Its reason is the wrong way of accounting hd_struct->in_flight. When a bio is merged into a request belongs to different partition by ELEVATOR_FRONT_MERGE. The detailed root cause is as follows. Assuming that there are two partition, sda1 and sda2. 1. A request for sda2 is in request_queue. Hence sda1's hd_struct->in_flight is 0 and sda2's one is 1. | hd_struct->in_flight --------------------------- sda1 | 0 sda2 | 1 --------------------------- 2. A bio belongs to sda1 is issued and is merged into the request mentioned on step1 by ELEVATOR_BACK_MERGE. The first sector of the request is changed from sda2 region to sda1 region. However the two partition's hd_struct->in_flight are not changed. | hd_struct->in_flight --------------------------- sda1 | 0 sda2 | 1 --------------------------- 3. The request is finished and blk_account_io_done() is called. In this case, sda2's hd_struct->in_flight, not a sda1's one, is decremented. | hd_struct->in_flight --------------------------- sda1 | -1 sda2 | 1 --------------------------- The patch fixes the problem by caching the partition lookup inside the request structure, hence making sure that the increment and decrement will always happen on the same partition struct. This also speeds up IO with accounting enabled, since it cuts down on the number of lookups we have to do. Also add a refcount to struct hd_struct to keep the partition in memory as long as users exist. We use kref_test_and_get() to ensure we don't add a reference to a partition which is going away. 
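As a rough illustration of the lifetime rule this introduces (the struct and helpers below are made-up stand-ins, not the actual block-layer code), the pattern is: pin the partition when accounting starts, and only drop it after accounting completes on that same cached pointer:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct part_example {			/* stand-in for struct hd_struct */
	struct kref ref;
	int in_flight;
};

static void part_example_release(struct kref *kref)
{
	struct part_example *p = container_of(kref, struct part_example, ref);
	kfree(p);
}

/* request start: cache the partition in the request and pin it */
static void rq_account_start(struct part_example *p)
{
	kref_get(&p->ref);
	p->in_flight++;
}

/* request completion: decrement on the *cached* partition, then unpin */
static void rq_account_done(struct part_example *p)
{
	p->in_flight--;
	kref_put(&p->ref, part_example_release);
}

Caching the pointer makes the increment and decrement hit the same partition even if the request was merged across a partition boundary in between; the reference keeps that partition's memory valid if it is deleted while requests are still in flight.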
Signed-off-by: Jerome Marchand Signed-off-by: Yasuaki Ishimatsu Cc: stable@kernel.org Signed-off-by: Jens Axboe --- fs/partitions/check.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/partitions/check.c b/fs/partitions/check.c index bdf8d3cc95a4..48209f58522b 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -381,6 +381,13 @@ static void delete_partition_rcu_cb(struct rcu_head *head) put_device(part_to_dev(part)); } +void __delete_partition(struct kref *ref) +{ + struct hd_struct *part = container_of(ref, struct hd_struct, ref); + + call_rcu(&part->rcu_head, delete_partition_rcu_cb); +} + void delete_partition(struct gendisk *disk, int partno) { struct disk_part_tbl *ptbl = disk->part_tbl; @@ -399,7 +406,7 @@ void delete_partition(struct gendisk *disk, int partno) kobject_put(part->holder_dir); device_del(part_to_dev(part)); - call_rcu(&part->rcu_head, delete_partition_rcu_cb); + kref_put(&part->ref, __delete_partition); } static ssize_t whole_disk_show(struct device *dev, @@ -498,6 +505,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, if (!dev_get_uevent_suppress(ddev)) kobject_uevent(&pdev->kobj, KOBJ_ADD); + kref_init(&p->ref); return p; out_free_info: -- cgit v1.2.2 From 31d710a7bd42f0d89e30d53bdaad427c5f191d0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20=C5=BBenczykowski?= Date: Sun, 26 Sep 2010 12:38:28 +0000 Subject: ext3: don't update sb journal_devnum when RO dev MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit An ext3 filesystem on a read-only device, with an external journal which is at a different device number then recorded in the superblock will fail to honor the read-only setting of the device and trigger a superblock update (write). For example: - ext3 on a software raid which is in read-only mode - external journal on a read-write device which has changed device num - attempt to mount with -o journal_dev= - hits BUG_ON(mddev->ro = 1) in md.c Cc: Theodore Ts'o Signed-off-by: Maciej Żenczykowski Signed-off-by: Jan Kara --- fs/ext3/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext3/super.c b/fs/ext3/super.c index acf8695fa8f0..04169318e6ba 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -2290,7 +2290,7 @@ static int ext3_load_journal(struct super_block *sb, EXT3_SB(sb)->s_journal = journal; ext3_clear_journal_err(sb, es); - if (journal_devnum && + if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { es->s_journal_dev = cpu_to_le32(journal_devnum); -- cgit v1.2.2 From 23a2ad6d0e58d0f2fb1647c2d6fef935bcaf9299 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 9 Nov 2010 10:16:03 -0800 Subject: fs/ext2/super.c: Use printf extension %pV Using %pV reduces the number of printk calls and eliminates any possible message interleaving from other printk calls. 
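A minimal sketch of the idiom (generic helper with a made-up name, not the ext2 function itself):

#include <linux/kernel.h>

static void example_msg(const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	/* a single printk call, so other messages cannot interleave */
	printk("%sEXAMPLE-fs: %pV\n", prefix, &vaf);
	va_end(args);
}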
Signed-off-by: Joe Perches Signed-off-by: Jan Kara --- fs/ext2/super.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ext2/super.c b/fs/ext2/super.c index d89e0b6a2d78..27822b98f74d 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -43,9 +43,10 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data); static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf); static int ext2_sync_fs(struct super_block *sb, int wait); -void ext2_error (struct super_block * sb, const char * function, - const char * fmt, ...) +void ext2_error(struct super_block *sb, const char *function, + const char *fmt, ...) { + struct va_format vaf; va_list args; struct ext2_sb_info *sbi = EXT2_SB(sb); struct ext2_super_block *es = sbi->s_es; @@ -59,9 +60,13 @@ void ext2_error (struct super_block * sb, const char * function, } va_start(args, fmt); - printk(KERN_CRIT "EXT2-fs (%s): error: %s: ", sb->s_id, function); - vprintk(fmt, args); - printk("\n"); + + vaf.fmt = fmt; + vaf.va = &args; + + printk(KERN_CRIT "EXT2-fs (%s): error: %s: %pV\n", + sb->s_id, function, &vaf); + va_end(args); if (test_opt(sb, ERRORS_PANIC)) @@ -76,12 +81,16 @@ void ext2_error (struct super_block * sb, const char * function, void ext2_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) { + struct va_format vaf; va_list args; va_start(args, fmt); - printk("%sEXT2-fs (%s): ", prefix, sb->s_id); - vprintk(fmt, args); - printk("\n"); + + vaf.fmt = fmt; + vaf.va = &args; + + printk("%sEXT2-fs (%s): %pV\n", prefix, sb->s_id, &vaf); + va_end(args); } -- cgit v1.2.2 From 99fbb1e2af5da27d3ee75c2e421712fe9d083fb6 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 9 Nov 2010 10:18:05 -0800 Subject: fs/ext3/super.c: Use printf extension %pV Using %pV reduces the number of printk calls and eliminates any possible message interleaving from other printk calls. Signed-off-by: Joe Perches Signed-off-by: Jan Kara --- fs/ext3/super.c | 56 +++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 04169318e6ba..0e9cbc37024a 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -143,12 +143,16 @@ void ext3_journal_abort_handle(const char *caller, const char *err_fn, void ext3_msg(struct super_block *sb, const char *prefix, const char *fmt, ...) { + struct va_format vaf; va_list args; va_start(args, fmt); - printk("%sEXT3-fs (%s): ", prefix, sb->s_id); - vprintk(fmt, args); - printk("\n"); + + vaf.fmt = fmt; + vaf.va = &args; + + printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf); + va_end(args); } @@ -195,15 +199,20 @@ static void ext3_handle_error(struct super_block *sb) sb->s_id); } -void ext3_error (struct super_block * sb, const char * function, - const char * fmt, ...) +void ext3_error(struct super_block *sb, const char *function, + const char *fmt, ...) { + struct va_format vaf; va_list args; va_start(args, fmt); - printk(KERN_CRIT "EXT3-fs error (device %s): %s: ",sb->s_id, function); - vprintk(fmt, args); - printk("\n"); + + vaf.fmt = fmt; + vaf.va = &args; + + printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n", + sb->s_id, function, &vaf); + va_end(args); ext3_handle_error(sb); @@ -274,15 +283,20 @@ void __ext3_std_error (struct super_block * sb, const char * function, * case we take the easy way out and panic immediately. 
*/ -void ext3_abort (struct super_block * sb, const char * function, - const char * fmt, ...) +void ext3_abort(struct super_block *sb, const char *function, + const char *fmt, ...) { + struct va_format vaf; va_list args; va_start(args, fmt); - printk(KERN_CRIT "EXT3-fs (%s): error: %s: ", sb->s_id, function); - vprintk(fmt, args); - printk("\n"); + + vaf.fmt = fmt; + vaf.va = &args; + + printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n", + sb->s_id, function, &vaf); + va_end(args); if (test_opt(sb, ERRORS_PANIC)) @@ -300,16 +314,20 @@ void ext3_abort (struct super_block * sb, const char * function, journal_abort(EXT3_SB(sb)->s_journal, -EIO); } -void ext3_warning (struct super_block * sb, const char * function, - const char * fmt, ...) +void ext3_warning(struct super_block *sb, const char *function, + const char *fmt, ...) { + struct va_format vaf; va_list args; va_start(args, fmt); - printk(KERN_WARNING "EXT3-fs (%s): warning: %s: ", - sb->s_id, function); - vprintk(fmt, args); - printk("\n"); + + vaf.fmt = fmt; + vaf.va = &args; + + printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n", + sb->s_id, function, &vaf); + va_end(args); } -- cgit v1.2.2 From 2b543edae2d9161ae8dda1d85cbd28ef8a166cc0 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 16 Nov 2010 20:18:12 +0900 Subject: ext3: Add error check in ext3_mkdir() Check return value of ext3_journal_get_write_access, ext3_journal_dirty_metadata and ext3_mark_inode_dirty. Consolidate error path under new label 'out_clear_inode' and adjust bh releasing appropriately. Signed-off-by: Namhyung Kim Signed-off-by: Jan Kara --- fs/ext3/namei.c | 36 ++++++++++++++++++++++-------------- 1 file changed, 22 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index bce9dce639b8..03fccc573333 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -1762,7 +1762,7 @@ static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode) { handle_t *handle; struct inode * inode; - struct buffer_head * dir_block; + struct buffer_head * dir_block = NULL; struct ext3_dir_entry_2 * de; int err, retries = 0; @@ -1790,15 +1790,14 @@ retry: inode->i_fop = &ext3_dir_operations; inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize; dir_block = ext3_bread (handle, inode, 0, 1, &err); - if (!dir_block) { - drop_nlink(inode); /* is this nlink == 0? 
*/ - unlock_new_inode(inode); - ext3_mark_inode_dirty(handle, inode); - iput (inode); - goto out_stop; - } + if (!dir_block) + goto out_clear_inode; + BUFFER_TRACE(dir_block, "get_write_access"); - ext3_journal_get_write_access(handle, dir_block); + err = ext3_journal_get_write_access(handle, dir_block); + if (err) + goto out_clear_inode; + de = (struct ext3_dir_entry_2 *) dir_block->b_data; de->inode = cpu_to_le32(inode->i_ino); de->name_len = 1; @@ -1814,11 +1813,16 @@ retry: ext3_set_de_type(dir->i_sb, de, S_IFDIR); inode->i_nlink = 2; BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata"); - ext3_journal_dirty_metadata(handle, dir_block); - brelse (dir_block); - ext3_mark_inode_dirty(handle, inode); - err = ext3_add_entry (handle, dentry, inode); + err = ext3_journal_dirty_metadata(handle, dir_block); + if (err) + goto out_clear_inode; + + err = ext3_mark_inode_dirty(handle, inode); + if (!err) + err = ext3_add_entry (handle, dentry, inode); + if (err) { +out_clear_inode: inode->i_nlink = 0; unlock_new_inode(inode); ext3_mark_inode_dirty(handle, inode); @@ -1827,10 +1831,14 @@ retry: } inc_nlink(dir); ext3_update_dx_flag(dir); - ext3_mark_inode_dirty(handle, dir); + err = ext3_mark_inode_dirty(handle, dir); + if (err) + goto out_clear_inode; + d_instantiate(dentry, inode); unlock_new_inode(inode); out_stop: + brelse(dir_block); ext3_journal_stop(handle); if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries)) goto retry; -- cgit v1.2.2 From fbcae8e32d73ad6cad9c5721881350c51174d552 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 19 Nov 2010 16:28:35 +0900 Subject: ext3: Add journal error check into ext3_delete_entry() Check return value of ext3_journal_get_write_access() and ext3_journal_dirty_metadata(). Signed-off-by: Namhyung Kim Signed-off-by: Jan Kara --- fs/ext3/namei.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 03fccc573333..672cea16a8b9 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -1644,8 +1644,13 @@ static int ext3_delete_entry (handle_t *handle, if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i)) return -EIO; if (de == de_del) { + int err; + BUFFER_TRACE(bh, "get_write_access"); - ext3_journal_get_write_access(handle, bh); + err = ext3_journal_get_write_access(handle, bh); + if (err) + goto journal_error; + if (pde) pde->rec_len = ext3_rec_len_to_disk( ext3_rec_len_from_disk(pde->rec_len) + @@ -1654,7 +1659,12 @@ static int ext3_delete_entry (handle_t *handle, de->inode = 0; dir->i_version++; BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); - ext3_journal_dirty_metadata(handle, bh); + err = ext3_journal_dirty_metadata(handle, bh); + if (err) { +journal_error: + ext3_std_error(dir->i_sb, err); + return err; + } return 0; } i += ext3_rec_len_from_disk(de->rec_len); -- cgit v1.2.2 From ad692bf3ea035fa5a9d56462cf3df97d9607cced Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Tue, 16 Nov 2010 22:57:44 +0000 Subject: ext3: Return error code from generic_check_addressable ext3_fill_super should return the error code that generic_check_accessible returns when an error condition occurs. Signed-off-by: Darrick J. 
Wong Signed-off-by: Jan Kara --- fs/ext3/super.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 0e9cbc37024a..ebc3a9c77067 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -1859,13 +1859,15 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) goto failed_mount; } - if (generic_check_addressable(sb->s_blocksize_bits, - le32_to_cpu(es->s_blocks_count))) { + err = generic_check_addressable(sb->s_blocksize_bits, + le32_to_cpu(es->s_blocks_count)); + if (err) { ext3_msg(sb, KERN_ERR, "error: filesystem is too large to mount safely"); if (sizeof(sector_t) < 8) ext3_msg(sb, KERN_ERR, "error: CONFIG_LBDAF not enabled"); + ret = err; goto failed_mount; } -- cgit v1.2.2 From f0cad89f5e8ef8b6d0c065115565524137e44f0b Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sat, 16 Oct 2010 19:36:59 -0400 Subject: ext3: Avoid uninitialized memory references with a corrupted htree directory If the first htree directory is missing '.' or '..' but is otherwise a valid directory, and we do a lookup for '.' or '..', it's possible to dereference an uninitialized memory pointer in ext3_htree_next_block(). Avoid this. We avoid this by moving the special case from ext3_dx_find_entry() to ext3_find_entry(); this also means we can optimize ext3_find_entry() slightly when NFS looks up "..". Thanks to Brad Spengler for pointing a Clang warning that led me to look more closely at this code. The warning was harmless, but it was useful in pointing out code that was too ugly to live. This warning was also reported by Roman Borisov. Signed-off-by: "Theodore Ts'o" Cc: Jan Kara Cc: Brad Spengler Signed-off-by: Jan Kara --- fs/ext3/namei.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 672cea16a8b9..d093cbbe38b7 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -858,6 +858,7 @@ static struct buffer_head *ext3_find_entry(struct inode *dir, struct buffer_head * bh_use[NAMEI_RA_SIZE]; struct buffer_head * bh, *ret = NULL; unsigned long start, block, b; + const u8 *name = entry->name; int ra_max = 0; /* Number of bh's in the readahead buffer, bh_use[] */ int ra_ptr = 0; /* Current index into readahead @@ -871,6 +872,16 @@ static struct buffer_head *ext3_find_entry(struct inode *dir, namelen = entry->len; if (namelen > EXT3_NAME_LEN) return NULL; + if ((namelen <= 2) && (name[0] == '.') && + (name[1] == '.' || name[1] == 0)) { + /* + * "." or ".." will only be in the first block + * NFS may look up ".."; "." should be handled by the VFS + */ + block = start = 0; + nblocks = 1; + goto restart; + } if (is_dx(dir)) { bh = ext3_dx_find_entry(dir, entry, res_dir, &err); /* @@ -961,9 +972,8 @@ static struct buffer_head * ext3_dx_find_entry(struct inode *dir, struct qstr *entry, struct ext3_dir_entry_2 **res_dir, int *err) { - struct super_block * sb; + struct super_block *sb = dir->i_sb; struct dx_hash_info hinfo; - u32 hash; struct dx_frame frames[2], *frame; struct ext3_dir_entry_2 *de, *top; struct buffer_head *bh; @@ -972,18 +982,8 @@ static struct buffer_head * ext3_dx_find_entry(struct inode *dir, int namelen = entry->len; const u8 *name = entry->name; - sb = dir->i_sb; - /* NFS may look up ".." 
- look at dx_root directory block */ - if (namelen > 2 || name[0] != '.'|| (namelen == 2 && name[1] != '.')) { - if (!(frame = dx_probe(entry, dir, &hinfo, frames, err))) - return NULL; - } else { - frame = frames; - frame->bh = NULL; /* for dx_release() */ - frame->at = (struct dx_entry *)frames; /* hack for zero entry*/ - dx_set_block(frame->at, 0); /* dx_root block is 0 */ - } - hash = hinfo.hash; + if (!(frame = dx_probe(entry, dir, &hinfo, frames, err))) + return NULL; do { block = dx_get_block(frame->at); if (!(bh = ext3_bread (NULL,dir, block, 0, err))) @@ -1009,7 +1009,7 @@ static struct buffer_head * ext3_dx_find_entry(struct inode *dir, } brelse (bh); /* Check to see if we should continue to search */ - retval = ext3_htree_next_block(dir, hash, frame, + retval = ext3_htree_next_block(dir, hinfo.hash, frame, frames, NULL); if (retval < 0) { ext3_warning(sb, __func__, -- cgit v1.2.2 From 5026e90b86684bc878e4db0a8cd043fed769719c Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sat, 16 Oct 2010 19:37:00 -0400 Subject: ext3: Use search_dirblock() in ext3_dx_find_entry() Use the search_dirblock() in ext3_dx_find_entry(). It makes the code easier to read, and it takes advantage of common code. It also saves 100 bytes or so of text space. Signed-off-by: "Theodore Ts'o" Cc: Brad Spengler Signed-off-by: Jan Kara --- fs/ext3/namei.c | 33 ++++++++++++--------------------- 1 file changed, 12 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index d093cbbe38b7..9cc0b2c9664f 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -975,12 +975,9 @@ static struct buffer_head * ext3_dx_find_entry(struct inode *dir, struct super_block *sb = dir->i_sb; struct dx_hash_info hinfo; struct dx_frame frames[2], *frame; - struct ext3_dir_entry_2 *de, *top; struct buffer_head *bh; unsigned long block; int retval; - int namelen = entry->len; - const u8 *name = entry->name; if (!(frame = dx_probe(entry, dir, &hinfo, frames, err))) return NULL; @@ -988,26 +985,20 @@ static struct buffer_head * ext3_dx_find_entry(struct inode *dir, block = dx_get_block(frame->at); if (!(bh = ext3_bread (NULL,dir, block, 0, err))) goto errout; - de = (struct ext3_dir_entry_2 *) bh->b_data; - top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize - - EXT3_DIR_REC_LEN(0)); - for (; de < top; de = ext3_next_entry(de)) { - int off = (block << EXT3_BLOCK_SIZE_BITS(sb)) - + ((char *) de - bh->b_data); - - if (!ext3_check_dir_entry(__func__, dir, de, bh, off)) { - brelse(bh); - *err = ERR_BAD_DX_DIR; - goto errout; - } - if (ext3_match(namelen, name, de)) { - *res_dir = de; - dx_release(frames); - return bh; - } + retval = search_dirblock(bh, dir, entry, + block << EXT3_BLOCK_SIZE_BITS(sb), + res_dir); + if (retval == 1) { + dx_release(frames); + return bh; } - brelse (bh); + brelse(bh); + if (retval == -1) { + *err = ERR_BAD_DX_DIR; + goto errout; + } + /* Check to see if we should continue to search */ retval = ext3_htree_next_block(dir, hinfo.hash, frame, frames, NULL); -- cgit v1.2.2 From ad1857a0e0cb29313efae3bb69c913b2c3c833a1 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 23 Nov 2010 13:30:33 +0900 Subject: ext3: Add journal error check into ext3_rename() Check return value of ext3_journal_get_write_access() and ext3_journal_dirty_metadata(). 
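The pattern applied here is the same as in the earlier ext3_mkdir() and ext3_delete_entry() fixes; a hypothetical helper (headers omitted) showing it in isolation:

static int example_update_block(handle_t *handle, struct super_block *sb,
				struct buffer_head *bh)
{
	int err;

	err = ext3_journal_get_write_access(handle, bh);
	if (err)
		goto journal_error;
	/* ... modify bh->b_data under the handle ... */
	err = ext3_journal_dirty_metadata(handle, bh);
	if (err) {
journal_error:
		ext3_std_error(sb, err);
		return err;
	}
	return 0;
}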
Signed-off-by: Namhyung Kim Signed-off-by: Jan Kara --- fs/ext3/namei.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index 9cc0b2c9664f..e69eed547242 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -2362,7 +2362,9 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry, goto end_rename; } else { BUFFER_TRACE(new_bh, "get write access"); - ext3_journal_get_write_access(handle, new_bh); + retval = ext3_journal_get_write_access(handle, new_bh); + if (retval) + goto journal_error; new_de->inode = cpu_to_le32(old_inode->i_ino); if (EXT3_HAS_INCOMPAT_FEATURE(new_dir->i_sb, EXT3_FEATURE_INCOMPAT_FILETYPE)) @@ -2371,7 +2373,9 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry, new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC; ext3_mark_inode_dirty(handle, new_dir); BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata"); - ext3_journal_dirty_metadata(handle, new_bh); + retval = ext3_journal_dirty_metadata(handle, new_bh); + if (retval) + goto journal_error; brelse(new_bh); new_bh = NULL; } @@ -2420,10 +2424,17 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry, ext3_update_dx_flag(old_dir); if (dir_bh) { BUFFER_TRACE(dir_bh, "get_write_access"); - ext3_journal_get_write_access(handle, dir_bh); + retval = ext3_journal_get_write_access(handle, dir_bh); + if (retval) + goto journal_error; PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino); BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata"); - ext3_journal_dirty_metadata(handle, dir_bh); + retval = ext3_journal_dirty_metadata(handle, dir_bh); + if (retval) { +journal_error: + ext3_std_error(new_dir->i_sb, retval); + goto end_rename; + } drop_nlink(old_dir); if (new_inode) { drop_nlink(new_inode); -- cgit v1.2.2 From ed2ae6f69148dcf9da725364e17184f27ba04dc2 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 4 Nov 2010 20:08:04 -0700 Subject: fs/udf: Use vzalloc Signed-off-by: Joe Perches Signed-off-by: Jan Kara --- fs/udf/super.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/udf/super.c b/fs/udf/super.c index 4a5c7c61836a..f99ff5dbd741 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -959,9 +959,9 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index) (sizeof(struct buffer_head *) * nr_groups); if (size <= PAGE_SIZE) - bitmap = kmalloc(size, GFP_KERNEL); + bitmap = kzalloc(size, GFP_KERNEL); else - bitmap = vmalloc(size); /* TODO: get rid of vmalloc */ + bitmap = vzalloc(size); /* TODO: get rid of vzalloc */ if (bitmap == NULL) { udf_error(sb, __func__, @@ -970,7 +970,6 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index) return NULL; } - memset(bitmap, 0x00, size); bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1); bitmap->s_nr_groups = nr_groups; return bitmap; -- cgit v1.2.2 From fab3c8581fc49998f8d0d349b70813d9712fb405 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 10 Nov 2010 15:46:18 -0800 Subject: fs/udf: Add printf format/argument verification Add __attribute__((format... to udf_warning. All arguments matched formats, no other changes necessary. 
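For illustration, with a made-up prototype, the attribute lets gcc check the variadic arguments against the format string at compile time:

__attribute__((format(printf, 2, 3)))
void example_warn(int level, const char *fmt, ...);

static void caller(void)
{
	example_warn(1, "block %d\n", 42);	/* OK */
	example_warn(1, "block %d\n", "oops");	/* now caught by -Wformat */
}

In udf_warning the indices are 3 and 4 because the format string is the third parameter and the arguments start at the fourth.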
Signed-off-by: Joe Perches Signed-off-by: Jan Kara --- fs/udf/udfdecl.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index 6995ab1f4305..74d58c08ebac 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h @@ -111,6 +111,8 @@ struct extent_position { }; /* super.c */ + +__attribute__((format(printf, 3, 4))) extern void udf_warning(struct super_block *, const char *, const char *, ...); static inline void udf_updated_lvid(struct super_block *sb) { -- cgit v1.2.2 From f2a6cc1f146465e13f31d9163d542d1facf4e203 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Oct 2010 17:25:59 +0200 Subject: udf: Convert UDF_SB(sb)->s_flags to use bitops Use atomic bitops to manipulate with sb flags to make manipulation safe without any locking. Signed-off-by: Jan Kara --- fs/udf/udf_sb.h | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h index d113b72c2768..9f38a6ca4fd5 100644 --- a/fs/udf/udf_sb.h +++ b/fs/udf/udf_sb.h @@ -2,6 +2,7 @@ #define __LINUX_UDF_SB_H #include +#include /* Since UDF 2.01 is ISO 13346 based... */ #define UDF_SUPER_MAGIC 0x15013346 @@ -139,7 +140,7 @@ struct udf_sb_info { __u16 s_udfrev; /* Miscellaneous flags */ - __u32 s_flags; + unsigned long s_flags; /* Encoding info */ struct nls_table *s_nls_map; @@ -161,8 +162,19 @@ struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi); int udf_compute_nr_groups(struct super_block *sb, u32 partition); -#define UDF_QUERY_FLAG(X,Y) ( UDF_SB(X)->s_flags & ( 1 << (Y) ) ) -#define UDF_SET_FLAG(X,Y) ( UDF_SB(X)->s_flags |= ( 1 << (Y) ) ) -#define UDF_CLEAR_FLAG(X,Y) ( UDF_SB(X)->s_flags &= ~( 1 << (Y) ) ) +static inline int UDF_QUERY_FLAG(struct super_block *sb, int flag) +{ + return test_bit(flag, &UDF_SB(sb)->s_flags); +} + +static inline void UDF_SET_FLAG(struct super_block *sb, int flag) +{ + set_bit(flag, &UDF_SB(sb)->s_flags); +} + +static inline void UDF_CLEAR_FLAG(struct super_block *sb, int flag) +{ + clear_bit(flag, &UDF_SB(sb)->s_flags); +} #endif /* __LINUX_UDF_SB_H */ -- cgit v1.2.2 From 49521de119d326d04fb3736ab827e12e1de966d0 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Oct 2010 17:42:44 +0200 Subject: udf: Remove BKL from udf_update_inode udf_update_inode() does not need BKL since on-disk inode modifications are protected by the buffer lock and reading of values of in-memory inode is safe without any lock. In some cases we can write inconsistent inode state to disk but in that case inode will be marked dirty and overwritten later. Also make unnecessarily global udf_sync_inode() static. 
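Roughly, the serialization that remains (a hypothetical sketch, not the real function body) is just the buffer lock around the copy-out:

#include <linux/fs.h>
#include <linux/buffer_head.h>

static void example_sync_to_disk(struct inode *inode, struct buffer_head *bh)
{
	lock_buffer(bh);
	/* copy i_size, timestamps, link count, ... into bh->b_data;
	 * a racing update simply rewrites the buffer with newer values
	 * and marks the inode dirty again */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
}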
Signed-off-by: Jan Kara --- fs/udf/inode.c | 13 +++---------- fs/udf/udfdecl.h | 1 - 2 files changed, 3 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/udf/inode.c b/fs/udf/inode.c index fc48f37aa2dd..9656907f4b81 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -51,6 +51,7 @@ MODULE_LICENSE("GPL"); static mode_t udf_convert_permissions(struct fileEntry *); static int udf_update_inode(struct inode *, int); static void udf_fill_inode(struct inode *, struct buffer_head *); +static int udf_sync_inode(struct inode *inode); static int udf_alloc_i_data(struct inode *inode, size_t size); static struct buffer_head *inode_getblk(struct inode *, sector_t, int *, sector_t *, int *); @@ -79,9 +80,7 @@ void udf_evict_inode(struct inode *inode) want_delete = 1; inode->i_size = 0; udf_truncate(inode); - lock_kernel(); udf_update_inode(inode, IS_SYNC(inode)); - unlock_kernel(); } invalidate_inode_buffers(inode); end_writeback(inode); @@ -1373,16 +1372,10 @@ static mode_t udf_convert_permissions(struct fileEntry *fe) int udf_write_inode(struct inode *inode, struct writeback_control *wbc) { - int ret; - - lock_kernel(); - ret = udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL); - unlock_kernel(); - - return ret; + return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL); } -int udf_sync_inode(struct inode *inode) +static int udf_sync_inode(struct inode *inode) { return udf_update_inode(inode, 1); } diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index 74d58c08ebac..f25e57e8a777 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h @@ -135,7 +135,6 @@ extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *, extern long udf_ioctl(struct file *, unsigned int, unsigned long); /* inode.c */ extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *); -extern int udf_sync_inode(struct inode *); extern void udf_expand_file_adinicb(struct inode *, int, int *); extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *); extern struct buffer_head *udf_bread(struct inode *, int, int, int *); -- cgit v1.2.2 From d664b6af609ecf5e7dcedf92f0bf188e3a29b3e0 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Oct 2010 18:28:46 +0200 Subject: udf: Move handling of uniqueID into a helper function and protect it by a s_alloc_mutex uniqueID handling has been duplicated in three places. Move it into a common helper. Since we modify an LVID buffer with uniqueID update, we take sbi->s_alloc_mutex to protect agaist other modifications of the structure. 
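As a worked example of the wrap rule inside the new helper (assuming, as the existing code appears to, that the low 32 bits of uniqueID must skip the reserved values 0-15 when they wrap):

#include <linux/types.h>

static u64 example_next_unique_id(u64 id)
{
	if (!(++id & 0xFFFFFFFF))
		id += 16;
	return id;	/* e.g. 0xFFFFFFFF -> 0x0000000100000010 */
}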
Signed-off-by: Jan Kara --- fs/udf/ialloc.c | 21 ++++++--------------- fs/udf/namei.c | 51 +++++++++++++-------------------------------------- fs/udf/super.c | 27 +++++++++++++++++++++++++++ fs/udf/udfdecl.h | 1 + 4 files changed, 47 insertions(+), 53 deletions(-) (limited to 'fs') diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index 75d9304d0dc3..6fb7e0adcda0 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c @@ -92,28 +92,19 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err) return NULL; } - mutex_lock(&sbi->s_alloc_mutex); if (sbi->s_lvid_bh) { - struct logicalVolIntegrityDesc *lvid = - (struct logicalVolIntegrityDesc *) - sbi->s_lvid_bh->b_data; - struct logicalVolIntegrityDescImpUse *lvidiu = - udf_sb_lvidiu(sbi); - struct logicalVolHeaderDesc *lvhd; - uint64_t uniqueID; - lvhd = (struct logicalVolHeaderDesc *) - (lvid->logicalVolContentsUse); + struct logicalVolIntegrityDescImpUse *lvidiu; + + iinfo->i_unique = lvid_get_unique_id(sb); + mutex_lock(&sbi->s_alloc_mutex); + lvidiu = udf_sb_lvidiu(sbi); if (S_ISDIR(mode)) le32_add_cpu(&lvidiu->numDirs, 1); else le32_add_cpu(&lvidiu->numFiles, 1); - iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID); - if (!(++uniqueID & 0x00000000FFFFFFFFUL)) - uniqueID += 16; - lvhd->uniqueID = cpu_to_le64(uniqueID); udf_updated_lvid(sb); + mutex_unlock(&sbi->s_alloc_mutex); } - mutex_unlock(&sbi->s_alloc_mutex); inode_init_owner(inode, dir, mode); diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 6d8dc02baebb..701fcda18415 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -890,8 +890,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, int block; unsigned char *name = NULL; int namelen; - struct buffer_head *bh; struct udf_inode_info *iinfo; + struct super_block *sb = dir->i_sb; lock_kernel(); inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err); @@ -912,7 +912,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, struct kernel_lb_addr eloc; uint32_t bsize; - block = udf_new_block(inode->i_sb, inode, + block = udf_new_block(sb, inode, iinfo->i_location.partitionReferenceNum, iinfo->i_location.logicalBlockNum, &err); if (!block) @@ -923,17 +923,17 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, eloc.logicalBlockNum = block; eloc.partitionReferenceNum = iinfo->i_location.partitionReferenceNum; - bsize = inode->i_sb->s_blocksize; + bsize = sb->s_blocksize; iinfo->i_lenExtents = bsize; udf_add_aext(inode, &epos, &eloc, bsize, 0); brelse(epos.bh); - block = udf_get_pblock(inode->i_sb, block, + block = udf_get_pblock(sb, block, iinfo->i_location.partitionReferenceNum, 0); - epos.bh = udf_tgetblk(inode->i_sb, block); + epos.bh = udf_tgetblk(sb, block); lock_buffer(epos.bh); - memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize); + memset(epos.bh->b_data, 0x00, bsize); set_buffer_uptodate(epos.bh); unlock_buffer(epos.bh); mark_buffer_dirty_inode(epos.bh, inode); @@ -941,7 +941,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, } else ea = iinfo->i_ext.i_data + iinfo->i_lenEAttr; - eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode); + eoffset = sb->s_blocksize - udf_ext0_offset(inode); pc = (struct pathComponent *)ea; if (*symname == '/') { @@ -981,7 +981,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, } if (pc->componentType == 5) { - namelen = udf_put_filename(inode->i_sb, compstart, name, + namelen = udf_put_filename(sb, compstart, name, symname - compstart); if (!namelen) goto out_no_entry; @@ -1015,23 +1015,11 @@ static int 
udf_symlink(struct inode *dir, struct dentry *dentry, fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) goto out_no_entry; - cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); + cfi.icb.extLength = cpu_to_le32(sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); - bh = UDF_SB(inode->i_sb)->s_lvid_bh; - if (bh) { - struct logicalVolIntegrityDesc *lvid = - (struct logicalVolIntegrityDesc *)bh->b_data; - struct logicalVolHeaderDesc *lvhd; - uint64_t uniqueID; - lvhd = (struct logicalVolHeaderDesc *) - lvid->logicalVolContentsUse; - uniqueID = le64_to_cpu(lvhd->uniqueID); + if (UDF_SB(inode->i_sb)->s_lvid_bh) { *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = - cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL); - if (!(++uniqueID & 0x00000000FFFFFFFFUL)) - uniqueID += 16; - lvhd->uniqueID = cpu_to_le64(uniqueID); - mark_buffer_dirty(bh); + cpu_to_le32(lvid_get_unique_id(sb)); } udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) @@ -1060,7 +1048,6 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, struct udf_fileident_bh fibh; struct fileIdentDesc cfi, *fi; int err; - struct buffer_head *bh; lock_kernel(); if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { @@ -1075,21 +1062,9 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, } cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(UDF_I(inode)->i_location); - bh = UDF_SB(inode->i_sb)->s_lvid_bh; - if (bh) { - struct logicalVolIntegrityDesc *lvid = - (struct logicalVolIntegrityDesc *)bh->b_data; - struct logicalVolHeaderDesc *lvhd; - uint64_t uniqueID; - lvhd = (struct logicalVolHeaderDesc *) - (lvid->logicalVolContentsUse); - uniqueID = le64_to_cpu(lvhd->uniqueID); + if (UDF_SB(inode->i_sb)->s_lvid_bh) { *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = - cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL); - if (!(++uniqueID & 0x00000000FFFFFFFFUL)) - uniqueID += 16; - lvhd->uniqueID = cpu_to_le64(uniqueID); - mark_buffer_dirty(bh); + cpu_to_le32(lvid_get_unique_id(inode->i_sb)); } udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) diff --git a/fs/udf/super.c b/fs/udf/super.c index f99ff5dbd741..948e1aca0f34 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1823,6 +1823,33 @@ static void udf_close_lvid(struct super_block *sb) sbi->s_lvid_dirty = 0; } +u64 lvid_get_unique_id(struct super_block *sb) +{ + struct buffer_head *bh; + struct udf_sb_info *sbi = UDF_SB(sb); + struct logicalVolIntegrityDesc *lvid; + struct logicalVolHeaderDesc *lvhd; + u64 uniqueID; + u64 ret; + + bh = sbi->s_lvid_bh; + if (!bh) + return 0; + + lvid = (struct logicalVolIntegrityDesc *)bh->b_data; + lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse; + + mutex_lock(&sbi->s_alloc_mutex); + ret = uniqueID = le64_to_cpu(lvhd->uniqueID); + if (!(++uniqueID & 0xFFFFFFFF)) + uniqueID += 16; + lvhd->uniqueID = cpu_to_le64(uniqueID); + mutex_unlock(&sbi->s_alloc_mutex); + mark_buffer_dirty(bh); + + return ret; +} + static void udf_sb_free_bitmap(struct udf_bitmap *bitmap) { int i; diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index f25e57e8a777..eba48209f9f3 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h @@ -125,6 +125,7 @@ static inline void udf_updated_lvid(struct super_block *sb) sb->s_dirt = 1; UDF_SB(sb)->s_lvid_dirty = 1; } +extern u64 lvid_get_unique_id(struct super_block *sb); /* namei.c */ extern int 
udf_write_fi(struct inode *inode, struct fileIdentDesc *, -- cgit v1.2.2 From 949f4a7c08bc4a050eae7aeeac3e6d019d1feafb Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Oct 2010 18:49:20 +0200 Subject: udf: Protect all modifications of LVID with s_alloc_mutex udf_open_lvid() and udf_close_lvid() were modifying LVID without s_alloc_mutex. Since they can be called from remount, the modification could race with other filesystem modifications of LVID so protect them by s_alloc_mutex just to be sure. Signed-off-by: Jan Kara --- fs/udf/super.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/udf/super.c b/fs/udf/super.c index 948e1aca0f34..e54960c0e960 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1773,6 +1773,8 @@ static void udf_open_lvid(struct super_block *sb) if (!bh) return; + + mutex_lock(&sbi->s_alloc_mutex); lvid = (struct logicalVolIntegrityDesc *)bh->b_data; lvidiu = udf_sb_lvidiu(sbi); @@ -1789,6 +1791,7 @@ static void udf_open_lvid(struct super_block *sb) lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag); mark_buffer_dirty(bh); sbi->s_lvid_dirty = 0; + mutex_unlock(&sbi->s_alloc_mutex); } static void udf_close_lvid(struct super_block *sb) @@ -1801,6 +1804,7 @@ static void udf_close_lvid(struct super_block *sb) if (!bh) return; + mutex_lock(&sbi->s_alloc_mutex); lvid = (struct logicalVolIntegrityDesc *)bh->b_data; lvidiu = udf_sb_lvidiu(sbi); lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX; @@ -1821,6 +1825,7 @@ static void udf_close_lvid(struct super_block *sb) lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag); mark_buffer_dirty(bh); sbi->s_lvid_dirty = 0; + mutex_unlock(&sbi->s_alloc_mutex); } u64 lvid_get_unique_id(struct super_block *sb) -- cgit v1.2.2 From c03cad241af63445b751781a09faf08b3a5b77c1 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Oct 2010 22:17:28 +0200 Subject: udf: Protect default inode credentials by rwlock Superblock carries credentials (uid, gid, etc.) which are used as default values in __udf_read_inode() when media does not provide these. These credentials can change during remount so we protect them by a rwlock so that each inode gets a consistent set of credentials. 
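A simplified sketch of the split (structure and helper names made up for illustration): remount is the only writer, inode setup is a reader that sees a consistent pair of values:

#include <linux/spinlock.h>
#include <linux/types.h>

struct example_sb_info {
	rwlock_t cred_lock;		/* set up with rwlock_init() */
	uid_t default_uid;
	gid_t default_gid;
};

static void example_remount(struct example_sb_info *sbi, uid_t uid, gid_t gid)
{
	write_lock(&sbi->cred_lock);
	sbi->default_uid = uid;
	sbi->default_gid = gid;
	write_unlock(&sbi->cred_lock);
}

static void example_fill_inode(struct example_sb_info *sbi,
			       uid_t *uid, gid_t *gid)
{
	read_lock(&sbi->cred_lock);
	*uid = sbi->default_uid;
	*gid = sbi->default_gid;
	read_unlock(&sbi->cred_lock);
}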
Signed-off-by: Jan Kara --- fs/udf/inode.c | 16 +++++++++------- fs/udf/super.c | 3 +++ fs/udf/udf_sb.h | 2 ++ 3 files changed, 14 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 9656907f4b81..fa3c1541151c 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1201,6 +1201,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) return; } + read_lock(&sbi->s_cred_lock); inode->i_uid = le32_to_cpu(fe->uid); if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) || @@ -1213,13 +1214,6 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET)) inode->i_gid = UDF_SB(inode->i_sb)->s_gid; - inode->i_nlink = le16_to_cpu(fe->fileLinkCount); - if (!inode->i_nlink) - inode->i_nlink = 1; - - inode->i_size = le64_to_cpu(fe->informationLength); - iinfo->i_lenExtents = inode->i_size; - if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY && sbi->s_fmode != UDF_INVALID_MODE) inode->i_mode = sbi->s_fmode; @@ -1229,6 +1223,14 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) else inode->i_mode = udf_convert_permissions(fe); inode->i_mode &= ~sbi->s_umask; + read_unlock(&sbi->s_cred_lock); + + inode->i_nlink = le16_to_cpu(fe->fileLinkCount); + if (!inode->i_nlink) + inode->i_nlink = 1; + + inode->i_size = le64_to_cpu(fe->informationLength); + iinfo->i_lenExtents = inode->i_size; if (iinfo->i_efe == 0) { inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) << diff --git a/fs/udf/super.c b/fs/udf/super.c index e54960c0e960..f06cc67cf864 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -568,12 +568,14 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options) return -EINVAL; lock_kernel(); + write_lock(&sbi->s_cred_lock); sbi->s_flags = uopt.flags; sbi->s_uid = uopt.uid; sbi->s_gid = uopt.gid; sbi->s_umask = uopt.umask; sbi->s_fmode = uopt.fmode; sbi->s_dmode = uopt.dmode; + write_unlock(&sbi->s_cred_lock); if (sbi->s_lvid_bh) { int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev); @@ -1960,6 +1962,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) sbi->s_fmode = uopt.fmode; sbi->s_dmode = uopt.dmode; sbi->s_nls_map = uopt.nls_map; + rwlock_init(&sbi->s_cred_lock); if (uopt.session == 0xFFFFFFFF) sbi->s_session = udf_get_last_session(sb); diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h index 9f38a6ca4fd5..4858c191242b 100644 --- a/fs/udf/udf_sb.h +++ b/fs/udf/udf_sb.h @@ -129,6 +129,8 @@ struct udf_sb_info { uid_t s_uid; mode_t s_fmode; mode_t s_dmode; + /* Lock protecting consistency of above permission settings */ + rwlock_t s_cred_lock; /* Root Info */ struct timespec s_record_time; -- cgit v1.2.2 From 0484b1cedc053cf88a046da5f08bc00747e533cb Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Oct 2010 22:22:57 +0200 Subject: udf: Remove BKL from udf_put_super() and udf_remount_fs() udf_put_super() does not need BKL because the filesystem is shut down so there's nothing to race with. The credential changes in udf_remount_fs() and LVID changes are now protected by dedicated locks so we can remove BKL from this function as well. 
Signed-off-by: Jan Kara --- fs/udf/super.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'fs') diff --git a/fs/udf/super.c b/fs/udf/super.c index f06cc67cf864..4cf6121ab41a 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -567,7 +567,6 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options) if (!udf_parse_options(options, &uopt, true)) return -EINVAL; - lock_kernel(); write_lock(&sbi->s_cred_lock); sbi->s_flags = uopt.flags; sbi->s_uid = uopt.uid; @@ -592,7 +591,6 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options) udf_open_lvid(sb); out_unlock: - unlock_kernel(); return error; } @@ -2132,8 +2130,6 @@ static void udf_put_super(struct super_block *sb) sbi = UDF_SB(sb); - lock_kernel(); - if (sbi->s_vat_inode) iput(sbi->s_vat_inode); if (sbi->s_partitions) @@ -2149,8 +2145,6 @@ static void udf_put_super(struct super_block *sb) kfree(sbi->s_partmaps); kfree(sb->s_fs_info); sb->s_fs_info = NULL; - - unlock_kernel(); } static int udf_sync_fs(struct super_block *sb, int wait) -- cgit v1.2.2 From 7abc2e45e48ca04206949682402d5d55bc64a16b Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Oct 2010 22:32:02 +0200 Subject: udf: Call udf_add_free_space() for more blocks at once in udf_free_blocks() There's no need to call udf_add_free_space() for one block at a time. It saves us noticeable amount of work and yields different result from the original code only if the filesystem is corrupted and bitmap bit is already cleared. In such case counter of free blocks is probably wrong anyways so the change does not matter. Signed-off-by: Jan Kara --- fs/udf/balloc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index b608efaa4cee..306ee39ef2c3 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -157,10 +157,9 @@ static void udf_bitmap_free_blocks(struct super_block *sb, udf_debug("bit %ld already set\n", bit + i); udf_debug("byte=%2x\n", ((char *)bh->b_data)[(bit + i) >> 3]); - } else { - udf_add_free_space(sb, sbi->s_partition, 1); } } + udf_add_free_space(sb, sbi->s_partition, count); mark_buffer_dirty(bh); if (overflow) { block += count; -- cgit v1.2.2 From d1668fe390c1e84580575965684a8fa7e4626dee Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 20 Oct 2010 23:24:12 +0200 Subject: udf: Remove BKL from free space counting functions udf_count_free_bitmap() does not need BKL because bitmaps are in a fixed place on disk and so we can count set bits without serialization. udf_count_free_table() is now protected by s_alloc_mutex instead of BKL to get a consistent view of free space extents. 
Signed-off-by: Jan Kara --- fs/udf/super.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/udf/super.c b/fs/udf/super.c index 4cf6121ab41a..d2ec9f31e843 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -2207,8 +2207,6 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb, uint16_t ident; struct spaceBitmapDesc *bm; - lock_kernel(); - loc.logicalBlockNum = bitmap->s_extPosition; loc.partitionReferenceNum = UDF_SB(sb)->s_partition; bh = udf_read_ptagged(sb, &loc, 0, &ident); @@ -2245,10 +2243,7 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb, } } brelse(bh); - out: - unlock_kernel(); - return accum; } @@ -2261,8 +2256,7 @@ static unsigned int udf_count_free_table(struct super_block *sb, int8_t etype; struct extent_position epos; - lock_kernel(); - + mutex_lock(&UDF_SB(sb)->s_alloc_mutex); epos.block = UDF_I(table)->i_location; epos.offset = sizeof(struct unallocSpaceEntry); epos.bh = NULL; @@ -2271,8 +2265,7 @@ static unsigned int udf_count_free_table(struct super_block *sb, accum += (elen >> table->i_sb->s_blocksize_bits); brelse(epos.bh); - - unlock_kernel(); + mutex_unlock(&UDF_SB(sb)->s_alloc_mutex); return accum; } -- cgit v1.2.2 From 4d0fb621d35007c19a396f2bb629e5aeaacef2d0 Mon Sep 17 00:00:00 2001 From: Alessio Igor Bogani Date: Tue, 16 Nov 2010 18:40:47 +0100 Subject: udf: Replace bkl with the UDF_I(inode)->i_data_sem for protect udf_inode_info struct Replace bkl with the UDF_I(inode)->i_data_sem rw semaphore in udf_release_file(), udf_symlink(), udf_symlink_filler(), udf_get_block(), udf_block_map(), and udf_setattr(). The rule now is that any operation on regular file's or symlink's extents (or generally allocation information including goal block) needs to hold i_data_sem. This work was supported by a hardware donation from the CE Linux Forum. 
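In short, the rule is: take i_data_sem shared for extent lookups and exclusive for extent changes. A sketch with made-up helper names, assuming fs/udf's private struct udf_inode_info from "udf_i.h":

static long example_block_map(struct udf_inode_info *iinfo, long block)
{
	long phys = 0;

	down_read(&iinfo->i_data_sem);
	/* ... walk the extent list to translate 'block' into 'phys' ... */
	up_read(&iinfo->i_data_sem);
	return phys;
}

static void example_truncate_extents(struct udf_inode_info *iinfo)
{
	down_write(&iinfo->i_data_sem);	/* i_mutex is held as well */
	/* ... drop extents beyond i_size ... */
	up_write(&iinfo->i_data_sem);
}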
Signed-off-by: Alessio Igor Bogani Signed-off-by: Jan Kara --- fs/udf/file.c | 4 ++-- fs/udf/inode.c | 19 ++++++++++--------- fs/udf/namei.c | 7 ++++--- fs/udf/super.c | 1 + fs/udf/symlink.c | 12 +++++++----- fs/udf/udf_i.h | 13 +++++++++++++ 6 files changed, 37 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/udf/file.c b/fs/udf/file.c index 66b9e7e7e4c5..df0c5561cc7e 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -204,10 +204,10 @@ static int udf_release_file(struct inode *inode, struct file *filp) { if (filp->f_mode & FMODE_WRITE) { mutex_lock(&inode->i_mutex); - lock_kernel(); + down_write(&UDF_I(inode)->i_data_sem); udf_discard_prealloc(inode); udf_truncate_tail_extent(inode); - unlock_kernel(); + up_write(&UDF_I(inode)->i_data_sem); mutex_unlock(&inode->i_mutex); } return 0; diff --git a/fs/udf/inode.c b/fs/udf/inode.c index fa3c1541151c..b2fe4d7f20eb 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -301,10 +301,9 @@ static int udf_get_block(struct inode *inode, sector_t block, err = -EIO; new = 0; bh = NULL; - - lock_kernel(); - iinfo = UDF_I(inode); + + down_write(&iinfo->i_data_sem); if (block == iinfo->i_next_alloc_block + 1) { iinfo->i_next_alloc_block++; iinfo->i_next_alloc_goal++; @@ -323,7 +322,7 @@ static int udf_get_block(struct inode *inode, sector_t block, map_bh(bh_result, inode->i_sb, phys); abort: - unlock_kernel(); + up_write(&iinfo->i_data_sem); return err; } @@ -1021,16 +1020,16 @@ void udf_truncate(struct inode *inode) if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return; - lock_kernel(); iinfo = UDF_I(inode); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { + down_write(&iinfo->i_data_sem); if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) + inode->i_size)) { udf_expand_file_adinicb(inode, inode->i_size, &err); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { inode->i_size = iinfo->i_lenAlloc; - unlock_kernel(); + up_write(&iinfo->i_data_sem); return; } else udf_truncate_extents(inode); @@ -1041,10 +1040,13 @@ void udf_truncate(struct inode *inode) offset - udf_file_entry_alloc_offset(inode)); iinfo->i_lenAlloc = inode->i_size; } + up_write(&iinfo->i_data_sem); } else { block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block); + down_write(&iinfo->i_data_sem); udf_truncate_extents(inode); + up_write(&iinfo->i_data_sem); } inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb); @@ -1052,7 +1054,6 @@ void udf_truncate(struct inode *inode) udf_sync_inode(inode); else mark_inode_dirty(inode); - unlock_kernel(); } static void __udf_read_inode(struct inode *inode) @@ -2043,7 +2044,7 @@ long udf_block_map(struct inode *inode, sector_t block) struct extent_position epos = {}; int ret; - lock_kernel(); + down_read(&UDF_I(inode)->i_data_sem); if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) @@ -2051,7 +2052,7 @@ long udf_block_map(struct inode *inode, sector_t block) else ret = 0; - unlock_kernel(); + up_read(&UDF_I(inode)->i_data_sem); brelse(epos.bh); if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV)) diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 701fcda18415..d5eb000ddddd 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -893,18 +893,18 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, struct udf_inode_info *iinfo; struct super_block *sb = dir->i_sb; - lock_kernel(); inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err); if (!inode) goto out; + iinfo = UDF_I(inode); + down_write(&iinfo->i_data_sem); name = kmalloc(UDF_NAME_LEN, GFP_NOFS); 
if (!name) { err = -ENOMEM; goto out_no_entry; } - iinfo = UDF_I(inode); inode->i_data.a_ops = &udf_symlink_aops; inode->i_op = &udf_symlink_inode_operations; @@ -1024,6 +1024,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) mark_inode_dirty(dir); + up_write(&iinfo->i_data_sem); if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); @@ -1032,10 +1033,10 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, out: kfree(name); - unlock_kernel(); return err; out_no_entry: + up_write(&iinfo->i_data_sem); inode_dec_link_count(inode); iput(inode); goto out; diff --git a/fs/udf/super.c b/fs/udf/super.c index d2ec9f31e843..441b892cf85e 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -135,6 +135,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb) ei->i_next_alloc_block = 0; ei->i_next_alloc_goal = 0; ei->i_strat4096 = 0; + init_rwsem(&ei->i_data_sem); return &ei->vfs_inode; } diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c index 16064787d2b7..b1d4488b0f14 100644 --- a/fs/udf/symlink.c +++ b/fs/udf/symlink.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include "udf_i.h" @@ -78,13 +77,16 @@ static int udf_symlink_filler(struct file *file, struct page *page) int err = -EIO; unsigned char *p = kmap(page); struct udf_inode_info *iinfo; + uint32_t pos; - lock_kernel(); iinfo = UDF_I(inode); + pos = udf_block_map(inode, 0); + + down_read(&iinfo->i_data_sem); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr; } else { - bh = sb_bread(inode->i_sb, udf_block_map(inode, 0)); + bh = sb_bread(inode->i_sb, pos); if (!bh) goto out; @@ -95,14 +97,14 @@ static int udf_symlink_filler(struct file *file, struct page *page) udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p); brelse(bh); - unlock_kernel(); + up_read(&iinfo->i_data_sem); SetPageUptodate(page); kunmap(page); unlock_page(page); return 0; out: - unlock_kernel(); + up_read(&iinfo->i_data_sem); SetPageError(page); kunmap(page); unlock_page(page); diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h index e58d1de41073..d1bd31ea724e 100644 --- a/fs/udf/udf_i.h +++ b/fs/udf/udf_i.h @@ -1,6 +1,18 @@ #ifndef _UDF_I_H #define _UDF_I_H +/* + * The i_data_sem and i_mutex serve for protection of allocation information + * of a regular files and symlinks. This includes all extents belonging to + * the file/symlink, a fact whether data are in-inode or in external data + * blocks, preallocation, goal block information... When extents are read, + * i_mutex or i_data_sem must be held (for reading is enough in case of + * i_data_sem). When extents are changed, i_data_sem must be held for writing + * and also i_mutex must be held. + * + * For directories i_mutex is used for all the necessary protection. + */ + struct udf_inode_info { struct timespec i_crtime; /* Physical address of inode */ @@ -21,6 +33,7 @@ struct udf_inode_info { struct long_ad *i_lad; __u8 *i_data; } i_ext; + struct rw_semaphore i_data_sem; struct inode vfs_inode; }; -- cgit v1.2.2 From 7db09be629033b79792a1bf18f505f5f15914395 Mon Sep 17 00:00:00 2001 From: Alessio Igor Bogani Date: Tue, 16 Nov 2010 18:40:48 +0100 Subject: udf: Use of s_alloc_mutex to serialize udf_relocate_blocks() execution This work was supported by a hardware donation from the CE Linux Forum. 
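The conversion is the usual single-exit pattern (generic sketch, not the real function): every early return becomes a goto so the mutex taken at entry is always released:

#include <linux/mutex.h>

static int example_locked_lookup(struct mutex *lock, int key)
{
	int ret = 0;

	mutex_lock(lock);
	if (key < 0) {			/* lookup failed */
		ret = 1;
		goto out;
	}
	/* ... do the relocation table work ... */
out:
	mutex_unlock(lock);
	return ret;
}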
Signed-off-by: Alessio Igor Bogani Signed-off-by: Jan Kara --- fs/udf/partition.c | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/udf/partition.c b/fs/udf/partition.c index 745eb209be0c..a71090ea0e07 100644 --- a/fs/udf/partition.c +++ b/fs/udf/partition.c @@ -25,6 +25,7 @@ #include #include #include +#include uint32_t udf_get_pblock(struct super_block *sb, uint32_t block, uint16_t partition, uint32_t offset) @@ -159,7 +160,9 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block) struct udf_sb_info *sbi = UDF_SB(sb); u16 reallocationTableLen; struct buffer_head *bh; + int ret = 0; + mutex_lock(&sbi->s_alloc_mutex); for (i = 0; i < sbi->s_partitions; i++) { struct udf_part_map *map = &sbi->s_partmaps[i]; if (old_block > map->s_partition_root && @@ -175,8 +178,10 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block) break; } - if (!st) - return 1; + if (!st) { + ret = 1; + goto out; + } reallocationTableLen = le16_to_cpu(st->reallocationTableLen); @@ -207,14 +212,16 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block) ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1)); - return 0; + ret = 0; + goto out; } else if (origLoc == packet) { *new_block = le32_to_cpu( entry->mappedLocation) + ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1)); - return 0; + ret = 0; + goto out; } else if (origLoc > packet) break; } @@ -251,20 +258,24 @@ int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block) st->mapEntry[k].mappedLocation) + ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1)); - return 0; + ret = 0; + goto out; } - return 1; + ret = 1; + goto out; } /* if old_block */ } if (i == sbi->s_partitions) { /* outside of partitions */ /* for now, fail =) */ - return 1; + ret = 1; } - return 0; +out: + mutex_unlock(&sbi->s_alloc_mutex); + return ret; } static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block, -- cgit v1.2.2 From 9db9f9e31d7661dff35a75ed01ff9fc0d6acdaf8 Mon Sep 17 00:00:00 2001 From: Alessio Igor Bogani Date: Tue, 16 Nov 2010 18:40:49 +0100 Subject: udf: Remove unnecessary bkl usages The udf_readdir(), udf_lookup(), udf_create(), udf_mknod(), udf_mkdir(), udf_rmdir(), udf_link(), udf_get_parent() and udf_unlink() already seem adequately protected by the i_mutex that the VFS holds when invoking them. udf_rename() should likewise already be protected by lock_rename(), again taken by the VFS. The udf_ioctl(), udf_fill_super() and udf_evict_inode() don't require any further protection. This work was supported by a hardware donation from the CE Linux Forum.
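For context, the protection the commit message refers to can be pictured as follows; this is a heavily condensed, illustrative view of what the VFS callers of this era do (based on fs/namei.c), not the real call chain:

#include <linux/fs.h>
#include <linux/mutex.h>

/*
 * Illustrative only: the VFS path for unlink(2) takes the parent
 * directory's i_mutex before dispatching to the filesystem, so
 * udf_unlink() and the other directory operations already run with the
 * directory serialized.  Renames are covered in the same spirit by
 * lock_rename(), which locks both parent directories.
 */
static int vfs_style_unlink(struct inode *dir, struct dentry *victim)
{
	int err;

	mutex_lock(&dir->i_mutex);
	err = dir->i_op->unlink(dir, victim);	/* ->unlink is udf_unlink() for UDF */
	mutex_unlock(&dir->i_mutex);
	return err;
}

Since that locking already exists above the filesystem, the lock_kernel()/unlock_kernel() pairs removed below added no further protection.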
Signed-off-by: Alessio Igor Bogani Signed-off-by: Jan Kara --- fs/udf/dir.c | 5 ----- fs/udf/file.c | 4 ---- fs/udf/inode.c | 3 --- fs/udf/namei.c | 27 --------------------------- fs/udf/super.c | 9 +-------- 5 files changed, 1 insertion(+), 47 deletions(-) (limited to 'fs') diff --git a/fs/udf/dir.c b/fs/udf/dir.c index 51552bf50225..eb8bfe2b89a5 100644 --- a/fs/udf/dir.c +++ b/fs/udf/dir.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include "udf_i.h" @@ -190,18 +189,14 @@ static int udf_readdir(struct file *filp, void *dirent, filldir_t filldir) struct inode *dir = filp->f_path.dentry->d_inode; int result; - lock_kernel(); - if (filp->f_pos == 0) { if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) { - unlock_kernel(); return 0; } filp->f_pos++; } result = do_udf_readdir(dir, filp, filldir, dirent); - unlock_kernel(); return result; } diff --git a/fs/udf/file.c b/fs/udf/file.c index df0c5561cc7e..4e3bbd81b57b 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -32,7 +32,6 @@ #include /* memset */ #include #include -#include #include #include #include @@ -149,8 +148,6 @@ long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) long old_block, new_block; int result = -EINVAL; - lock_kernel(); - if (file_permission(filp, MAY_READ) != 0) { udf_debug("no permission to access inode %lu\n", inode->i_ino); result = -EPERM; @@ -196,7 +193,6 @@ long udf_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } out: - unlock_kernel(); return result; } diff --git a/fs/udf/inode.c b/fs/udf/inode.c index b2fe4d7f20eb..c6a2e782b97b 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -31,7 +31,6 @@ #include "udfdecl.h" #include -#include #include #include #include @@ -96,9 +95,7 @@ void udf_evict_inode(struct inode *inode) kfree(iinfo->i_ext.i_data); iinfo->i_ext.i_data = NULL; if (want_delete) { - lock_kernel(); udf_free_inode(inode); - unlock_kernel(); } } diff --git a/fs/udf/namei.c b/fs/udf/namei.c index d5eb000ddddd..26815a25379d 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -263,7 +262,6 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry, if (dentry->d_name.len > UDF_NAME_LEN - 2) return ERR_PTR(-ENAMETOOLONG); - lock_kernel(); #ifdef UDF_RECOVERY /* temporary shorthand for specifying files by inode number */ if (!strncmp(dentry->d_name.name, ".B=", 3)) { @@ -275,7 +273,6 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry, }; inode = udf_iget(dir->i_sb, lb); if (!inode) { - unlock_kernel(); return ERR_PTR(-EACCES); } } else @@ -291,11 +288,9 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry, loc = lelb_to_cpu(cfi.icb.extLocation); inode = udf_iget(dir->i_sb, &loc); if (!inode) { - unlock_kernel(); return ERR_PTR(-EACCES); } } - unlock_kernel(); return d_splice_alias(inode, dentry); } @@ -562,10 +557,8 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode, int err; struct udf_inode_info *iinfo; - lock_kernel(); inode = udf_new_inode(dir, mode, &err); if (!inode) { - unlock_kernel(); return err; } @@ -583,7 +576,6 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode, inode->i_nlink--; mark_inode_dirty(inode); iput(inode); - unlock_kernel(); return err; } cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); @@ -596,7 +588,6 @@ static int udf_create(struct inode *dir, struct dentry *dentry, int mode, if (fibh.sbh != fibh.ebh) 
brelse(fibh.ebh); brelse(fibh.sbh); - unlock_kernel(); d_instantiate(dentry, inode); return 0; @@ -614,7 +605,6 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode, if (!old_valid_dev(rdev)) return -EINVAL; - lock_kernel(); err = -EIO; inode = udf_new_inode(dir, mode, &err); if (!inode) @@ -627,7 +617,6 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode, inode->i_nlink--; mark_inode_dirty(inode); iput(inode); - unlock_kernel(); return err; } cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); @@ -646,7 +635,6 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode, err = 0; out: - unlock_kernel(); return err; } @@ -659,7 +647,6 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode) struct udf_inode_info *dinfo = UDF_I(dir); struct udf_inode_info *iinfo; - lock_kernel(); err = -EMLINK; if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) goto out; @@ -712,7 +699,6 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode) err = 0; out: - unlock_kernel(); return err; } @@ -794,7 +780,6 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry) struct kernel_lb_addr tloc; retval = -ENOENT; - lock_kernel(); fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); if (!fi) goto out; @@ -826,7 +811,6 @@ end_rmdir: brelse(fibh.sbh); out: - unlock_kernel(); return retval; } @@ -840,7 +824,6 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry) struct kernel_lb_addr tloc; retval = -ENOENT; - lock_kernel(); fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); if (!fi) goto out; @@ -870,7 +853,6 @@ end_unlink: brelse(fibh.sbh); out: - unlock_kernel(); return retval; } @@ -1050,15 +1032,12 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, struct fileIdentDesc cfi, *fi; int err; - lock_kernel(); if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { - unlock_kernel(); return -EMLINK; } fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) { - unlock_kernel(); return err; } cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); @@ -1079,7 +1058,6 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, mark_inode_dirty(inode); ihold(inode); d_instantiate(dentry, inode); - unlock_kernel(); return 0; } @@ -1100,7 +1078,6 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, struct kernel_lb_addr tloc; struct udf_inode_info *old_iinfo = UDF_I(old_inode); - lock_kernel(); ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); if (ofi) { if (ofibh.sbh != ofibh.ebh) @@ -1224,7 +1201,6 @@ end_rename: brelse(nfibh.ebh); brelse(nfibh.sbh); } - unlock_kernel(); return retval; } @@ -1237,7 +1213,6 @@ static struct dentry *udf_get_parent(struct dentry *child) struct fileIdentDesc cfi; struct udf_fileident_bh fibh; - lock_kernel(); if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi)) goto out_unlock; @@ -1249,11 +1224,9 @@ static struct dentry *udf_get_parent(struct dentry *child) inode = udf_iget(child->d_inode->i_sb, &tloc); if (!inode) goto out_unlock; - unlock_kernel(); return d_obtain_alias(inode); out_unlock: - unlock_kernel(); return ERR_PTR(-EACCES); } diff --git a/fs/udf/super.c b/fs/udf/super.c index 441b892cf85e..536f89da4af2 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -48,7 +48,6 @@ #include #include #include -#include #include #include #include @@ -1911,8 +1910,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) struct kernel_lb_addr rootdir, 
fileset; struct udf_sb_info *sbi; - lock_kernel(); - uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT); uopt.uid = -1; uopt.gid = -1; @@ -1921,10 +1918,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) uopt.dmode = UDF_INVALID_MODE; sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL); - if (!sbi) { - unlock_kernel(); + if (!sbi) return -ENOMEM; - } sb->s_fs_info = sbi; @@ -2071,7 +2066,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) goto error_out; } sb->s_maxbytes = MAX_LFS_FILESIZE; - unlock_kernel(); return 0; error_out: @@ -2092,7 +2086,6 @@ error_out: kfree(sbi); sb->s_fs_info = NULL; - unlock_kernel(); return -EINVAL; } -- cgit v1.2.2 From 8754a3f718f08dc21b3c5eccd044f612d4bc1ab1 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 16 Nov 2010 14:33:48 +0100 Subject: udf: Protect udf_file_aio_write from possible races Code doing conversion from INICB file to a normal file in udf_file_aio_write() is not protected by any lock from other code modifying the inode. Use i_alloc_sem for that. Reported-by: Alessio Igor Bogani Signed-off-by: Jan Kara --- fs/udf/file.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/udf/file.c b/fs/udf/file.c index 4e3bbd81b57b..89c78486cbbe 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -113,6 +113,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov, size_t count = iocb->ki_left; struct udf_inode_info *iinfo = UDF_I(inode); + down_write(&iinfo->i_data_sem); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { if (file->f_flags & O_APPEND) pos = inode->i_size; @@ -125,6 +126,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov, udf_expand_file_adinicb(inode, pos + count, &err); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { udf_debug("udf_expand_adinicb: err=%d\n", err); + up_write(&iinfo->i_data_sem); return err; } } else { @@ -134,6 +136,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov, iinfo->i_lenAlloc = inode->i_size; } } + up_write(&iinfo->i_data_sem); retval = generic_file_aio_write(iocb, iov, nr_segs, ppos); if (retval > 0) -- cgit v1.2.2 From 4651c5900e7a3c84d4b70412f8bbc40c1bcb50cf Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 25 Nov 2010 03:56:24 +0100 Subject: udf: Fix directory corruption after extent merging If udf_bread() called from udf_add_entry() managed to merge created extent to an already existing one (or if previous extents could be merged), the code truncating the last extent to proper size would just overwrite the freshly allocated extent with an extent that used to be in that place. This obviously results in a directory corruption. Fix the problem by properly reloading the last extent. 
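The heart of the fix, lifted from the hunk in the diff below and annotated here (it relies on the local variables of udf_add_entry(), so it is an excerpt rather than a standalone function):

/* The cached extent position may describe the pre-merge layout: drop it. */
brelse(epos.bh);
epos.bh = NULL;
epos.block = dinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(dir);

/* Walk forward again so eloc/elen describe the extent actually on disk now. */
while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
       (EXT_RECORDED_ALLOCATED >> 30))
	;

/* Only now is it safe to compute the block backing the new directory entry. */
block = eloc.logicalBlockNum + ((elen - 1) >> dir->i_sb->s_blocksize_bits);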
Signed-off-by: Jan Kara --- fs/udf/namei.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 26815a25379d..a2974f7563a2 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -471,15 +471,19 @@ add: f_pos >> dir->i_sb->s_blocksize_bits, 1, err); if (!fibh->ebh) goto out_err; + /* Extents could have been merged, invalidate our position */ + brelse(epos.bh); + epos.bh = NULL; + epos.block = dinfo->i_location; + epos.offset = udf_file_entry_alloc_offset(dir); if (!fibh->soffset) { - if (udf_next_aext(dir, &epos, &eloc, &elen, 1) == - (EXT_RECORDED_ALLOCATED >> 30)) { - block = eloc.logicalBlockNum + ((elen - 1) >> + /* Find the freshly allocated block */ + while (udf_next_aext(dir, &epos, &eloc, &elen, 1) == + (EXT_RECORDED_ALLOCATED >> 30)) + ; + block = eloc.logicalBlockNum + ((elen - 1) >> dir->i_sb->s_blocksize_bits); - } else - block++; - brelse(fibh->sbh); fibh->sbh = fibh->ebh; fi = (struct fileIdentDesc *)(fibh->sbh->b_data); -- cgit v1.2.2 From a4264b3f4049ae7aeeb0017f8158119e22fa354f Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sun, 12 Dec 2010 23:18:15 +0100 Subject: UDF: Close small mem leak in udf_find_entry() Hi, There's a small memory leak in fs/udf/namei.c::udf_find_entry(). We dynamically allocate memory for 'fname' with kmalloc() and in most situations we free it before we leave the function, but there is one situation where we do not (but should). This patch closes the leak by jumping to the 'out_ok' label which does the correct cleanup rather than doing half the cleanup and returning directly. Signed-off-by: Jesper Juhl Signed-off-by: Jan Kara --- fs/udf/namei.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/udf/namei.c b/fs/udf/namei.c index a2974f7563a2..2be0f9eb86d2 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -227,10 +227,8 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir, } if ((cfi->fileCharacteristics & FID_FILE_CHAR_PARENT) && - isdotdot) { - brelse(epos.bh); - return fi; - } + isdotdot) + goto out_ok; if (!lfi) continue; -- cgit v1.2.2 From 262f86adcc0665872812a7458a5d03e19e0efe33 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Thu, 11 Nov 2010 18:42:16 +1100 Subject: cifs: don't overwrite dentry name in d_revalidate Instead, use fatfs's method for dealing with negative dentries to preserve case, rather than overwrite dentry name in d_revalidate, which is a bit ugly and also gets in the way of doing lock-free path walking. Signed-off-by: Nick Piggin Signed-off-by: Steve French --- fs/cifs/dir.c | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 3840eddbfb7a..521d841b1fd1 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -656,22 +656,34 @@ lookup_out: static int cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) { - int isValid = 1; - if (direntry->d_inode) { if (cifs_revalidate_dentry(direntry)) return 0; - } else { - cFYI(1, "neg dentry 0x%p name = %s", - direntry, direntry->d_name.name); - if (time_after(jiffies, direntry->d_time + HZ) || - !lookupCacheEnabled) { - d_drop(direntry); - isValid = 0; - } + else + return 1; } - return isValid; + /* + * This may be nfsd (or something), anyway, we can't see the + * intent of this. So, since this can be for creation, drop it. 
+ */ + if (!nd) + return 0; + + /* + * Drop the negative dentry, in order to make sure to use the + * case sensitive name which is specified by user if this is + * for creation. + */ + if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT))) { + if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) + return 0; + } + + if (time_after(jiffies, direntry->d_time + HZ) || !lookupCacheEnabled) + return 0; + + return 1; } /* static int cifs_d_delete(struct dentry *direntry) @@ -709,15 +721,8 @@ static int cifs_ci_compare(struct dentry *dentry, struct qstr *a, struct nls_table *codepage = CIFS_SB(dentry->d_inode->i_sb)->local_nls; if ((a->len == b->len) && - (nls_strnicmp(codepage, a->name, b->name, a->len) == 0)) { - /* - * To preserve case, don't let an existing negative dentry's - * case take precedence. If a is not a negative dentry, this - * should have no side effects - */ - memcpy((void *)a->name, b->name, a->len); + (nls_strnicmp(codepage, a->name, b->name, a->len) == 0)) return 0; - } return 1; } -- cgit v1.2.2 From df8fbc241aa3c451248b1f19fff3a3f604b107f9 Mon Sep 17 00:00:00 2001 From: Shirish Pargaonkar Date: Sat, 11 Dec 2010 14:19:22 -0600 Subject: cifs: Support NTLM2 session security during NTLMSSP authentication [try #5] Indicate to the server a capability of NTLM2 session security (NTLM2 Key) during ntlmssp protocol exchange in one of the bits of the flags field. If server supports this capability, send NTLM2 key even if signing is not required on the server. If the server requires signing, the session keys exchanged for NTLMv2 and NTLM2 session security in auth packet of the nlmssp exchange are same. Send the same flags in authenticate message (type 3) that client sent in negotiate message (type 1). Remove function setup_ntlmssp_neg_req Make sure ntlmssp negotiate and authenticate messages are zero'ed before they are built. Reported-and-Tested-by: Robbert Kouprie Signed-off-by: Shirish Pargaonkar Acked-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/sess.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 7b01d3f6eed6..54d9f76deff9 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -431,13 +431,14 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer; __u32 flags; + memset(pbuffer, 0, sizeof(NEGOTIATE_MESSAGE)); memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8); sec_blob->MessageType = NtLmNegotiate; /* BB is NTLMV2 session security format easier to use here? 
*/ flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | - NTLMSSP_NEGOTIATE_NTLM; + NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; if (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { flags |= NTLMSSP_NEGOTIATE_SIGN; @@ -446,7 +447,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, NTLMSSP_NEGOTIATE_EXTENDED_SEC; } - sec_blob->NegotiateFlags |= cpu_to_le32(flags); + sec_blob->NegotiateFlags = cpu_to_le32(flags); sec_blob->WorkstationName.BufferOffset = 0; sec_blob->WorkstationName.Length = 0; @@ -477,7 +478,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | - NTLMSSP_NEGOTIATE_NTLM; + NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; if (ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) flags |= NTLMSSP_NEGOTIATE_SIGN; @@ -485,7 +486,7 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE); - sec_blob->NegotiateFlags |= cpu_to_le32(flags); + sec_blob->NegotiateFlags = cpu_to_le32(flags); sec_blob->LmChallengeResponse.BufferOffset = cpu_to_le32(sizeof(AUTHENTICATE_MESSAGE)); @@ -544,8 +545,9 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, sec_blob->WorkstationName.MaximumLength = 0; tmp += 2; - if ((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) && - !calc_seckey(ses)) { + if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) || + (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC)) + && !calc_seckey(ses)) { memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE); sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer); sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE); @@ -562,16 +564,6 @@ setup_ntlmv2_ret: *buflen = tmp - pbuffer; return rc; } - - -static void setup_ntlmssp_neg_req(SESSION_SETUP_ANDX *pSMB, - struct cifsSesInfo *ses) -{ - build_ntlmssp_negotiate_blob(&pSMB->req.SecurityBlob[0], ses); - pSMB->req.SecurityBlobLength = cpu_to_le16(sizeof(NEGOTIATE_MESSAGE)); - - return; -} #endif int @@ -828,16 +820,19 @@ ssetup_ntlmssp_authenticate: capabilities |= CAP_EXTENDED_SECURITY; pSMB->req.Capabilities |= cpu_to_le32(capabilities); if (phase == NtLmNegotiate) { - setup_ntlmssp_neg_req(pSMB, ses); + build_ntlmssp_negotiate_blob( + pSMB->req.SecurityBlob, ses); iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE); - iov[1].iov_base = &pSMB->req.SecurityBlob[0]; + iov[1].iov_base = pSMB->req.SecurityBlob; + pSMB->req.SecurityBlobLength = + cpu_to_le16(sizeof(NEGOTIATE_MESSAGE)); } else if (phase == NtLmAuthenticate) { /* 5 is an empirical value, large enought to * hold authenticate message, max 10 of * av paris, doamin,user,workstation mames, * flags etc.. */ - ntlmsspblob = kmalloc( + ntlmsspblob = kzalloc( 5*sizeof(struct _AUTHENTICATE_MESSAGE), GFP_KERNEL); if (!ntlmsspblob) { -- cgit v1.2.2 From a9f1b85e5ba80519dea6974e3574fa7a30cc5e29 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Mon, 13 Dec 2010 19:08:35 +0300 Subject: CIFS: Simplify ipv*_connect functions into one (try #4) Make connect logic more ip-protocol independent and move RFC1001 stuff into a separate function. Also replace union addr in TCP_Server_Info structure with sockaddr_storage. 
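The family-agnostic style the rewrite adopts boils down to keeping one struct sockaddr_storage and switching on ss_family whenever a protocol-specific field is needed. A minimal sketch of that approach (illustrative, not the patched cifs code) for extracting the destination port:

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/socket.h>

static __be16 example_dst_port(const struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((const struct sockaddr_in *)ss)->sin_port;
	case AF_INET6:
		return ((const struct sockaddr_in6 *)ss)->sin6_port;
	default:
		return 0;	/* unknown address family */
	}
}

Storing the destination this way is what lets the separate ipv4_connect() and ipv6_connect() collapse into the single generic_ip_connect() in the diff below.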
Signed-off-by: Pavel Shilovsky Reviewed-and-Tested-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cache.c | 16 ++- fs/cifs/cifs_spnego.c | 10 +- fs/cifs/cifsfs.c | 15 +- fs/cifs/cifsglob.h | 5 +- fs/cifs/connect.c | 383 +++++++++++++++++++++----------------------------- fs/cifs/transport.c | 2 +- 6 files changed, 184 insertions(+), 247 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c index 224d7bbd1fcc..e654dfd092c3 100644 --- a/fs/cifs/cache.c +++ b/fs/cifs/cache.c @@ -64,7 +64,9 @@ static uint16_t cifs_server_get_key(const void *cookie_netfs_data, void *buffer, uint16_t maxbuf) { const struct TCP_Server_Info *server = cookie_netfs_data; - const struct sockaddr *sa = (struct sockaddr *) &server->addr.sockAddr; + const struct sockaddr *sa = (struct sockaddr *) &server->dstaddr; + const struct sockaddr_in *addr = (struct sockaddr_in *) sa; + const struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) sa; struct cifs_server_key *key = buffer; uint16_t key_len = sizeof(struct cifs_server_key); @@ -76,16 +78,16 @@ static uint16_t cifs_server_get_key(const void *cookie_netfs_data, */ switch (sa->sa_family) { case AF_INET: - key->family = server->addr.sockAddr.sin_family; - key->port = server->addr.sockAddr.sin_port; - key->addr[0].ipv4_addr = server->addr.sockAddr.sin_addr; + key->family = sa->sa_family; + key->port = addr->sin_port; + key->addr[0].ipv4_addr = addr->sin_addr; key_len += sizeof(key->addr[0].ipv4_addr); break; case AF_INET6: - key->family = server->addr.sockAddr6.sin6_family; - key->port = server->addr.sockAddr6.sin6_port; - key->addr[0].ipv6_addr = server->addr.sockAddr6.sin6_addr; + key->family = sa->sa_family; + key->port = addr6->sin6_port; + key->addr[0].ipv6_addr = addr6->sin6_addr; key_len += sizeof(key->addr[0].ipv6_addr); break; diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index 87044906cd1f..4dfba8283165 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c @@ -98,6 +98,8 @@ struct key * cifs_get_spnego_key(struct cifsSesInfo *sesInfo) { struct TCP_Server_Info *server = sesInfo->server; + struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr; + struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr; char *description, *dp; size_t desc_len; struct key *spnego_key; @@ -127,10 +129,10 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo) dp = description + strlen(description); /* add the server address */ - if (server->addr.sockAddr.sin_family == AF_INET) - sprintf(dp, "ip4=%pI4", &server->addr.sockAddr.sin_addr); - else if (server->addr.sockAddr.sin_family == AF_INET6) - sprintf(dp, "ip6=%pI6", &server->addr.sockAddr6.sin6_addr); + if (server->dstaddr.ss_family == AF_INET) + sprintf(dp, "ip4=%pI4", &sa->sin_addr); + else if (server->dstaddr.ss_family == AF_INET6) + sprintf(dp, "ip6=%pI6", &sa6->sin6_addr); else goto out; diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 3936aa7f2c22..9df5c0b94d0f 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -351,18 +351,19 @@ cifs_evict_inode(struct inode *inode) static void cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server) { + struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr; + struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr; + seq_printf(s, ",addr="); - switch (server->addr.sockAddr.sin_family) { + switch (server->dstaddr.ss_family) { case AF_INET: - seq_printf(s, "%pI4", &server->addr.sockAddr.sin_addr.s_addr); + seq_printf(s, "%pI4", &sa->sin_addr.s_addr); break; case AF_INET6: - 
seq_printf(s, "%pI6", - &server->addr.sockAddr6.sin6_addr.s6_addr); - if (server->addr.sockAddr6.sin6_scope_id) - seq_printf(s, "%%%u", - server->addr.sockAddr6.sin6_scope_id); + seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr); + if (sa6->sin6_scope_id) + seq_printf(s, "%%%u", sa6->sin6_scope_id); break; default: seq_printf(s, "(unknown)"); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 7136c0c3e2f9..dfd2d46275ab 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -163,10 +163,7 @@ struct TCP_Server_Info { char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; char *hostname; /* hostname portion of UNC string */ struct socket *ssocket; - union { - struct sockaddr_in sockAddr; - struct sockaddr_in6 sockAddr6; - } addr; + struct sockaddr_storage dstaddr; struct sockaddr_storage srcaddr; /* locally bind to this IP */ wait_queue_head_t response_q; wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index cc1a8604a790..b90c7411f4f0 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -115,8 +115,8 @@ struct smb_vol { #define TLINK_ERROR_EXPIRE (1 * HZ) #define TLINK_IDLE_EXPIRE (600 * HZ) -static int ipv4_connect(struct TCP_Server_Info *server); -static int ipv6_connect(struct TCP_Server_Info *server); +static int ip_connect(struct TCP_Server_Info *server); +static int generic_ip_connect(struct TCP_Server_Info *server); static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink); static void cifs_prune_tlinks(struct work_struct *work); @@ -200,10 +200,9 @@ cifs_reconnect(struct TCP_Server_Info *server) while ((server->tcpStatus != CifsExiting) && (server->tcpStatus != CifsGood)) { try_to_freeze(); - if (server->addr.sockAddr6.sin6_family == AF_INET6) - rc = ipv6_connect(server); - else - rc = ipv4_connect(server); + + /* we should try only the port we connected to before */ + rc = generic_ip_connect(server); if (rc) { cFYI(1, "reconnect error %d", rc); msleep(3000); @@ -477,7 +476,7 @@ incomplete_rcv: * initialize frame) */ cifs_set_port((struct sockaddr *) - &server->addr.sockAddr, CIFS_PORT); + &server->dstaddr, CIFS_PORT); cifs_reconnect(server); csocket = server->ssocket; wake_up(&server->response_q); @@ -1459,30 +1458,37 @@ static bool match_address(struct TCP_Server_Info *server, struct sockaddr *addr, struct sockaddr *srcaddr) { - struct sockaddr_in *addr4 = (struct sockaddr_in *)addr; - struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr; - switch (addr->sa_family) { - case AF_INET: - if (addr4->sin_addr.s_addr != - server->addr.sockAddr.sin_addr.s_addr) + case AF_INET: { + struct sockaddr_in *addr4 = (struct sockaddr_in *)addr; + struct sockaddr_in *srv_addr4 = + (struct sockaddr_in *)&server->dstaddr; + + if (addr4->sin_addr.s_addr != srv_addr4->sin_addr.s_addr) return false; - if (addr4->sin_port && - addr4->sin_port != server->addr.sockAddr.sin_port) + if (addr4->sin_port && addr4->sin_port != srv_addr4->sin_port) return false; break; - case AF_INET6: + } + case AF_INET6: { + struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)addr; + struct sockaddr_in6 *srv_addr6 = + (struct sockaddr_in6 *)&server->dstaddr; + if (!ipv6_addr_equal(&addr6->sin6_addr, - &server->addr.sockAddr6.sin6_addr)) + &srv_addr6->sin6_addr)) return false; - if (addr6->sin6_scope_id != - server->addr.sockAddr6.sin6_scope_id) + if (addr6->sin6_scope_id != srv_addr6->sin6_scope_id) return false; if (addr6->sin6_port && - addr6->sin6_port != server->addr.sockAddr6.sin6_port) + addr6->sin6_port != 
srv_addr6->sin6_port) return false; break; } + default: + WARN_ON(1); + return false; /* don't expect to be here */ + } if (!srcip_matches(srcaddr, (struct sockaddr *)&server->srcaddr)) return false; @@ -1681,14 +1687,13 @@ cifs_get_tcp_session(struct smb_vol *volume_info) cFYI(1, "attempting ipv6 connect"); /* BB should we allow ipv6 on port 139? */ /* other OS never observed in Wild doing 139 with v6 */ - memcpy(&tcp_ses->addr.sockAddr6, sin_server6, - sizeof(struct sockaddr_in6)); - rc = ipv6_connect(tcp_ses); - } else { - memcpy(&tcp_ses->addr.sockAddr, sin_server, - sizeof(struct sockaddr_in)); - rc = ipv4_connect(tcp_ses); - } + memcpy(&tcp_ses->dstaddr, sin_server6, + sizeof(struct sockaddr_in6)); + } else + memcpy(&tcp_ses->dstaddr, sin_server, + sizeof(struct sockaddr_in)); + + rc = ip_connect(tcp_ses); if (rc < 0) { cERROR(1, "Error connecting to socket. Aborting operation"); goto out_err_crypto_release; @@ -1793,6 +1798,8 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) { int rc = -ENOMEM, xid; struct cifsSesInfo *ses; + struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; + struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; xid = GetXid(); @@ -1836,12 +1843,10 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) /* new SMB session uses our server ref */ ses->server = server; - if (server->addr.sockAddr6.sin6_family == AF_INET6) - sprintf(ses->serverName, "%pI6", - &server->addr.sockAddr6.sin6_addr); + if (server->dstaddr.ss_family == AF_INET6) + sprintf(ses->serverName, "%pI6", &addr6->sin6_addr); else - sprintf(ses->serverName, "%pI4", - &server->addr.sockAddr.sin_addr.s_addr); + sprintf(ses->serverName, "%pI4", &addr->sin_addr); if (volume_info->username) strncpy(ses->userName, volume_info->username, @@ -2136,19 +2141,106 @@ bind_socket(struct TCP_Server_Info *server) } static int -ipv4_connect(struct TCP_Server_Info *server) +ip_rfc1001_connect(struct TCP_Server_Info *server) { int rc = 0; - int val; - bool connected = false; - __be16 orig_port = 0; + /* + * some servers require RFC1001 sessinit before sending + * negprot - BB check reconnection in case where second + * sessinit is sent but no second negprot + */ + struct rfc1002_session_packet *ses_init_buf; + struct smb_hdr *smb_buf; + ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet), + GFP_KERNEL); + if (ses_init_buf) { + ses_init_buf->trailer.session_req.called_len = 32; + + if (server->server_RFC1001_name && + server->server_RFC1001_name[0] != 0) + rfc1002mangle(ses_init_buf->trailer. + session_req.called_name, + server->server_RFC1001_name, + RFC1001_NAME_LEN_WITH_NULL); + else + rfc1002mangle(ses_init_buf->trailer. + session_req.called_name, + DEFAULT_CIFS_CALLED_NAME, + RFC1001_NAME_LEN_WITH_NULL); + + ses_init_buf->trailer.session_req.calling_len = 32; + + /* + * calling name ends in null (byte 16) from old smb + * convention. + */ + if (server->workstation_RFC1001_name && + server->workstation_RFC1001_name[0] != 0) + rfc1002mangle(ses_init_buf->trailer. + session_req.calling_name, + server->workstation_RFC1001_name, + RFC1001_NAME_LEN_WITH_NULL); + else + rfc1002mangle(ses_init_buf->trailer. 
+ session_req.calling_name, + "LINUX_CIFS_CLNT", + RFC1001_NAME_LEN_WITH_NULL); + + ses_init_buf->trailer.session_req.scope1 = 0; + ses_init_buf->trailer.session_req.scope2 = 0; + smb_buf = (struct smb_hdr *)ses_init_buf; + + /* sizeof RFC1002_SESSION_REQUEST with no scope */ + smb_buf->smb_buf_length = 0x81000044; + rc = smb_send(server, smb_buf, 0x44); + kfree(ses_init_buf); + /* + * RFC1001 layer in at least one server + * requires very short break before negprot + * presumably because not expecting negprot + * to follow so fast. This is a simple + * solution that works without + * complicating the code and causes no + * significant slowing down on mount + * for everyone else + */ + usleep_range(1000, 2000); + } + /* + * else the negprot may still work without this + * even though malloc failed + */ + + return rc; +} + +static int +generic_ip_connect(struct TCP_Server_Info *server) +{ + int rc = 0; + unsigned short int sport; + int slen, sfamily; struct socket *socket = server->ssocket; + struct sockaddr *saddr; + + saddr = (struct sockaddr *) &server->dstaddr; + + if (server->dstaddr.ss_family == AF_INET6) { + sport = ((struct sockaddr_in6 *) saddr)->sin6_port; + slen = sizeof(struct sockaddr_in6); + sfamily = AF_INET6; + } else { + sport = ((struct sockaddr_in *) saddr)->sin_port; + slen = sizeof(struct sockaddr_in); + sfamily = AF_INET; + } if (socket == NULL) { - rc = sock_create_kern(PF_INET, SOCK_STREAM, + rc = sock_create_kern(sfamily, SOCK_STREAM, IPPROTO_TCP, &socket); if (rc < 0) { cERROR(1, "Error %d creating socket", rc); + server->ssocket = NULL; return rc; } @@ -2156,63 +2248,28 @@ ipv4_connect(struct TCP_Server_Info *server) cFYI(1, "Socket created"); server->ssocket = socket; socket->sk->sk_allocation = GFP_NOFS; - cifs_reclassify_socket4(socket); + if (sfamily == AF_INET6) + cifs_reclassify_socket6(socket); + else + cifs_reclassify_socket4(socket); } rc = bind_socket(server); if (rc < 0) return rc; - /* user overrode default port */ - if (server->addr.sockAddr.sin_port) { - rc = socket->ops->connect(socket, (struct sockaddr *) - &server->addr.sockAddr, - sizeof(struct sockaddr_in), 0); - if (rc >= 0) - connected = true; - } - - if (!connected) { - /* save original port so we can retry user specified port - later if fall back ports fail this time */ - orig_port = server->addr.sockAddr.sin_port; - - /* do not retry on the same port we just failed on */ - if (server->addr.sockAddr.sin_port != htons(CIFS_PORT)) { - server->addr.sockAddr.sin_port = htons(CIFS_PORT); - rc = socket->ops->connect(socket, - (struct sockaddr *) - &server->addr.sockAddr, - sizeof(struct sockaddr_in), 0); - if (rc >= 0) - connected = true; - } - } - if (!connected) { - server->addr.sockAddr.sin_port = htons(RFC1001_PORT); - rc = socket->ops->connect(socket, (struct sockaddr *) - &server->addr.sockAddr, - sizeof(struct sockaddr_in), 0); - if (rc >= 0) - connected = true; - } - - /* give up here - unless we want to retry on different - protocol families some day */ - if (!connected) { - if (orig_port) - server->addr.sockAddr.sin_port = orig_port; - cFYI(1, "Error %d connecting to server via ipv4", rc); + rc = socket->ops->connect(socket, saddr, slen, 0); + if (rc < 0) { + cFYI(1, "Error %d connecting to server", rc); sock_release(socket); server->ssocket = NULL; return rc; } - /* * Eventually check for other socket options to change from - * the default. sock_setsockopt not used because it expects - * user space buffer + * the default. 
sock_setsockopt not used because it expects + * user space buffer */ socket->sk->sk_rcvtimeo = 7 * HZ; socket->sk->sk_sndtimeo = 5 * HZ; @@ -2226,7 +2283,7 @@ ipv4_connect(struct TCP_Server_Info *server) } if (server->tcp_nodelay) { - val = 1; + int val = 1; rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, (char *)&val, sizeof(val)); if (rc) @@ -2237,161 +2294,39 @@ ipv4_connect(struct TCP_Server_Info *server) socket->sk->sk_sndbuf, socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo); - /* send RFC1001 sessinit */ - if (server->addr.sockAddr.sin_port == htons(RFC1001_PORT)) { - /* some servers require RFC1001 sessinit before sending - negprot - BB check reconnection in case where second - sessinit is sent but no second negprot */ - struct rfc1002_session_packet *ses_init_buf; - struct smb_hdr *smb_buf; - ses_init_buf = kzalloc(sizeof(struct rfc1002_session_packet), - GFP_KERNEL); - if (ses_init_buf) { - ses_init_buf->trailer.session_req.called_len = 32; - if (server->server_RFC1001_name && - server->server_RFC1001_name[0] != 0) - rfc1002mangle(ses_init_buf->trailer. - session_req.called_name, - server->server_RFC1001_name, - RFC1001_NAME_LEN_WITH_NULL); - else - rfc1002mangle(ses_init_buf->trailer. - session_req.called_name, - DEFAULT_CIFS_CALLED_NAME, - RFC1001_NAME_LEN_WITH_NULL); - - ses_init_buf->trailer.session_req.calling_len = 32; - - /* calling name ends in null (byte 16) from old smb - convention. */ - if (server->workstation_RFC1001_name && - server->workstation_RFC1001_name[0] != 0) - rfc1002mangle(ses_init_buf->trailer. - session_req.calling_name, - server->workstation_RFC1001_name, - RFC1001_NAME_LEN_WITH_NULL); - else - rfc1002mangle(ses_init_buf->trailer. - session_req.calling_name, - "LINUX_CIFS_CLNT", - RFC1001_NAME_LEN_WITH_NULL); - - ses_init_buf->trailer.session_req.scope1 = 0; - ses_init_buf->trailer.session_req.scope2 = 0; - smb_buf = (struct smb_hdr *)ses_init_buf; - /* sizeof RFC1002_SESSION_REQUEST with no scope */ - smb_buf->smb_buf_length = 0x81000044; - rc = smb_send(server, smb_buf, 0x44); - kfree(ses_init_buf); - msleep(1); /* RFC1001 layer in at least one server - requires very short break before negprot - presumably because not expecting negprot - to follow so fast. This is a simple - solution that works without - complicating the code and causes no - significant slowing down on mount - for everyone else */ - } - /* else the negprot may still work without this - even though malloc failed */ - - } + if (sport == htons(RFC1001_PORT)) + rc = ip_rfc1001_connect(server); return rc; } static int -ipv6_connect(struct TCP_Server_Info *server) +ip_connect(struct TCP_Server_Info *server) { - int rc = 0; - int val; - bool connected = false; - __be16 orig_port = 0; - struct socket *socket = server->ssocket; + unsigned short int *sport; + struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; + struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; - if (socket == NULL) { - rc = sock_create_kern(PF_INET6, SOCK_STREAM, - IPPROTO_TCP, &socket); - if (rc < 0) { - cERROR(1, "Error %d creating ipv6 socket", rc); - socket = NULL; - return rc; - } + if (server->dstaddr.ss_family == AF_INET6) + sport = &addr6->sin6_port; + else + sport = &addr->sin_port; - /* BB other socket options to set KEEPALIVE, NODELAY? 
*/ - cFYI(1, "ipv6 Socket created"); - server->ssocket = socket; - socket->sk->sk_allocation = GFP_NOFS; - cifs_reclassify_socket6(socket); - } + if (*sport == 0) { + int rc; - rc = bind_socket(server); - if (rc < 0) - return rc; + /* try with 445 port at first */ + *sport = htons(CIFS_PORT); - /* user overrode default port */ - if (server->addr.sockAddr6.sin6_port) { - rc = socket->ops->connect(socket, - (struct sockaddr *) &server->addr.sockAddr6, - sizeof(struct sockaddr_in6), 0); - if (rc >= 0) - connected = true; - } - - if (!connected) { - /* save original port so we can retry user specified port - later if fall back ports fail this time */ - - orig_port = server->addr.sockAddr6.sin6_port; - /* do not retry on the same port we just failed on */ - if (server->addr.sockAddr6.sin6_port != htons(CIFS_PORT)) { - server->addr.sockAddr6.sin6_port = htons(CIFS_PORT); - rc = socket->ops->connect(socket, (struct sockaddr *) - &server->addr.sockAddr6, - sizeof(struct sockaddr_in6), 0); - if (rc >= 0) - connected = true; - } - } - if (!connected) { - server->addr.sockAddr6.sin6_port = htons(RFC1001_PORT); - rc = socket->ops->connect(socket, (struct sockaddr *) - &server->addr.sockAddr6, - sizeof(struct sockaddr_in6), 0); + rc = generic_ip_connect(server); if (rc >= 0) - connected = true; - } - - /* give up here - unless we want to retry on different - protocol families some day */ - if (!connected) { - if (orig_port) - server->addr.sockAddr6.sin6_port = orig_port; - cFYI(1, "Error %d connecting to server via ipv6", rc); - sock_release(socket); - server->ssocket = NULL; - return rc; - } - - /* - * Eventually check for other socket options to change from - * the default. sock_setsockopt not used because it expects - * user space buffer - */ - socket->sk->sk_rcvtimeo = 7 * HZ; - socket->sk->sk_sndtimeo = 5 * HZ; + return rc; - if (server->tcp_nodelay) { - val = 1; - rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY, - (char *)&val, sizeof(val)); - if (rc) - cFYI(1, "set TCP_NODELAY socket option error %d", rc); + /* if it failed, try with 139 port */ + *sport = htons(RFC1001_PORT); } - server->ssocket = socket; - - return rc; + return generic_ip_connect(server); } void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index e0588cdf4cc5..59ca81b16919 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -119,7 +119,7 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) if (ssocket == NULL) return -ENOTSOCK; /* BB eventually add reconnect code here */ - smb_msg.msg_name = (struct sockaddr *) &server->addr.sockAddr; + smb_msg.msg_name = (struct sockaddr *) &server->dstaddr; smb_msg.msg_namelen = sizeof(struct sockaddr); smb_msg.msg_control = NULL; smb_msg.msg_controllen = 0; -- cgit v1.2.2 From 4b886136df2b923b6fc6b2d83faa9554e84e05ab Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Mon, 13 Dec 2010 22:18:07 +0300 Subject: CIFS: Add match_port check during looking for an existing connection (try #4) If we have a share mounted by non-standard port and try to mount another share on the same host with standard port, we connect to the first share again - that's wrong. This patch fixes this bug. 
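The comparison added below treats a mount that named no port as matching either of the default ports, while an explicitly requested port has to match exactly. In isolation the rule looks roughly like this (a hedged sketch with stand-in constants, not the exact match_port() from the diff):

#include <linux/in.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define EX_CIFS_PORT	445	/* stand-in for CIFS_PORT */
#define EX_NETBIOS_PORT	139	/* stand-in for RFC1001_PORT */

static bool example_port_matches(__be16 requested, __be16 existing)
{
	if (!requested)		/* no port given on the mount */
		return existing == htons(EX_CIFS_PORT) ||
		       existing == htons(EX_NETBIOS_PORT);
	return requested == existing;	/* explicit port must match exactly */
}

With that check in place, a mount using the default port no longer reuses a TCP_Server_Info that was established on a non-standard one.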
Signed-off-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 42 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index b90c7411f4f0..41f002fb4a04 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1453,6 +1453,40 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs) } } +/* + * If no port is specified in addr structure, we try to match with 445 port + * and if it fails - with 139 ports. It should be called only if address + * families of server and addr are equal. + */ +static bool +match_port(struct TCP_Server_Info *server, struct sockaddr *addr) +{ + unsigned short int port, *sport; + + switch (addr->sa_family) { + case AF_INET: + sport = &((struct sockaddr_in *) &server->dstaddr)->sin_port; + port = ((struct sockaddr_in *) addr)->sin_port; + break; + case AF_INET6: + sport = &((struct sockaddr_in6 *) &server->dstaddr)->sin6_port; + port = ((struct sockaddr_in6 *) addr)->sin6_port; + break; + default: + WARN_ON(1); + return false; + } + + if (!port) { + port = htons(CIFS_PORT); + if (port == *sport) + return true; + + port = htons(RFC1001_PORT); + } + + return port == *sport; +} static bool match_address(struct TCP_Server_Info *server, struct sockaddr *addr, @@ -1466,8 +1500,6 @@ match_address(struct TCP_Server_Info *server, struct sockaddr *addr, if (addr4->sin_addr.s_addr != srv_addr4->sin_addr.s_addr) return false; - if (addr4->sin_port && addr4->sin_port != srv_addr4->sin_port) - return false; break; } case AF_INET6: { @@ -1480,9 +1512,6 @@ match_address(struct TCP_Server_Info *server, struct sockaddr *addr, return false; if (addr6->sin6_scope_id != srv_addr6->sin6_scope_id) return false; - if (addr6->sin6_port && - addr6->sin6_port != srv_addr6->sin6_port) - return false; break; } default: @@ -1555,6 +1584,9 @@ cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol) (struct sockaddr *)&vol->srcaddr)) continue; + if (!match_port(server, addr)) + continue; + if (!match_security(server, vol)) continue; -- cgit v1.2.2 From eeb910a6d46103594eb63e6eba1aeb02022368a4 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Thu, 25 Nov 2010 15:12:39 +0300 Subject: CIFS: Simplify non-posix open stuff (try #2) Delete cifs_open_inode_helper and move non-posix open related things to cifs_nt_open function. Reviewed-by: Jeff Layton Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/file.c | 189 ++++++++++++++++++++++----------------------------------- 1 file changed, 73 insertions(+), 116 deletions(-) (limited to 'fs') diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 5a28660ca2b5..f95ba451173f 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -104,53 +104,6 @@ static inline int cifs_get_disposition(unsigned int flags) return FILE_OPEN; } -static inline int cifs_open_inode_helper(struct inode *inode, - struct cifsTconInfo *pTcon, __u32 oplock, FILE_ALL_INFO *buf, - char *full_path, int xid) -{ - struct cifsInodeInfo *pCifsInode = CIFS_I(inode); - struct timespec temp; - int rc; - - if (pCifsInode->clientCanCacheRead) { - /* we have the inode open somewhere else - no need to discard cache data */ - goto client_can_cache; - } - - /* BB need same check in cifs_create too? 
*/ - /* if not oplocked, invalidate inode pages if mtime or file - size changed */ - temp = cifs_NTtimeToUnix(buf->LastWriteTime); - if (timespec_equal(&inode->i_mtime, &temp) && - (inode->i_size == - (loff_t)le64_to_cpu(buf->EndOfFile))) { - cFYI(1, "inode unchanged on server"); - } else { - if (inode->i_mapping) { - /* BB no need to lock inode until after invalidate - since namei code should already have it locked? */ - rc = filemap_write_and_wait(inode->i_mapping); - mapping_set_error(inode->i_mapping, rc); - } - cFYI(1, "invalidating remote inode since open detected it " - "changed"); - invalidate_remote_inode(inode); - } - -client_can_cache: - if (pTcon->unix_ext) - rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, - xid); - else - rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, - xid, NULL); - - cifs_set_oplock_level(pCifsInode, oplock); - - return rc; -} - int cifs_posix_open(char *full_path, struct inode **pinode, struct super_block *sb, int mode, unsigned int f_flags, __u32 *poplock, __u16 *pnetfid, int xid) @@ -213,6 +166,76 @@ posix_open_ret: return rc; } +static int +cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, + struct cifsTconInfo *tcon, unsigned int f_flags, __u32 *poplock, + __u16 *pnetfid, int xid) +{ + int rc; + int desiredAccess; + int disposition; + FILE_ALL_INFO *buf; + + desiredAccess = cifs_convert_flags(f_flags); + +/********************************************************************* + * open flag mapping table: + * + * POSIX Flag CIFS Disposition + * ---------- ---------------- + * O_CREAT FILE_OPEN_IF + * O_CREAT | O_EXCL FILE_CREATE + * O_CREAT | O_TRUNC FILE_OVERWRITE_IF + * O_TRUNC FILE_OVERWRITE + * none of the above FILE_OPEN + * + * Note that there is not a direct match between disposition + * FILE_SUPERSEDE (ie create whether or not file exists although + * O_CREAT | O_TRUNC is similar but truncates the existing + * file rather than creating a new file as FILE_SUPERSEDE does + * (which uses the attributes / metadata passed in on open call) + *? + *? O_SYNC is a reasonable match to CIFS writethrough flag + *? and the read write flags match reasonably. O_LARGEFILE + *? is irrelevant because largefile support is always used + *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, + * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation + *********************************************************************/ + + disposition = cifs_get_disposition(f_flags); + + /* BB pass O_SYNC flag through on file attributes .. 
BB */ + + buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (tcon->ses->capabilities & CAP_NT_SMBS) + rc = CIFSSMBOpen(xid, tcon, full_path, disposition, + desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf, + cifs_sb->local_nls, cifs_sb->mnt_cifs_flags + & CIFS_MOUNT_MAP_SPECIAL_CHR); + else + rc = SMBLegacyOpen(xid, tcon, full_path, disposition, + desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf, + cifs_sb->local_nls, cifs_sb->mnt_cifs_flags + & CIFS_MOUNT_MAP_SPECIAL_CHR); + + if (rc) + goto out; + + if (tcon->unix_ext) + rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb, + xid); + else + rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb, + xid, pnetfid); + +out: + kfree(buf); + return rc; +} + struct cifsFileInfo * cifs_new_fileinfo(__u16 fileHandle, struct file *file, struct tcon_link *tlink, __u32 oplock) @@ -317,10 +340,7 @@ int cifs_open(struct inode *inode, struct file *file) struct cifsFileInfo *pCifsFile = NULL; struct cifsInodeInfo *pCifsInode; char *full_path = NULL; - int desiredAccess; - int disposition; __u16 netfid; - FILE_ALL_INFO *buf = NULL; xid = GetXid(); @@ -385,71 +405,9 @@ int cifs_open(struct inode *inode, struct file *file) or DFS errors */ } - desiredAccess = cifs_convert_flags(file->f_flags); - -/********************************************************************* - * open flag mapping table: - * - * POSIX Flag CIFS Disposition - * ---------- ---------------- - * O_CREAT FILE_OPEN_IF - * O_CREAT | O_EXCL FILE_CREATE - * O_CREAT | O_TRUNC FILE_OVERWRITE_IF - * O_TRUNC FILE_OVERWRITE - * none of the above FILE_OPEN - * - * Note that there is not a direct match between disposition - * FILE_SUPERSEDE (ie create whether or not file exists although - * O_CREAT | O_TRUNC is similar but truncates the existing - * file rather than creating a new file as FILE_SUPERSEDE does - * (which uses the attributes / metadata passed in on open call) - *? - *? O_SYNC is a reasonable match to CIFS writethrough flag - *? and the read write flags match reasonably. O_LARGEFILE - *? is irrelevant because largefile support is always used - *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, - * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation - *********************************************************************/ - - disposition = cifs_get_disposition(file->f_flags); - - /* BB pass O_SYNC flag through on file attributes .. 
BB */ - - /* Also refresh inode by passing in file_info buf returned by SMBOpen - and calling get_inode_info with returned buf (at least helps - non-Unix server case) */ - - /* BB we can not do this if this is the second open of a file - and the first handle has writebehind data, we might be - able to simply do a filemap_fdatawrite/filemap_fdatawait first */ - buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); - if (!buf) { - rc = -ENOMEM; - goto out; - } - - if (tcon->ses->capabilities & CAP_NT_SMBS) - rc = CIFSSMBOpen(xid, tcon, full_path, disposition, - desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf, - cifs_sb->local_nls, cifs_sb->mnt_cifs_flags - & CIFS_MOUNT_MAP_SPECIAL_CHR); - else - rc = -EIO; /* no NT SMB support fall into legacy open below */ - - if (rc == -EIO) { - /* Old server, try legacy style OpenX */ - rc = SMBLegacyOpen(xid, tcon, full_path, disposition, - desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf, - cifs_sb->local_nls, cifs_sb->mnt_cifs_flags - & CIFS_MOUNT_MAP_SPECIAL_CHR); - } - if (rc) { - cFYI(1, "cifs_open returned 0x%x", rc); - goto out; - } - - rc = cifs_open_inode_helper(inode, tcon, oplock, buf, full_path, xid); - if (rc != 0) + rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, + &oplock, &netfid, xid); + if (rc) goto out; pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock); @@ -481,7 +439,6 @@ int cifs_open(struct inode *inode, struct file *file) } out: - kfree(buf); kfree(full_path); FreeXid(xid); cifs_put_tlink(tlink); -- cgit v1.2.2 From 7e12eddb73d4f288b0339ee13832a34d6bc4fd90 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Thu, 25 Nov 2010 17:20:20 +0300 Subject: CIFS: Simplify cifs_open code Make the code more general for use in posix and non-posix open. Reviewed-by: Jeff Layton Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/file.c | 54 +++++++++++++++++++++++------------------------------- 1 file changed, 23 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/fs/cifs/file.c b/fs/cifs/file.c index f95ba451173f..97ddbf2fdfc3 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -340,6 +340,7 @@ int cifs_open(struct inode *inode, struct file *file) struct cifsFileInfo *pCifsFile = NULL; struct cifsInodeInfo *pCifsInode; char *full_path = NULL; + bool posix_open_ok = false; __u16 netfid; xid = GetXid(); @@ -378,17 +379,7 @@ int cifs_open(struct inode *inode, struct file *file) file->f_flags, &oplock, &netfid, xid); if (rc == 0) { cFYI(1, "posix open succeeded"); - - pCifsFile = cifs_new_fileinfo(netfid, file, tlink, - oplock); - if (pCifsFile == NULL) { - CIFSSMBClose(xid, tcon, netfid); - rc = -ENOMEM; - } - - cifs_fscache_set_inode_cookie(inode, file); - - goto out; + posix_open_ok = true; } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { if (tcon->ses->serverNOS) cERROR(1, "server %s of type %s returned" @@ -405,37 +396,38 @@ int cifs_open(struct inode *inode, struct file *file) or DFS errors */ } - rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, - &oplock, &netfid, xid); - if (rc) - goto out; + if (!posix_open_ok) { + rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, + file->f_flags, &oplock, &netfid, xid); + if (rc) + goto out; + } pCifsFile = cifs_new_fileinfo(netfid, file, tlink, oplock); if (pCifsFile == NULL) { + CIFSSMBClose(xid, tcon, netfid); rc = -ENOMEM; goto out; } cifs_fscache_set_inode_cookie(inode, file); - if (oplock & CIFS_CREATE_ACTION) { + if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) { /* time to set mode which we 
can not set earlier due to problems creating new read-only files */ - if (tcon->unix_ext) { - struct cifs_unix_set_info_args args = { - .mode = inode->i_mode, - .uid = NO_CHANGE_64, - .gid = NO_CHANGE_64, - .ctime = NO_CHANGE_64, - .atime = NO_CHANGE_64, - .mtime = NO_CHANGE_64, - .device = 0, - }; - CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, - cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); - } + struct cifs_unix_set_info_args args = { + .mode = inode->i_mode, + .uid = NO_CHANGE_64, + .gid = NO_CHANGE_64, + .ctime = NO_CHANGE_64, + .atime = NO_CHANGE_64, + .mtime = NO_CHANGE_64, + .device = 0, + }; + CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, + cifs_sb->local_nls, + cifs_sb->mnt_cifs_flags & + CIFS_MOUNT_MAP_SPECIAL_CHR); } out: -- cgit v1.2.2 From 01c9a0bc60507af7f28cb9138a81836de4528199 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Thu, 6 Jan 2011 02:04:28 +0000 Subject: NFS use svc_create_xprt for NFSv4.1 callback service The new back channel transport means we call the normal creation routine as well as svc_xprt_put. Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/callback.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 93a8b3bd69e3..0e9fae831dfa 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -177,30 +177,38 @@ nfs41_callback_svc(void *vrqstp) struct svc_rqst * nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) { - struct svc_xprt *bc_xprt; - struct svc_rqst *rqstp = ERR_PTR(-ENOMEM); + struct svc_rqst *rqstp; + int ret; - dprintk("--> %s\n", __func__); - /* Create a svc_sock for the service */ - bc_xprt = svc_sock_create(serv, xprt->prot); - if (!bc_xprt) + /* + * Create an svc_sock for the back channel service that shares the + * fore channel connection. + * Returns the input port (0) and sets the svc_serv bc_xprt on success + */ + ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0, + SVC_SOCK_ANONYMOUS); + if (ret < 0) { + rqstp = ERR_PTR(ret); goto out; + } /* * Save the svc_serv in the transport so that it can * be referenced when the session backchannel is initialized */ - serv->bc_xprt = bc_xprt; xprt->bc_serv = serv; INIT_LIST_HEAD(&serv->sv_cb_list); spin_lock_init(&serv->sv_cb_lock); init_waitqueue_head(&serv->sv_cb_waitq); rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]); - if (IS_ERR(rqstp)) - svc_sock_destroy(bc_xprt); + if (IS_ERR(rqstp)) { + svc_xprt_put(serv->bc_xprt); + serv->bc_xprt = NULL; + } out: - dprintk("--> %s return %p\n", __func__, rqstp); + dprintk("--> %s return %ld\n", __func__, + IS_ERR(rqstp) ? PTR_ERR(rqstp) : 0); return rqstp; } -- cgit v1.2.2 From ea00528126a701845d7c445e725b271940381e3d Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Thu, 6 Jan 2011 02:04:29 +0000 Subject: NFS do not clear minor version at nfs_client free Resetting the client minor version operations causes nfs4_destroy_callback to fail to shutdown the NFSv4.1 callback service. There is no reason to reset the client minorversion operations when the nfs_client struct is being freed. Remove the minorverion reset and rename the function. 
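The rename in the diff below also keeps the usual kernel idiom of pairing the real implementation with an empty stub when the relevant feature is configured out, so callers need no #ifdefs. In generic form (hypothetical names and config symbol, not the NFS code):

struct example_client;			/* opaque here; think "struct nfs_client" */

#ifdef CONFIG_EXAMPLE_FEATURE		/* hypothetical config option */
static void example_shutdown_feature(struct example_client *clp)
{
	/* tear down the feature-specific state, e.g. destroy the session */
}
#else
static void example_shutdown_feature(struct example_client *clp)
{
	/* nothing to clean up when the feature is compiled out */
}
#endif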
Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 0870d0d4efc0..855add62abc1 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -170,21 +170,17 @@ error_0: } #ifdef CONFIG_NFS_V4 -/* - * Clears/puts all minor version specific parts from an nfs_client struct - * reverting it to minorversion 0. - */ -static void nfs4_clear_client_minor_version(struct nfs_client *clp) -{ #ifdef CONFIG_NFS_V4_1 - if (nfs4_has_session(clp)) { +static void nfs4_shutdown_session(struct nfs_client *clp) +{ + if (nfs4_has_session(clp)) nfs4_destroy_session(clp->cl_session); - clp->cl_session = NULL; - } - - clp->cl_mvops = nfs_v4_minor_ops[0]; -#endif /* CONFIG_NFS_V4_1 */ } +#else /* CONFIG_NFS_V4_1 */ +static void nfs4_shutdown_session(struct nfs_client *clp) +{ +} +#endif /* CONFIG_NFS_V4_1 */ /* * Destroy the NFS4 callback service @@ -199,7 +195,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp) { if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state)) nfs4_kill_renewd(clp); - nfs4_clear_client_minor_version(clp); + nfs4_shutdown_session(clp); nfs4_destroy_callback(clp); if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state)) nfs_idmap_delete(clp); -- cgit v1.2.2 From f4eecd5da3422e82e88e36c33cbd2595eebcacb1 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Thu, 6 Jan 2011 02:04:30 +0000 Subject: NFS implement v4.0 callback_ident Use the small id to pointer translator service to provide a unique callback identifier per SETCLIENTID call used to identify the v4.0 callback service associated with the clientid. Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ fs/nfs/inode.c | 1 + fs/nfs/internal.h | 1 + fs/nfs/nfs4proc.c | 1 + 4 files changed, 54 insertions(+) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 855add62abc1..bc3a8620e8c3 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -56,6 +56,30 @@ static DEFINE_SPINLOCK(nfs_client_lock); static LIST_HEAD(nfs_client_list); static LIST_HEAD(nfs_volume_list); static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq); +#ifdef CONFIG_NFS_V4 +static DEFINE_IDR(cb_ident_idr); /* Protected by nfs_client_lock */ + +/* + * Get a unique NFSv4.0 callback identifier which will be used + * by the V4.0 callback service to lookup the nfs_client struct + */ +static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion) +{ + int ret = 0; + + if (clp->rpc_ops->version != 4 || minorversion != 0) + return ret; +retry: + if (!idr_pre_get(&cb_ident_idr, GFP_KERNEL)) + return -ENOMEM; + spin_lock(&nfs_client_lock); + ret = idr_get_new(&cb_ident_idr, clp, &clp->cl_cb_ident); + spin_unlock(&nfs_client_lock); + if (ret == -EAGAIN) + goto retry; + return ret; +} +#endif /* CONFIG_NFS_V4 */ /* * RPC cruft for NFS @@ -144,6 +168,10 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_ clp->cl_proto = cl_init->proto; #ifdef CONFIG_NFS_V4 + err = nfs_get_cb_ident_idr(clp, cl_init->minorversion); + if (err) + goto error_cleanup; + INIT_LIST_HEAD(&clp->cl_delegations); spin_lock_init(&clp->cl_lock); INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); @@ -202,10 +230,32 @@ static void nfs4_shutdown_client(struct nfs_client *clp) rpc_destroy_wait_queue(&clp->cl_rpcwaitq); } + +/* idr_remove_all is not needed as all id's are removed by nfs_put_client */ 
+void nfs_cleanup_cb_ident_idr(void) +{ + idr_destroy(&cb_ident_idr); +} + +/* nfs_client_lock held */ +static void nfs_cb_idr_remove_locked(struct nfs_client *clp) +{ + if (clp->cl_cb_ident) + idr_remove(&cb_ident_idr, clp->cl_cb_ident); +} + #else static void nfs4_shutdown_client(struct nfs_client *clp) { } + +void nfs_cleanup_cb_ident_idr(void) +{ +} + +static void nfs_cb_idr_remove_locked(struct nfs_client *clp) +{ +} #endif /* CONFIG_NFS_V4 */ /* @@ -244,6 +294,7 @@ void nfs_put_client(struct nfs_client *clp) if (atomic_dec_and_lock(&clp->cl_count, &nfs_client_lock)) { list_del(&clp->cl_share_link); + nfs_cb_idr_remove_locked(clp); spin_unlock(&nfs_client_lock); BUG_ON(!list_empty(&clp->cl_superblocks)); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index e67e31c73416..c7782b278e8b 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1612,6 +1612,7 @@ static void __exit exit_nfs_fs(void) #ifdef CONFIG_PROC_FS rpc_proc_unregister("nfs"); #endif + nfs_cleanup_cb_ident_idr(); unregister_nfs_fs(); nfs_fs_proc_exit(); nfsiod_stop(); diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 435eae3666bd..7c803c916574 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -128,6 +128,7 @@ extern void nfs_umount(const struct nfs_mount_request *info); /* client.c */ extern struct rpc_program nfs_program; +extern void nfs_cleanup_cb_ident_idr(void); extern void nfs_put_client(struct nfs_client *); extern struct nfs_client *nfs_find_client(const struct sockaddr *, u32); extern struct nfs_client *nfs_find_client_next(struct nfs_client *); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 82f3a82b7115..e165c53db08f 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3484,6 +3484,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, struct nfs4_setclientid setclientid = { .sc_verifier = &sc_verifier, .sc_prog = program, + .sc_cb_ident = clp->cl_cb_ident, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], -- cgit v1.2.2 From 2c2618c6f29c41a0a966f14f05c8bf45fcabb750 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Thu, 6 Jan 2011 02:04:31 +0000 Subject: NFS associate sessionid with callback connection The sessions based callback service is started prior to the CREATE_SESSION call so that it can handle CB_NULL requests which can be sent before the CREATE_SESSION call returns and the session ID is known. Set the callback sessionid after a sucessful CREATE_SESSION. Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/callback.c | 31 +++++++++++++++++++++++++++++++ fs/nfs/callback.h | 1 + fs/nfs/nfs4state.c | 6 ++++++ 3 files changed, 38 insertions(+) (limited to 'fs') diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 0e9fae831dfa..c0b05497972b 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -136,6 +136,33 @@ out_err: } #if defined(CONFIG_NFS_V4_1) +/* + * * CB_SEQUENCE operations will fail until the callback sessionid is set. 
+ * */ +int nfs4_set_callback_sessionid(struct nfs_client *clp) +{ + struct svc_serv *serv = clp->cl_rpcclient->cl_xprt->bc_serv; + struct nfs4_sessionid *bc_sid; + + if (!serv->bc_xprt) + return -EINVAL; + + /* on success freed in xprt_free */ + bc_sid = kmalloc(sizeof(struct nfs4_sessionid), GFP_KERNEL); + if (!bc_sid) + return -ENOMEM; + memcpy(bc_sid->data, &clp->cl_session->sess_id.data, + NFS4_MAX_SESSIONID_LEN); + spin_lock_bh(&serv->sv_cb_lock); + serv->bc_xprt->xpt_bc_sid = bc_sid; + spin_unlock_bh(&serv->sv_cb_lock); + dprintk("%s set xpt_bc_sid=%u:%u:%u:%u for bc_xprt %p\n", __func__, + ((u32 *)bc_sid->data)[0], ((u32 *)bc_sid->data)[1], + ((u32 *)bc_sid->data)[2], ((u32 *)bc_sid->data)[3], + serv->bc_xprt); + return 0; +} + /* * The callback service for NFSv4.1 callbacks */ @@ -241,6 +268,10 @@ static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt, struct nfs_callback_data *cb_info) { } +int nfs4_set_callback_sessionid(struct nfs_client *clp) +{ + return 0; +} #endif /* CONFIG_NFS_V4_1 */ /* diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index 85a7cfd1b8dd..58d61a8ce8b9 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -137,6 +137,7 @@ extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt); extern void nfs_callback_down(int minorversion); extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid); +extern int nfs4_set_callback_sessionid(struct nfs_client *clp); #endif /* CONFIG_NFS_V4 */ /* * nfs41: Callbacks are expected to not cause substantial latency, diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index f575a3126737..485e95e8fd62 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -192,6 +192,12 @@ int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) status = nfs4_proc_create_session(clp); if (status != 0) goto out; + status = nfs4_set_callback_sessionid(clp); + if (status != 0) { + printk(KERN_WARNING "Sessionid not set. No callback service\n"); + nfs_callback_down(1); + status = 0; + } nfs41_setup_state_renewal(clp); nfs_mark_client_ready(clp, NFS_CS_READY); out: -- cgit v1.2.2 From c36fca52f5e4594ffd0ff175b328966b0d393184 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Thu, 6 Jan 2011 02:04:32 +0000 Subject: NFS refactor nfs_find_client and reference client across callback processing Fixes a bug where the nfs_client could be freed during callback processing. Refactor nfs_find_client to use minorversion specific means to locate the correct nfs_client structure. In the NFS layer, V4.0 clients are found using the callback_ident field in the CB_COMPOUND header. V4.1 clients are found using the sessionID in the CB_SEQUENCE operation which is also compared against the sessionID associated with the back channel thread after a successful CREATE_SESSION. Each of these methods finds the one an only nfs_client associated with the incoming callback request - so nfs_find_client_next is not needed. In the RPC layer, the pg_authenticate call needs to find the nfs_client. For the v4.0 callback service, the callback identifier has not been decoded so a search by address, version, and minorversion is used. The sessionid for the sessions based callback service has (usually) not been set for the pg_authenticate on a CB_NULL call which can be sent prior to the return of a CREATE_SESSION call, so the sessionid associated with the back channel thread is not used to find the client in pg_authenticate for CB_NULL calls. 
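Condensed, the lookup strategy just described is a two-way dispatch on the minor version. The fragment below is only a sketch of the logic added to callback_xdr.c further down, not a drop-in replacement for it:

	/* Sketch of the per-minorversion client lookup (full diff below). */
	if (hdr_arg.minorversion == 0) {
		/*
		 * v4.0: the CB_COMPOUND header carries the callback_ident
		 * handed out at SETCLIENTID time, which indexes the idr
		 * (id -> nfs_client).
		 */
		cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident);
		if (!cps.clp)
			return rpc_drop_reply;
	} else {
		/*
		 * v4.1: remember the sessionid bound to the back channel;
		 * CB_SEQUENCE compares it with the incoming csa_sessionid
		 * and takes the nfs_client reference there.
		 */
		cps.svc_sid = bc_xprt_sid(rqstp);
	}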
Pass the referenced nfs_client to each CB_COMPOUND operation being proceesed via the new cb_process_state structure. The reference is held across cb_compound processing. Use the new cb_process_state struct to move the NFS4ERR_RETRY_UNCACHED_REP processing from process_op into nfs4_callback_sequence where it belongs. Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/callback.c | 21 +++++- fs/nfs/callback.h | 28 ++++++-- fs/nfs/callback_proc.c | 167 +++++++++++++++++++---------------------------- fs/nfs/callback_xdr.c | 39 ++++++----- fs/nfs/client.c | 171 ++++++++++++++++++++++++++++++++----------------- fs/nfs/internal.h | 7 +- 6 files changed, 245 insertions(+), 188 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index c0b05497972b..15677e7bede5 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -16,9 +16,7 @@ #include #include #include -#if defined(CONFIG_NFS_V4_1) #include -#endif #include @@ -384,6 +382,23 @@ static int check_gss_callback_principal(struct nfs_client *clp, return SVC_OK; } +/* pg_authenticate method helper */ +static struct nfs_client *nfs_cb_find_client(struct svc_rqst *rqstp) +{ + struct nfs4_sessionid *sessionid = bc_xprt_sid(rqstp); + int is_cb_compound = rqstp->rq_proc == CB_COMPOUND ? 1 : 0; + + dprintk("--> %s rq_proc %d\n", __func__, rqstp->rq_proc); + if (svc_is_backchannel(rqstp)) + /* Sessionid (usually) set after CB_NULL ping */ + return nfs4_find_client_sessionid(svc_addr(rqstp), sessionid, + is_cb_compound); + else + /* No callback identifier in pg_authenticate */ + return nfs4_find_client_no_ident(svc_addr(rqstp)); +} + +/* pg_authenticate method for nfsv4 callback threads. */ static int nfs_callback_authenticate(struct svc_rqst *rqstp) { struct nfs_client *clp; @@ -391,7 +406,7 @@ static int nfs_callback_authenticate(struct svc_rqst *rqstp) int ret = SVC_OK; /* Don't talk to strangers */ - clp = nfs_find_client(svc_addr(rqstp), 4); + clp = nfs_cb_find_client(rqstp); if (clp == NULL) return SVC_DROP; diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index 58d61a8ce8b9..25e8802a51d1 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -34,10 +34,17 @@ enum nfs4_callback_opnum { OP_CB_ILLEGAL = 10044, }; +struct cb_process_state { + __be32 drc_status; + struct nfs_client *clp; + struct nfs4_sessionid *svc_sid; /* v4.1 callback service sessionid */ +}; + struct cb_compound_hdr_arg { unsigned int taglen; const char *tag; unsigned int minorversion; + unsigned int cb_ident; /* v4.0 callback identifier */ unsigned nops; }; @@ -103,8 +110,9 @@ struct cb_sequenceres { uint32_t csr_target_highestslotid; }; -extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args, - struct cb_sequenceres *res); +extern __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, + struct cb_sequenceres *res, + struct cb_process_state *cps); extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid); @@ -118,19 +126,25 @@ struct cb_recallanyargs { uint32_t craa_type_mask; }; -extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy); +extern __be32 nfs4_callback_recallany(struct cb_recallanyargs *args, + void *dummy, + struct cb_process_state *cps); struct cb_recallslotargs { struct sockaddr *crsa_addr; uint32_t crsa_target_max_slots; }; -extern unsigned nfs4_callback_recallslot(struct cb_recallslotargs *args, - void *dummy); +extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, + void *dummy, + struct 
cb_process_state *cps); #endif /* CONFIG_NFS_V4_1 */ -extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res); -extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy); +extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, + struct cb_getattrres *res, + struct cb_process_state *cps); +extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy, + struct cb_process_state *cps); #ifdef CONFIG_NFS_V4 extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt); diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 2950fca0c61b..b70e46da16fc 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -16,26 +16,28 @@ #ifdef NFS_DEBUG #define NFSDBG_FACILITY NFSDBG_CALLBACK #endif - -__be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res) + +__be32 nfs4_callback_getattr(struct cb_getattrargs *args, + struct cb_getattrres *res, + struct cb_process_state *cps) { - struct nfs_client *clp; struct nfs_delegation *delegation; struct nfs_inode *nfsi; struct inode *inode; + res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION); + if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */ + goto out; + res->bitmap[0] = res->bitmap[1] = 0; res->status = htonl(NFS4ERR_BADHANDLE); - clp = nfs_find_client(args->addr, 4); - if (clp == NULL) - goto out; dprintk("NFS: GETATTR callback request from %s\n", - rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); + rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR)); - inode = nfs_delegation_find_inode(clp, &args->fh); + inode = nfs_delegation_find_inode(cps->clp, &args->fh); if (inode == NULL) - goto out_putclient; + goto out; nfsi = NFS_I(inode); rcu_read_lock(); delegation = rcu_dereference(nfsi->delegation); @@ -55,49 +57,41 @@ __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres * out_iput: rcu_read_unlock(); iput(inode); -out_putclient: - nfs_put_client(clp); out: dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status)); return res->status; } -__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy) +__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy, + struct cb_process_state *cps) { - struct nfs_client *clp; struct inode *inode; __be32 res; - res = htonl(NFS4ERR_BADHANDLE); - clp = nfs_find_client(args->addr, 4); - if (clp == NULL) + res = htonl(NFS4ERR_OP_NOT_IN_SESSION); + if (!cps->clp) /* Always set for v4.0. 
Set in cb_sequence for v4.1 */ goto out; dprintk("NFS: RECALL callback request from %s\n", - rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); - - do { - struct nfs_client *prev = clp; - - inode = nfs_delegation_find_inode(clp, &args->fh); - if (inode != NULL) { - /* Set up a helper thread to actually return the delegation */ - switch (nfs_async_inode_return_delegation(inode, &args->stateid)) { - case 0: - res = 0; - break; - case -ENOENT: - if (res != 0) - res = htonl(NFS4ERR_BAD_STATEID); - break; - default: - res = htonl(NFS4ERR_RESOURCE); - } - iput(inode); - } - clp = nfs_find_client_next(prev); - nfs_put_client(prev); - } while (clp != NULL); + rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR)); + + res = htonl(NFS4ERR_BADHANDLE); + inode = nfs_delegation_find_inode(cps->clp, &args->fh); + if (inode == NULL) + goto out; + /* Set up a helper thread to actually return the delegation */ + switch (nfs_async_inode_return_delegation(inode, &args->stateid)) { + case 0: + res = 0; + break; + case -ENOENT: + if (res != 0) + res = htonl(NFS4ERR_BAD_STATEID); + break; + default: + res = htonl(NFS4ERR_RESOURCE); + } + iput(inode); out: dprintk("%s: exit with status = %d\n", __func__, ntohl(res)); return res; @@ -184,42 +178,6 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args) return htonl(NFS4ERR_SEQ_MISORDERED); } -/* - * Returns a pointer to a held 'struct nfs_client' that matches the server's - * address, major version number, and session ID. It is the caller's - * responsibility to release the returned reference. - * - * Returns NULL if there are no connections with sessions, or if no session - * matches the one of interest. - */ - static struct nfs_client *find_client_with_session( - const struct sockaddr *addr, u32 nfsversion, - struct nfs4_sessionid *sessionid) -{ - struct nfs_client *clp; - - clp = nfs_find_client(addr, 4); - if (clp == NULL) - return NULL; - - do { - struct nfs_client *prev = clp; - - if (clp->cl_session != NULL) { - if (memcmp(clp->cl_session->sess_id.data, - sessionid->data, - NFS4_MAX_SESSIONID_LEN) == 0) { - /* Returns a held reference to clp */ - return clp; - } - } - clp = nfs_find_client_next(prev); - nfs_put_client(prev); - } while (clp != NULL); - - return NULL; -} - /* * For each referring call triple, check the session's slot table for * a match. If the slot is in use and the sequence numbers match, the @@ -276,20 +234,28 @@ out: } __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, - struct cb_sequenceres *res) + struct cb_sequenceres *res, + struct cb_process_state *cps) { struct nfs_client *clp; int i; __be32 status; + cps->clp = NULL; + status = htonl(NFS4ERR_BADSESSION); - clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid); + /* Incoming session must match the callback session */ + if (memcmp(&args->csa_sessionid, cps->svc_sid, NFS4_MAX_SESSIONID_LEN)) + goto out; + + clp = nfs4_find_client_sessionid(args->csa_addr, + &args->csa_sessionid, 1); if (clp == NULL) goto out; status = validate_seqid(&clp->cl_session->bc_slot_table, args); if (status) - goto out_putclient; + goto out; /* * Check for pending referring calls. 
If a match is found, a @@ -298,7 +264,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, */ if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) { status = htonl(NFS4ERR_DELAY); - goto out_putclient; + goto out; } memcpy(&res->csr_sessionid, &args->csa_sessionid, @@ -307,36 +273,36 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, res->csr_slotid = args->csa_slotid; res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; + cps->clp = clp; /* put in nfs4_callback_compound */ -out_putclient: - nfs_put_client(clp); out: for (i = 0; i < args->csa_nrclists; i++) kfree(args->csa_rclists[i].rcl_refcalls); kfree(args->csa_rclists); - if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) - res->csr_status = 0; - else + if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) { + cps->drc_status = status; + status = 0; + } else res->csr_status = status; + dprintk("%s: exit with status = %d res->csr_status %d\n", __func__, ntohl(status), ntohl(res->csr_status)); return status; } -__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy) +__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy, + struct cb_process_state *cps) { - struct nfs_client *clp; __be32 status; fmode_t flags = 0; status = htonl(NFS4ERR_OP_NOT_IN_SESSION); - clp = nfs_find_client(args->craa_addr, 4); - if (clp == NULL) + if (!cps->clp) /* set in cb_sequence */ goto out; dprintk("NFS: RECALL_ANY callback request from %s\n", - rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)); + rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR)); if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *) &args->craa_type_mask)) @@ -346,7 +312,7 @@ __be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy) flags |= FMODE_WRITE; if (flags) - nfs_expire_all_delegation_types(clp, flags); + nfs_expire_all_delegation_types(cps->clp, flags); status = htonl(NFS4_OK); out: dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); @@ -354,36 +320,33 @@ out: } /* Reduce the fore channel's max_slots to the target value */ -__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy) +__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy, + struct cb_process_state *cps) { - struct nfs_client *clp; struct nfs4_slot_table *fc_tbl; __be32 status; status = htonl(NFS4ERR_OP_NOT_IN_SESSION); - clp = nfs_find_client(args->crsa_addr, 4); - if (clp == NULL) + if (!cps->clp) /* set in cb_sequence */ goto out; dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n", - rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR), + rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR), args->crsa_target_max_slots); - fc_tbl = &clp->cl_session->fc_slot_table; + fc_tbl = &cps->clp->cl_session->fc_slot_table; status = htonl(NFS4ERR_BAD_HIGH_SLOT); if (args->crsa_target_max_slots > fc_tbl->max_slots || args->crsa_target_max_slots < 1) - goto out_putclient; + goto out; status = htonl(NFS4_OK); if (args->crsa_target_max_slots == fc_tbl->max_slots) - goto out_putclient; + goto out; fc_tbl->target_max_slots = args->crsa_target_max_slots; - nfs41_handle_recall_slot(clp); -out_putclient: - nfs_put_client(clp); /* balance nfs_find_client */ + nfs41_handle_recall_slot(cps->clp); out: dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); return status; diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 05af212f0edf..dbd0d649805c 100644 --- 
a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -10,8 +10,10 @@ #include #include #include +#include #include "nfs4_fs.h" #include "callback.h" +#include "internal.h" #define CB_OP_TAGLEN_MAXSZ (512) #define CB_OP_HDR_RES_MAXSZ (2 + CB_OP_TAGLEN_MAXSZ) @@ -33,7 +35,8 @@ /* Internal error code */ #define NFS4ERR_RESOURCE_HDR 11050 -typedef __be32 (*callback_process_op_t)(void *, void *); +typedef __be32 (*callback_process_op_t)(void *, void *, + struct cb_process_state *); typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *); typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *); @@ -160,7 +163,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound hdr->minorversion = ntohl(*p++); /* Check minor version is zero or one. */ if (hdr->minorversion <= 1) { - p++; /* skip callback_ident */ + hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */ } else { printk(KERN_WARNING "%s: NFSv4 server callback with " "illegal minor version %u!\n", @@ -621,7 +624,8 @@ preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op) static __be32 process_op(uint32_t minorversion, int nop, struct svc_rqst *rqstp, struct xdr_stream *xdr_in, void *argp, - struct xdr_stream *xdr_out, void *resp, int* drc_status) + struct xdr_stream *xdr_out, void *resp, + struct cb_process_state *cps) { struct callback_op *op = &callback_ops[0]; unsigned int op_nr; @@ -644,8 +648,8 @@ static __be32 process_op(uint32_t minorversion, int nop, if (status) goto encode_hdr; - if (*drc_status) { - status = *drc_status; + if (cps->drc_status) { + status = cps->drc_status; goto encode_hdr; } @@ -653,16 +657,10 @@ static __be32 process_op(uint32_t minorversion, int nop, if (maxlen > 0 && maxlen < PAGE_SIZE) { status = op->decode_args(rqstp, xdr_in, argp); if (likely(status == 0)) - status = op->process_op(argp, resp); + status = op->process_op(argp, resp, cps); } else status = htonl(NFS4ERR_RESOURCE); - /* Only set by OP_CB_SEQUENCE processing */ - if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) { - *drc_status = status; - status = 0; - } - encode_hdr: res = encode_op_hdr(xdr_out, op_nr, status); if (unlikely(res)) @@ -681,8 +679,11 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r struct cb_compound_hdr_arg hdr_arg = { 0 }; struct cb_compound_hdr_res hdr_res = { NULL }; struct xdr_stream xdr_in, xdr_out; - __be32 *p; - __be32 status, drc_status = 0; + __be32 *p, status; + struct cb_process_state cps = { + .drc_status = 0, + .clp = NULL, + }; unsigned int nops = 0; dprintk("%s: start\n", __func__); @@ -696,6 +697,13 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r if (status == __constant_htonl(NFS4ERR_RESOURCE)) return rpc_garbage_args; + if (hdr_arg.minorversion == 0) { + cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident); + if (!cps.clp) + return rpc_drop_reply; + } else + cps.svc_sid = bc_xprt_sid(rqstp); + hdr_res.taglen = hdr_arg.taglen; hdr_res.tag = hdr_arg.tag; if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) @@ -703,7 +711,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r while (status == 0 && nops != hdr_arg.nops) { status = process_op(hdr_arg.minorversion, nops, rqstp, - &xdr_in, argp, &xdr_out, resp, &drc_status); + &xdr_in, argp, &xdr_out, resp, &cps); nops++; } @@ -716,6 +724,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r *hdr_res.status = status; *hdr_res.nops = htonl(nops); 
+ nfs_put_client(cps.clp); dprintk("%s: done, status = %u\n", __func__, ntohl(status)); return rpc_success; } diff --git a/fs/nfs/client.c b/fs/nfs/client.c index bc3a8620e8c3..11eb9934c747 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -410,70 +410,28 @@ static int nfs_sockaddr_cmp(const struct sockaddr *sa1, return 0; } -/* - * Find a client by IP address and protocol version - * - returns NULL if no such client - */ -struct nfs_client *nfs_find_client(const struct sockaddr *addr, u32 nfsversion) +/* Common match routine for v4.0 and v4.1 callback services */ +bool +nfs4_cb_match_client(const struct sockaddr *addr, struct nfs_client *clp, + u32 minorversion) { - struct nfs_client *clp; + struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; - spin_lock(&nfs_client_lock); - list_for_each_entry(clp, &nfs_client_list, cl_share_link) { - struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; + /* Don't match clients that failed to initialise */ + if (!(clp->cl_cons_state == NFS_CS_READY || + clp->cl_cons_state == NFS_CS_SESSION_INITING)) + return false; - /* Don't match clients that failed to initialise properly */ - if (!(clp->cl_cons_state == NFS_CS_READY || - clp->cl_cons_state == NFS_CS_SESSION_INITING)) - continue; + /* Match the version and minorversion */ + if (clp->rpc_ops->version != 4 || + clp->cl_minorversion != minorversion) + return false; - /* Different NFS versions cannot share the same nfs_client */ - if (clp->rpc_ops->version != nfsversion) - continue; - - /* Match only the IP address, not the port number */ - if (!nfs_sockaddr_match_ipaddr(addr, clap)) - continue; + /* Match only the IP address, not the port number */ + if (!nfs_sockaddr_match_ipaddr(addr, clap)) + return false; - atomic_inc(&clp->cl_count); - spin_unlock(&nfs_client_lock); - return clp; - } - spin_unlock(&nfs_client_lock); - return NULL; -} - -/* - * Find a client by IP address and protocol version - * - returns NULL if no such client - */ -struct nfs_client *nfs_find_client_next(struct nfs_client *clp) -{ - struct sockaddr *sap = (struct sockaddr *)&clp->cl_addr; - u32 nfsvers = clp->rpc_ops->version; - - spin_lock(&nfs_client_lock); - list_for_each_entry_continue(clp, &nfs_client_list, cl_share_link) { - struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; - - /* Don't match clients that failed to initialise properly */ - if (clp->cl_cons_state != NFS_CS_READY) - continue; - - /* Different NFS versions cannot share the same nfs_client */ - if (clp->rpc_ops->version != nfsvers) - continue; - - /* Match only the IP address, not the port number */ - if (!nfs_sockaddr_match_ipaddr(sap, clap)) - continue; - - atomic_inc(&clp->cl_count); - spin_unlock(&nfs_client_lock); - return clp; - } - spin_unlock(&nfs_client_lock); - return NULL; + return true; } /* @@ -1171,6 +1129,101 @@ error: } #ifdef CONFIG_NFS_V4 +/* + * NFSv4.0 callback thread helper + * + * Find a client by IP address, protocol version, and minorversion + * + * Called from the pg_authenticate method. The callback identifier + * is not used as it has not been decoded. 
+ * + * Returns NULL if no such client + */ +struct nfs_client * +nfs4_find_client_no_ident(const struct sockaddr *addr) +{ + struct nfs_client *clp; + + spin_lock(&nfs_client_lock); + list_for_each_entry(clp, &nfs_client_list, cl_share_link) { + if (nfs4_cb_match_client(addr, clp, 0) == false) + continue; + atomic_inc(&clp->cl_count); + spin_unlock(&nfs_client_lock); + return clp; + } + spin_unlock(&nfs_client_lock); + return NULL; +} + +/* + * NFSv4.0 callback thread helper + * + * Find a client by callback identifier + */ +struct nfs_client * +nfs4_find_client_ident(int cb_ident) +{ + struct nfs_client *clp; + + spin_lock(&nfs_client_lock); + clp = idr_find(&cb_ident_idr, cb_ident); + if (clp) + atomic_inc(&clp->cl_count); + spin_unlock(&nfs_client_lock); + return clp; +} + +#if defined(CONFIG_NFS_V4_1) +/* + * NFSv4.1 callback thread helper + * For CB_COMPOUND calls, find a client by IP address, protocol version, + * minorversion, and sessionID + * + * CREATE_SESSION triggers a CB_NULL ping from servers. The callback service + * sessionid can only be set after the CREATE_SESSION return, so a CB_NULL + * can arrive before the callback sessionid is set. For CB_NULL calls, + * find a client by IP address protocol version, and minorversion. + * + * Returns NULL if no such client + */ +struct nfs_client * +nfs4_find_client_sessionid(const struct sockaddr *addr, + struct nfs4_sessionid *sid, int is_cb_compound) +{ + struct nfs_client *clp; + + spin_lock(&nfs_client_lock); + list_for_each_entry(clp, &nfs_client_list, cl_share_link) { + if (nfs4_cb_match_client(addr, clp, 1) == false) + continue; + + if (!nfs4_has_session(clp)) + continue; + + /* Match sessionid unless cb_null call*/ + if (is_cb_compound && (memcmp(clp->cl_session->sess_id.data, + sid->data, NFS4_MAX_SESSIONID_LEN) != 0)) + continue; + + atomic_inc(&clp->cl_count); + spin_unlock(&nfs_client_lock); + return clp; + } + spin_unlock(&nfs_client_lock); + return NULL; +} + +#else /* CONFIG_NFS_V4_1 */ + +struct nfs_client * +nfs4_find_client_sessionid(const struct sockaddr *addr, + struct nfs4_sessionid *sid, int is_cb_compound) +{ + return NULL; +} +#endif /* CONFIG_NFS_V4_1 */ + /* * Initialize the NFS4 callback service */ diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 7c803c916574..bfa3a34af801 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -130,8 +130,11 @@ extern struct rpc_program nfs_program; extern void nfs_cleanup_cb_ident_idr(void); extern void nfs_put_client(struct nfs_client *); -extern struct nfs_client *nfs_find_client(const struct sockaddr *, u32); -extern struct nfs_client *nfs_find_client_next(struct nfs_client *); +extern struct nfs_client *nfs4_find_client_no_ident(const struct sockaddr *); +extern struct nfs_client *nfs4_find_client_ident(int); +extern struct nfs_client * +nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *, + int); extern struct nfs_server *nfs_create_server( const struct nfs_parsed_mount_data *, struct nfs_fh *); -- cgit v1.2.2 From ece0de633c4d9106c39ea9f0db1638c42ead2541 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Thu, 6 Jan 2011 02:04:33 +0000 Subject: NFS RPC_AUTH_GSS unsupported on v4.1 back channel Signed-off-by: Andy Adamson Acked-by: Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/callback.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 15677e7bede5..753a9e315518 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -365,6 +365,9 @@ static int 
check_gss_callback_principal(struct nfs_client *clp, struct rpc_clnt *r = clp->cl_rpcclient; char *p = svc_gss_principal(rqstp); + /* No RPC_AUTH_GSS on NFSv4.1 back channel yet */ + if (clp->cl_minorversion != 0) + return SVC_DROP; /* * It might just be a normal user principal, in which case * userspace won't bother to tell us the name at all. -- cgit v1.2.2 From 42acd021824578fa0eeb6eb58d457c23ec5dc9c0 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Thu, 6 Jan 2011 02:04:34 +0000 Subject: NFS add session back channel draining Currently session draining only drains the fore channel. The back channel processing must also be drained. Use the back channel highest_slot_used to indicate that a callback is being processed by the callback thread. Move the session complete to be per channel. When the session is draininig, wait for any current back channel processing to complete and stop all new back channel processing by returning NFS4ERR_DELAY to the back channel client. Drain the back channel, then the fore channel. Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/callback.h | 3 ++- fs/nfs/callback_proc.c | 7 +++++++ fs/nfs/callback_xdr.c | 35 +++++++++++++++++++++++++++++++++++ fs/nfs/nfs4proc.c | 26 +++++++++++++++++++------- fs/nfs/nfs4state.c | 29 ++++++++++++++++++++++------- 5 files changed, 85 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index 25e8802a51d1..b678e3e15bd9 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -138,6 +138,8 @@ extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy, struct cb_process_state *cps); +extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses); +extern void nfs4_cb_take_slot(struct nfs_client *clp); #endif /* CONFIG_NFS_V4_1 */ extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, @@ -145,7 +147,6 @@ extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_process_state *cps); extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy, struct cb_process_state *cps); - #ifdef CONFIG_NFS_V4 extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt); extern void nfs_callback_down(int minorversion); diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index b70e46da16fc..c1bead2f3e04 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -253,6 +253,12 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, if (clp == NULL) goto out; + /* state manager is resetting the session */ + if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) { + status = NFS4ERR_DELAY; + goto out; + } + status = validate_seqid(&clp->cl_session->bc_slot_table, args); if (status) goto out; @@ -273,6 +279,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, res->csr_slotid = args->csa_slotid; res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; + nfs4_cb_take_slot(clp); cps->clp = clp; /* put in nfs4_callback_compound */ out: diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index dbd0d649805c..7a2d6c5864ca 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -596,6 +596,37 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) return htonl(NFS_OK); } +static void nfs4_callback_free_slot(struct nfs4_session *session) +{ + struct nfs4_slot_table *tbl = &session->bc_slot_table; + + spin_lock(&tbl->slot_tbl_lock); + /* + * Let the state manager know 
callback processing done. + * A single slot, so highest used slotid is either 0 or -1 + */ + tbl->highest_used_slotid--; + nfs4_check_drain_bc_complete(session); + spin_unlock(&tbl->slot_tbl_lock); +} + +static void nfs4_cb_free_slot(struct nfs_client *clp) +{ + if (clp && clp->cl_session) + nfs4_callback_free_slot(clp->cl_session); +} + +/* A single slot, so highest used slotid is either 0 or -1 */ +void nfs4_cb_take_slot(struct nfs_client *clp) +{ + struct nfs4_slot_table *tbl = &clp->cl_session->bc_slot_table; + + spin_lock(&tbl->slot_tbl_lock); + tbl->highest_used_slotid++; + BUG_ON(tbl->highest_used_slotid != 0); + spin_unlock(&tbl->slot_tbl_lock); +} + #else /* CONFIG_NFS_V4_1 */ static __be32 @@ -604,6 +635,9 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) return htonl(NFS4ERR_MINOR_VERS_MISMATCH); } +static void nfs4_cb_free_slot(struct nfs_client *clp) +{ +} #endif /* CONFIG_NFS_V4_1 */ static __be32 @@ -724,6 +758,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r *hdr_res.status = status; *hdr_res.nops = htonl(nops); + nfs4_cb_free_slot(cps.clp); nfs_put_client(cps.clp); dprintk("%s: done, status = %u\n", __func__, ntohl(status)); return rpc_success; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e165c53db08f..18a4d5a9a4e9 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -356,9 +356,9 @@ nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *free_slot) } /* - * Signal state manager thread if session is drained + * Signal state manager thread if session fore channel is drained */ -static void nfs41_check_drain_session_complete(struct nfs4_session *ses) +static void nfs4_check_drain_fc_complete(struct nfs4_session *ses) { struct rpc_task *task; @@ -372,8 +372,20 @@ static void nfs41_check_drain_session_complete(struct nfs4_session *ses) if (ses->fc_slot_table.highest_used_slotid != -1) return; - dprintk("%s COMPLETE: Session Drained\n", __func__); - complete(&ses->complete); + dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__); + complete(&ses->fc_slot_table.complete); +} + +/* + * Signal state manager thread if session back channel is drained + */ +void nfs4_check_drain_bc_complete(struct nfs4_session *ses) +{ + if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) || + ses->bc_slot_table.highest_used_slotid != -1) + return; + dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__); + complete(&ses->bc_slot_table.complete); } static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) @@ -390,7 +402,7 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res) spin_lock(&tbl->slot_tbl_lock); nfs4_free_slot(tbl, res->sr_slot); - nfs41_check_drain_session_complete(res->sr_session); + nfs4_check_drain_fc_complete(res->sr_session); spin_unlock(&tbl->slot_tbl_lock); res->sr_slot = NULL; } @@ -4777,17 +4789,17 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp) if (!session) return NULL; - init_completion(&session->complete); - tbl = &session->fc_slot_table; tbl->highest_used_slotid = -1; spin_lock_init(&tbl->slot_tbl_lock); rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table"); + init_completion(&tbl->complete); tbl = &session->bc_slot_table; tbl->highest_used_slotid = -1; spin_lock_init(&tbl->slot_tbl_lock); rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table"); + init_completion(&tbl->complete); session->session_state = 1<cl_session; @@ -165,22 +170,32 @@ static void nfs4_end_drain_session(struct 
nfs_client *clp) } } -static int nfs4_begin_drain_session(struct nfs_client *clp) +static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl) { - struct nfs4_session *ses = clp->cl_session; - struct nfs4_slot_table *tbl = &ses->fc_slot_table; - spin_lock(&tbl->slot_tbl_lock); - set_bit(NFS4_SESSION_DRAINING, &ses->session_state); if (tbl->highest_used_slotid != -1) { - INIT_COMPLETION(ses->complete); + INIT_COMPLETION(tbl->complete); spin_unlock(&tbl->slot_tbl_lock); - return wait_for_completion_interruptible(&ses->complete); + return wait_for_completion_interruptible(&tbl->complete); } spin_unlock(&tbl->slot_tbl_lock); return 0; } +static int nfs4_begin_drain_session(struct nfs_client *clp) +{ + struct nfs4_session *ses = clp->cl_session; + int ret = 0; + + set_bit(NFS4_SESSION_DRAINING, &ses->session_state); + /* back channel */ + ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table); + if (ret) + return ret; + /* fore channel */ + return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); +} + int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) { int status; -- cgit v1.2.2 From 4a19de0f4b693139bb10b7cc3cfe1f618576ba67 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Thu, 6 Jan 2011 02:04:35 +0000 Subject: NFS rename client back channel transport field Differentiate from server backchannel Signed-off-by: Andy Adamson Acked-by: Bruce Fields Signed-off-by: Trond Myklebust --- fs/nfs/callback.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 753a9e315518..199016528fcb 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -142,7 +142,7 @@ int nfs4_set_callback_sessionid(struct nfs_client *clp) struct svc_serv *serv = clp->cl_rpcclient->cl_xprt->bc_serv; struct nfs4_sessionid *bc_sid; - if (!serv->bc_xprt) + if (!serv->sv_bc_xprt) return -EINVAL; /* on success freed in xprt_free */ @@ -152,12 +152,12 @@ int nfs4_set_callback_sessionid(struct nfs_client *clp) memcpy(bc_sid->data, &clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN); spin_lock_bh(&serv->sv_cb_lock); - serv->bc_xprt->xpt_bc_sid = bc_sid; + serv->sv_bc_xprt->xpt_bc_sid = bc_sid; spin_unlock_bh(&serv->sv_cb_lock); - dprintk("%s set xpt_bc_sid=%u:%u:%u:%u for bc_xprt %p\n", __func__, + dprintk("%s set xpt_bc_sid=%u:%u:%u:%u for sv_bc_xprt %p\n", __func__, ((u32 *)bc_sid->data)[0], ((u32 *)bc_sid->data)[1], ((u32 *)bc_sid->data)[2], ((u32 *)bc_sid->data)[3], - serv->bc_xprt); + serv->sv_bc_xprt); return 0; } @@ -228,8 +228,8 @@ nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt) init_waitqueue_head(&serv->sv_cb_waitq); rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]); if (IS_ERR(rqstp)) { - svc_xprt_put(serv->bc_xprt); - serv->bc_xprt = NULL; + svc_xprt_put(serv->sv_bc_xprt); + serv->sv_bc_xprt = NULL; } out: dprintk("--> %s return %ld\n", __func__, -- cgit v1.2.2 From 52fabd73199cd00932f92c9f548bdf66a5bbc23d Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:18 +0000 Subject: pnfs: fix incorrect comment in destroy_lseg Comment references get_layout_hdr_locked, which never existed in submitted code. 
Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index db773428f95f..6e9daffa5a37 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -225,7 +225,7 @@ destroy_lseg(struct kref *kref) dprintk("--> %s\n", __func__); NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg); - /* Matched by get_layout_hdr_locked in pnfs_insert_layout */ + /* Matched by get_layout_hdr in pnfs_insert_layout */ put_layout_hdr(ino); } -- cgit v1.2.2 From daaa82d1c72e10dc16cad3a810e225f9188dc7aa Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:19 +0000 Subject: pnfs: remove unnecessary field lgp->status Signed-off-by: Fred Isaman Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 18a4d5a9a4e9..28e175e74de2 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5326,7 +5326,6 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) return; } } - lgp->status = task->tk_status; dprintk("<-- %s\n", __func__); } @@ -5382,7 +5381,7 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp) status = nfs4_wait_for_completion_rpc_task(task); if (status != 0) goto out; - status = lgp->status; + status = task->tk_status; if (status != 0) goto out; status = pnfs_layout_process(lgp); -- cgit v1.2.2 From 566052c53b5146e23a99ab95fb5c11f8a295a084 Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:20 +0000 Subject: pnfs: add prefix to struct pnfs_layout_segment fields While we are renaming all the fields, change lo->state to lo->plh_flags. Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/nfs4filelayout.c | 2 +- fs/nfs/pnfs.c | 66 ++++++++++++++++++++++++------------------------- fs/nfs/pnfs.h | 10 ++++---- 3 files changed, 39 insertions(+), 39 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 2e92f0d8d654..738d6a4e77fe 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -243,7 +243,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, static void filelayout_free_lseg(struct pnfs_layout_segment *lseg) { - struct nfs_server *nfss = NFS_SERVER(lseg->layout->inode); + struct nfs_server *nfss = NFS_SERVER(lseg->pls_layout->inode); struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); dprintk("--> %s\n", __func__); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 6e9daffa5a37..c3ca5fe1f3bd 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -210,9 +210,9 @@ put_layout_hdr(struct inode *inode) static void init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg) { - INIT_LIST_HEAD(&lseg->fi_list); - kref_init(&lseg->kref); - lseg->layout = lo; + INIT_LIST_HEAD(&lseg->pls_list); + kref_init(&lseg->pls_refcount); + lseg->pls_layout = lo; } /* Called without i_lock held, as the free_lseg call may sleep */ @@ -220,8 +220,8 @@ static void destroy_lseg(struct kref *kref) { struct pnfs_layout_segment *lseg = - container_of(kref, struct pnfs_layout_segment, kref); - struct inode *ino = lseg->layout->inode; + container_of(kref, struct pnfs_layout_segment, pls_refcount); + struct inode *ino = lseg->pls_layout->inode; dprintk("--> %s\n", __func__); NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg); @@ -236,8 +236,8 @@ put_lseg(struct pnfs_layout_segment *lseg) return; dprintk("%s: lseg %p ref %d\n", 
__func__, lseg, - atomic_read(&lseg->kref.refcount)); - kref_put(&lseg->kref, destroy_lseg); + atomic_read(&lseg->pls_refcount.refcount)); + kref_put(&lseg->pls_refcount, destroy_lseg); } static void @@ -249,9 +249,9 @@ pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list) dprintk("%s:Begin lo %p\n", __func__, lo); assert_spin_locked(&lo->inode->i_lock); - list_for_each_entry_safe(lseg, next, &lo->segs, fi_list) { + list_for_each_entry_safe(lseg, next, &lo->segs, pls_list) { dprintk("%s: freeing lseg %p\n", __func__, lseg); - list_move(&lseg->fi_list, tmp_list); + list_move(&lseg->pls_list, tmp_list); } clp = NFS_SERVER(lo->inode)->nfs_client; spin_lock(&clp->cl_lock); @@ -259,7 +259,7 @@ pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list) list_del_init(&lo->layouts); spin_unlock(&clp->cl_lock); write_seqlock(&lo->seqlock); - clear_bit(NFS_LAYOUT_STATEID_SET, &lo->state); + clear_bit(NFS_LAYOUT_STATEID_SET, &lo->plh_flags); write_sequnlock(&lo->seqlock); dprintk("%s:Return\n", __func__); @@ -272,9 +272,9 @@ pnfs_free_lseg_list(struct list_head *tmp_list) while (!list_empty(tmp_list)) { lseg = list_entry(tmp_list->next, struct pnfs_layout_segment, - fi_list); + pls_list); dprintk("%s calling put_lseg on %p\n", __func__, lseg); - list_del(&lseg->fi_list); + list_del(&lseg->pls_list); put_lseg(lseg); } } @@ -331,7 +331,7 @@ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, bool overwrite = false; write_seqlock(&lo->seqlock); - if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state) || + if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->plh_flags) || memcmp(old->stateid.other, new->stateid.other, sizeof(new->stateid.other))) overwrite = true; else { @@ -360,7 +360,7 @@ pnfs_layout_from_open_stateid(struct pnfs_layout_hdr *lo, memcpy(lo->stateid.data, state->stateid.data, sizeof(state->stateid.data)); } while (read_seqretry(&state->seqlock, seq)); - set_bit(NFS_LAYOUT_STATEID_SET, &lo->state); + set_bit(NFS_LAYOUT_STATEID_SET, &lo->plh_flags); write_sequnlock(&lo->seqlock); dprintk("<-- %s\n", __func__); } @@ -374,7 +374,7 @@ pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, dprintk("--> %s\n", __func__); do { seq = read_seqbegin(&lo->seqlock); - if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state)) { + if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->plh_flags)) { /* This will trigger retry of the read */ pnfs_layout_from_open_stateid(lo, open_state); } else @@ -424,7 +424,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, nfs4_proc_layoutget(lgp); if (!lseg) { /* remember that LAYOUTGET failed and suspend trying */ - set_bit(lo_fail_bit(iomode), &lo->state); + set_bit(lo_fail_bit(iomode), &lo->plh_flags); } return lseg; } @@ -459,26 +459,26 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo, list_add_tail(&lo->layouts, &clp->cl_layouts); spin_unlock(&clp->cl_lock); } - list_for_each_entry(lp, &lo->segs, fi_list) { - if (cmp_layout(lp->range.iomode, lseg->range.iomode) > 0) + list_for_each_entry(lp, &lo->segs, pls_list) { + if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0) continue; - list_add_tail(&lseg->fi_list, &lp->fi_list); + list_add_tail(&lseg->pls_list, &lp->pls_list); dprintk("%s: inserted lseg %p " "iomode %d offset %llu length %llu before " "lp %p iomode %d offset %llu length %llu\n", - __func__, lseg, lseg->range.iomode, - lseg->range.offset, lseg->range.length, - lp, lp->range.iomode, lp->range.offset, - lp->range.length); + __func__, lseg, lseg->pls_range.iomode, + lseg->pls_range.offset, lseg->pls_range.length, + lp, 
lp->pls_range.iomode, lp->pls_range.offset, + lp->pls_range.length); found = 1; break; } if (!found) { - list_add_tail(&lseg->fi_list, &lo->segs); + list_add_tail(&lseg->pls_list, &lo->segs); dprintk("%s: inserted lseg %p " "iomode %d offset %llu length %llu at tail\n", - __func__, lseg, lseg->range.iomode, - lseg->range.offset, lseg->range.length); + __func__, lseg, lseg->pls_range.iomode, + lseg->pls_range.offset, lseg->pls_range.length); } get_layout_hdr_locked(lo); @@ -538,7 +538,7 @@ pnfs_find_alloc_layout(struct inode *ino) static int is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode) { - return (iomode != IOMODE_RW || lseg->range.iomode == IOMODE_RW); + return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW); } /* @@ -552,17 +552,17 @@ pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode) dprintk("%s:Begin\n", __func__); assert_spin_locked(&lo->inode->i_lock); - list_for_each_entry(lseg, &lo->segs, fi_list) { + list_for_each_entry(lseg, &lo->segs, pls_list) { if (is_matching_lseg(lseg, iomode)) { ret = lseg; break; } - if (cmp_layout(iomode, lseg->range.iomode) > 0) + if (cmp_layout(iomode, lseg->pls_range.iomode) > 0) break; } dprintk("%s:Return lseg %p ref %d\n", - __func__, ret, ret ? atomic_read(&ret->kref.refcount) : 0); + __func__, ret, ret ? atomic_read(&ret->pls_refcount.refcount) : 0); return ret; } @@ -597,7 +597,7 @@ pnfs_update_layout(struct inode *ino, } /* if LAYOUTGET already failed once we don't try again */ - if (test_bit(lo_fail_bit(iomode), &nfsi->layout->state)) + if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags)) goto out_unlock; get_layout_hdr_locked(lo); /* Matched in nfs4_layoutget_release */ @@ -606,7 +606,7 @@ pnfs_update_layout(struct inode *ino, lseg = send_layoutget(lo, ctx, iomode); out: dprintk("%s end, state 0x%lx lseg %p\n", __func__, - nfsi->layout->state, lseg); + nfsi->layout->plh_flags, lseg); return lseg; out_unlock: spin_unlock(&ino->i_lock); @@ -636,7 +636,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) spin_lock(&ino->i_lock); init_lseg(lo, lseg); - lseg->range = res->range; + lseg->pls_range = res->range; *lgp->lsegpp = lseg; pnfs_insert_layout(lo, lseg); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index e12367d50489..6fcc07353004 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -31,10 +31,10 @@ #define FS_NFS_PNFS_H struct pnfs_layout_segment { - struct list_head fi_list; - struct pnfs_layout_range range; - struct kref kref; - struct pnfs_layout_hdr *layout; + struct list_head pls_list; + struct pnfs_layout_range pls_range; + struct kref pls_refcount; + struct pnfs_layout_hdr *pls_layout; }; #ifdef CONFIG_NFS_V4_1 @@ -65,7 +65,7 @@ struct pnfs_layout_hdr { struct list_head segs; /* layout segments list */ seqlock_t seqlock; /* Protects the stateid */ nfs4_stateid stateid; - unsigned long state; + unsigned long plh_flags; struct inode *inode; }; -- cgit v1.2.2 From b7edfaa1983362842351e425adeb8e297b4c11fb Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:21 +0000 Subject: pnfs: add prefix to struct pnfs_layout_hdr fields Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/nfs4filelayout.c | 6 ++-- fs/nfs/pnfs.c | 94 ++++++++++++++++++++++++------------------------- fs/nfs/pnfs.h | 12 +++---- 3 files changed, 56 insertions(+), 56 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 738d6a4e77fe..23f930caf1e2 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -82,7 +82,7 @@ 
filelayout_check_layout(struct pnfs_layout_hdr *lo, { struct nfs4_file_layout_dsaddr *dsaddr; int status = -EINVAL; - struct nfs_server *nfss = NFS_SERVER(lo->inode); + struct nfs_server *nfss = NFS_SERVER(lo->plh_inode); dprintk("--> %s\n", __func__); @@ -101,7 +101,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo, /* find and reference the deviceid */ dsaddr = nfs4_fl_find_get_deviceid(nfss->nfs_client, id); if (dsaddr == NULL) { - dsaddr = get_device_info(lo->inode, id); + dsaddr = get_device_info(lo->plh_inode, id); if (dsaddr == NULL) goto out; } @@ -243,7 +243,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, static void filelayout_free_lseg(struct pnfs_layout_segment *lseg) { - struct nfs_server *nfss = NFS_SERVER(lseg->pls_layout->inode); + struct nfs_server *nfss = NFS_SERVER(lseg->pls_layout->plh_inode); struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); dprintk("--> %s\n", __func__); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index c3ca5fe1f3bd..6736f9e4f2e1 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -180,21 +180,21 @@ EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver); static void get_layout_hdr_locked(struct pnfs_layout_hdr *lo) { - assert_spin_locked(&lo->inode->i_lock); - lo->refcount++; + assert_spin_locked(&lo->plh_inode->i_lock); + lo->plh_refcount++; } static void put_layout_hdr_locked(struct pnfs_layout_hdr *lo) { - assert_spin_locked(&lo->inode->i_lock); - BUG_ON(lo->refcount == 0); + assert_spin_locked(&lo->plh_inode->i_lock); + BUG_ON(lo->plh_refcount == 0); - lo->refcount--; - if (!lo->refcount) { + lo->plh_refcount--; + if (!lo->plh_refcount) { dprintk("%s: freeing layout cache %p\n", __func__, lo); - BUG_ON(!list_empty(&lo->layouts)); - NFS_I(lo->inode)->layout = NULL; + BUG_ON(!list_empty(&lo->plh_layouts)); + NFS_I(lo->plh_inode)->layout = NULL; kfree(lo); } } @@ -221,7 +221,7 @@ destroy_lseg(struct kref *kref) { struct pnfs_layout_segment *lseg = container_of(kref, struct pnfs_layout_segment, pls_refcount); - struct inode *ino = lseg->pls_layout->inode; + struct inode *ino = lseg->pls_layout->plh_inode; dprintk("--> %s\n", __func__); NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg); @@ -248,19 +248,19 @@ pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list) dprintk("%s:Begin lo %p\n", __func__, lo); - assert_spin_locked(&lo->inode->i_lock); - list_for_each_entry_safe(lseg, next, &lo->segs, pls_list) { + assert_spin_locked(&lo->plh_inode->i_lock); + list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) { dprintk("%s: freeing lseg %p\n", __func__, lseg); list_move(&lseg->pls_list, tmp_list); } - clp = NFS_SERVER(lo->inode)->nfs_client; + clp = NFS_SERVER(lo->plh_inode)->nfs_client; spin_lock(&clp->cl_lock); /* List does not take a reference, so no need for put here */ - list_del_init(&lo->layouts); + list_del_init(&lo->plh_layouts); spin_unlock(&clp->cl_lock); - write_seqlock(&lo->seqlock); + write_seqlock(&lo->plh_seqlock); clear_bit(NFS_LAYOUT_STATEID_SET, &lo->plh_flags); - write_sequnlock(&lo->seqlock); + write_sequnlock(&lo->plh_seqlock); dprintk("%s:Return\n", __func__); } @@ -312,25 +312,25 @@ pnfs_destroy_all_layouts(struct nfs_client *clp) while (!list_empty(&tmp_list)) { lo = list_entry(tmp_list.next, struct pnfs_layout_hdr, - layouts); + plh_layouts); dprintk("%s freeing layout for inode %lu\n", __func__, - lo->inode->i_ino); - pnfs_destroy_layout(NFS_I(lo->inode)); + lo->plh_inode->i_ino); + pnfs_destroy_layout(NFS_I(lo->plh_inode)); } } -/* update lo->stateid with new if is more recent +/* 
update lo->plh_stateid with new if is more recent * - * lo->stateid could be the open stateid, in which case we just use what given. + * lo->plh_stateid could be the open stateid, in which case we just use what given. */ static void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new) { - nfs4_stateid *old = &lo->stateid; + nfs4_stateid *old = &lo->plh_stateid; bool overwrite = false; - write_seqlock(&lo->seqlock); + write_seqlock(&lo->plh_seqlock); if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->plh_flags) || memcmp(old->stateid.other, new->stateid.other, sizeof(new->stateid.other))) overwrite = true; @@ -344,7 +344,7 @@ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, } if (overwrite) memcpy(&old->stateid, &new->stateid, sizeof(new->stateid)); - write_sequnlock(&lo->seqlock); + write_sequnlock(&lo->plh_seqlock); } static void @@ -354,14 +354,14 @@ pnfs_layout_from_open_stateid(struct pnfs_layout_hdr *lo, int seq; dprintk("--> %s\n", __func__); - write_seqlock(&lo->seqlock); + write_seqlock(&lo->plh_seqlock); do { seq = read_seqbegin(&state->seqlock); - memcpy(lo->stateid.data, state->stateid.data, + memcpy(lo->plh_stateid.data, state->stateid.data, sizeof(state->stateid.data)); } while (read_seqretry(&state->seqlock, seq)); set_bit(NFS_LAYOUT_STATEID_SET, &lo->plh_flags); - write_sequnlock(&lo->seqlock); + write_sequnlock(&lo->plh_seqlock); dprintk("<-- %s\n", __func__); } @@ -373,14 +373,14 @@ pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, dprintk("--> %s\n", __func__); do { - seq = read_seqbegin(&lo->seqlock); + seq = read_seqbegin(&lo->plh_seqlock); if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->plh_flags)) { /* This will trigger retry of the read */ pnfs_layout_from_open_stateid(lo, open_state); } else - memcpy(dst->data, lo->stateid.data, - sizeof(lo->stateid.data)); - } while (read_seqretry(&lo->seqlock, seq)); + memcpy(dst->data, lo->plh_stateid.data, + sizeof(lo->plh_stateid.data)); + } while (read_seqretry(&lo->plh_seqlock, seq)); dprintk("<-- %s\n", __func__); } @@ -395,7 +395,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, struct nfs_open_context *ctx, u32 iomode) { - struct inode *ino = lo->inode; + struct inode *ino = lo->plh_inode; struct nfs_server *server = NFS_SERVER(ino); struct nfs4_layoutget *lgp; struct pnfs_layout_segment *lseg = NULL; @@ -405,7 +405,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, BUG_ON(ctx == NULL); lgp = kzalloc(sizeof(*lgp), GFP_KERNEL); if (lgp == NULL) { - put_layout_hdr(lo->inode); + put_layout_hdr(lo->plh_inode); return NULL; } lgp->args.minlength = NFS4_MAX_UINT64; @@ -450,16 +450,16 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo, dprintk("%s:Begin\n", __func__); - assert_spin_locked(&lo->inode->i_lock); - if (list_empty(&lo->segs)) { - struct nfs_client *clp = NFS_SERVER(lo->inode)->nfs_client; + assert_spin_locked(&lo->plh_inode->i_lock); + if (list_empty(&lo->plh_segs)) { + struct nfs_client *clp = NFS_SERVER(lo->plh_inode)->nfs_client; spin_lock(&clp->cl_lock); - BUG_ON(!list_empty(&lo->layouts)); - list_add_tail(&lo->layouts, &clp->cl_layouts); + BUG_ON(!list_empty(&lo->plh_layouts)); + list_add_tail(&lo->plh_layouts, &clp->cl_layouts); spin_unlock(&clp->cl_lock); } - list_for_each_entry(lp, &lo->segs, pls_list) { + list_for_each_entry(lp, &lo->plh_segs, pls_list) { if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0) continue; list_add_tail(&lseg->pls_list, &lp->pls_list); @@ -474,7 +474,7 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo, break; } if (!found) { - 
list_add_tail(&lseg->pls_list, &lo->segs); + list_add_tail(&lseg->pls_list, &lo->plh_segs); dprintk("%s: inserted lseg %p " "iomode %d offset %llu length %llu at tail\n", __func__, lseg, lseg->pls_range.iomode, @@ -493,11 +493,11 @@ alloc_init_layout_hdr(struct inode *ino) lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL); if (!lo) return NULL; - lo->refcount = 1; - INIT_LIST_HEAD(&lo->layouts); - INIT_LIST_HEAD(&lo->segs); - seqlock_init(&lo->seqlock); - lo->inode = ino; + lo->plh_refcount = 1; + INIT_LIST_HEAD(&lo->plh_layouts); + INIT_LIST_HEAD(&lo->plh_segs); + seqlock_init(&lo->plh_seqlock); + lo->plh_inode = ino; return lo; } @@ -551,8 +551,8 @@ pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode) dprintk("%s:Begin\n", __func__); - assert_spin_locked(&lo->inode->i_lock); - list_for_each_entry(lseg, &lo->segs, pls_list) { + assert_spin_locked(&lo->plh_inode->i_lock); + list_for_each_entry(lseg, &lo->plh_segs, pls_list) { if (is_matching_lseg(lseg, iomode)) { ret = lseg; break; @@ -619,7 +619,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout; struct nfs4_layoutget_res *res = &lgp->res; struct pnfs_layout_segment *lseg; - struct inode *ino = lo->inode; + struct inode *ino = lo->plh_inode; int status = 0; /* Inject layout blob into I/O device driver */ diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 6fcc07353004..c2f108640fc4 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -60,13 +60,13 @@ struct pnfs_layoutdriver_type { }; struct pnfs_layout_hdr { - unsigned long refcount; - struct list_head layouts; /* other client layouts */ - struct list_head segs; /* layout segments list */ - seqlock_t seqlock; /* Protects the stateid */ - nfs4_stateid stateid; + unsigned long plh_refcount; + struct list_head plh_layouts; /* other client layouts */ + struct list_head plh_segs; /* layout segments list */ + seqlock_t plh_seqlock; /* Protects the stateid */ + nfs4_stateid plh_stateid; unsigned long plh_flags; - struct inode *inode; + struct inode *plh_inode; }; struct pnfs_device { -- cgit v1.2.2 From fd6002e9b8a93220d5f53b93d9624caf73cdc8a2 Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:22 +0000 Subject: pnfs: change layout state seqlock to a spinlock This prepares for future changes, where the layout state needs to change atomically with several other variables. In particular, it will need to know if lo->segs is empty, as we test that instead of manipulating the NFS_LAYOUT_STATEID_SET bit. Moreover, the layoutstateid is not really a read-mostly structure, as it is written almost as often as it is read. The behavior of pnfs_get_layout_stateid is also slightly changed, so that it no longer changes the stateid. Its name is changed to +pnfs_choose_layoutget_stateid. 
Signed-off-by: Fred Isaman Signed-off-by: Benny Halevy Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 2 +- fs/nfs/pnfs.c | 79 +++++++++++++++++--------------------------------------- fs/nfs/pnfs.h | 7 +++-- 3 files changed, 27 insertions(+), 61 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index f3f99156bfcb..4e28242360d6 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1798,7 +1798,7 @@ encode_layoutget(struct xdr_stream *xdr, p = xdr_encode_hyper(p, args->range.offset); p = xdr_encode_hyper(p, args->range.length); p = xdr_encode_hyper(p, args->minlength); - pnfs_get_layout_stateid(&stateid, NFS_I(args->inode)->layout, + pnfs_choose_layoutget_stateid(&stateid, NFS_I(args->inode)->layout, args->ctx->state); p = xdr_encode_opaque_fixed(p, &stateid.data, NFS4_STATEID_SIZE); *p = cpu_to_be32(args->maxcount); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 6736f9e4f2e1..08313f536b45 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -258,9 +258,6 @@ pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list) /* List does not take a reference, so no need for put here */ list_del_init(&lo->plh_layouts); spin_unlock(&clp->cl_lock); - write_seqlock(&lo->plh_seqlock); - clear_bit(NFS_LAYOUT_STATEID_SET, &lo->plh_flags); - write_sequnlock(&lo->plh_seqlock); dprintk("%s:Return\n", __func__); } @@ -319,69 +316,40 @@ pnfs_destroy_all_layouts(struct nfs_client *clp) } } -/* update lo->plh_stateid with new if is more recent - * - * lo->plh_stateid could be the open stateid, in which case we just use what given. - */ +/* update lo->plh_stateid with new if is more recent */ static void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new) { - nfs4_stateid *old = &lo->plh_stateid; - bool overwrite = false; - - write_seqlock(&lo->plh_seqlock); - if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->plh_flags) || - memcmp(old->stateid.other, new->stateid.other, sizeof(new->stateid.other))) - overwrite = true; - else { - u32 oldseq, newseq; - - oldseq = be32_to_cpu(old->stateid.seqid); - newseq = be32_to_cpu(new->stateid.seqid); - if ((int)(newseq - oldseq) > 0) - overwrite = true; - } - if (overwrite) - memcpy(&old->stateid, &new->stateid, sizeof(new->stateid)); - write_sequnlock(&lo->plh_seqlock); -} - -static void -pnfs_layout_from_open_stateid(struct pnfs_layout_hdr *lo, - struct nfs4_state *state) -{ - int seq; + u32 oldseq, newseq; - dprintk("--> %s\n", __func__); - write_seqlock(&lo->plh_seqlock); - do { - seq = read_seqbegin(&state->seqlock); - memcpy(lo->plh_stateid.data, state->stateid.data, - sizeof(state->stateid.data)); - } while (read_seqretry(&state->seqlock, seq)); - set_bit(NFS_LAYOUT_STATEID_SET, &lo->plh_flags); - write_sequnlock(&lo->plh_seqlock); - dprintk("<-- %s\n", __func__); + oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid); + newseq = be32_to_cpu(new->stateid.seqid); + if ((int)(newseq - oldseq) > 0) + memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid)); } -void -pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, - struct nfs4_state *open_state) +int +pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, + struct nfs4_state *open_state) { - int seq; + int status = 0; dprintk("--> %s\n", __func__); - do { - seq = read_seqbegin(&lo->plh_seqlock); - if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->plh_flags)) { - /* This will trigger retry of the read */ - pnfs_layout_from_open_stateid(lo, open_state); - } else - memcpy(dst->data, lo->plh_stateid.data, - 
sizeof(lo->plh_stateid.data)); - } while (read_seqretry(&lo->plh_seqlock, seq)); + spin_lock(&lo->plh_inode->i_lock); + if (list_empty(&lo->plh_segs)) { + int seq; + + do { + seq = read_seqbegin(&open_state->seqlock); + memcpy(dst->data, open_state->stateid.data, + sizeof(open_state->stateid.data)); + } while (read_seqretry(&open_state->seqlock, seq)); + } else + memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data)); + spin_unlock(&lo->plh_inode->i_lock); dprintk("<-- %s\n", __func__); + return status; } /* @@ -496,7 +464,6 @@ alloc_init_layout_hdr(struct inode *ino) lo->plh_refcount = 1; INIT_LIST_HEAD(&lo->plh_layouts); INIT_LIST_HEAD(&lo->plh_segs); - seqlock_init(&lo->plh_seqlock); lo->plh_inode = ino; return lo; } diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index c2f108640fc4..10937203d236 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -44,7 +44,6 @@ struct pnfs_layout_segment { enum { NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */ NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */ - NFS_LAYOUT_STATEID_SET, /* have a valid layout stateid */ }; /* Per-layout driver specific registration structure */ @@ -63,7 +62,6 @@ struct pnfs_layout_hdr { unsigned long plh_refcount; struct list_head plh_layouts; /* other client layouts */ struct list_head plh_segs; /* layout segments list */ - seqlock_t plh_seqlock; /* Protects the stateid */ nfs4_stateid plh_stateid; unsigned long plh_flags; struct inode *plh_inode; @@ -143,8 +141,9 @@ int pnfs_layout_process(struct nfs4_layoutget *lgp); void pnfs_destroy_layout(struct nfs_inode *); void pnfs_destroy_all_layouts(struct nfs_client *); void put_layout_hdr(struct inode *inode); -void pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, - struct nfs4_state *open_state); +int pnfs_choose_layoutget_stateid(nfs4_stateid *dst, + struct pnfs_layout_hdr *lo, + struct nfs4_state *open_state); static inline int lo_fail_bit(u32 iomode) -- cgit v1.2.2 From 4541d16c024ce40a0781e03c185ecdfe34aec46f Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:23 +0000 Subject: pnfs: change how lsegs are removed from layout list This is to prepare the way for sensible io draining. Instead of just removing the lseg from the list, we instead clear the VALID flag (preventing new io from grabbing references to the lseg) and remove the reference holding it in the list. Thus the lseg will be removed once any io in progress completes and any references still held are dropped. Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 2 +- fs/nfs/pnfs.c | 130 ++++++++++++++++++++++++++++++++++++++------------------- fs/nfs/pnfs.h | 8 +++- 3 files changed, 96 insertions(+), 44 deletions(-) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index c7782b278e8b..790b786e1ae1 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1410,9 +1410,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) */ void nfs4_evict_inode(struct inode *inode) { + pnfs_destroy_layout(NFS_I(inode)); truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); - pnfs_destroy_layout(NFS_I(inode)); /* If we are holding a delegation, return it! 
*/ nfs_inode_return_delegation_noreclaim(inode); /* First call standard NFS clear_inode() code */ diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 08313f536b45..212cbc22c59d 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -211,68 +211,109 @@ static void init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg) { INIT_LIST_HEAD(&lseg->pls_list); - kref_init(&lseg->pls_refcount); + atomic_set(&lseg->pls_refcount, 1); + smp_mb(); + set_bit(NFS_LSEG_VALID, &lseg->pls_flags); lseg->pls_layout = lo; } -/* Called without i_lock held, as the free_lseg call may sleep */ -static void -destroy_lseg(struct kref *kref) +static void free_lseg(struct pnfs_layout_segment *lseg) { - struct pnfs_layout_segment *lseg = - container_of(kref, struct pnfs_layout_segment, pls_refcount); struct inode *ino = lseg->pls_layout->plh_inode; - dprintk("--> %s\n", __func__); NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg); /* Matched by get_layout_hdr in pnfs_insert_layout */ put_layout_hdr(ino); } -static void -put_lseg(struct pnfs_layout_segment *lseg) +/* The use of tmp_list is necessary because pnfs_curr_ld->free_lseg + * could sleep, so must be called outside of the lock. + * Returns 1 if object was removed, otherwise return 0. + */ +static int +put_lseg_locked(struct pnfs_layout_segment *lseg, + struct list_head *tmp_list) { - if (!lseg) - return; + dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg, + atomic_read(&lseg->pls_refcount), + test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); + if (atomic_dec_and_test(&lseg->pls_refcount)) { + struct inode *ino = lseg->pls_layout->plh_inode; - dprintk("%s: lseg %p ref %d\n", __func__, lseg, - atomic_read(&lseg->pls_refcount.refcount)); - kref_put(&lseg->pls_refcount, destroy_lseg); + BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); + list_del(&lseg->pls_list); + if (list_empty(&lseg->pls_layout->plh_segs)) { + struct nfs_client *clp; + + clp = NFS_SERVER(ino)->nfs_client; + spin_lock(&clp->cl_lock); + /* List does not take a reference, so no need for put here */ + list_del_init(&lseg->pls_layout->plh_layouts); + spin_unlock(&clp->cl_lock); + } + list_add(&lseg->pls_list, tmp_list); + return 1; + } + return 0; } -static void -pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list) +static bool +should_free_lseg(u32 lseg_iomode, u32 recall_iomode) { - struct pnfs_layout_segment *lseg, *next; - struct nfs_client *clp; + return (recall_iomode == IOMODE_ANY || + lseg_iomode == recall_iomode); +} - dprintk("%s:Begin lo %p\n", __func__, lo); +/* Returns 1 if lseg is removed from list, 0 otherwise */ +static int mark_lseg_invalid(struct pnfs_layout_segment *lseg, + struct list_head *tmp_list) +{ + int rv = 0; - assert_spin_locked(&lo->plh_inode->i_lock); - list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) { - dprintk("%s: freeing lseg %p\n", __func__, lseg); - list_move(&lseg->pls_list, tmp_list); + if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) { + /* Remove the reference keeping the lseg in the + * list. It will now be removed when all + * outstanding io is finished. + */ + rv = put_lseg_locked(lseg, tmp_list); } - clp = NFS_SERVER(lo->plh_inode)->nfs_client; - spin_lock(&clp->cl_lock); - /* List does not take a reference, so no need for put here */ - list_del_init(&lo->plh_layouts); - spin_unlock(&clp->cl_lock); + return rv; +} - dprintk("%s:Return\n", __func__); +/* Returns count of number of matching invalid lsegs remaining in list + * after call. 
+ */ +static int +mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, + struct list_head *tmp_list, + u32 iomode) +{ + struct pnfs_layout_segment *lseg, *next; + int invalid = 0, removed = 0; + + dprintk("%s:Begin lo %p\n", __func__, lo); + + list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) + if (should_free_lseg(lseg->pls_range.iomode, iomode)) { + dprintk("%s: freeing lseg %p iomode %d " + "offset %llu length %llu\n", __func__, + lseg, lseg->pls_range.iomode, lseg->pls_range.offset, + lseg->pls_range.length); + invalid++; + removed += mark_lseg_invalid(lseg, tmp_list); + } + dprintk("%s:Return %i\n", __func__, invalid - removed); + return invalid - removed; } static void -pnfs_free_lseg_list(struct list_head *tmp_list) +pnfs_free_lseg_list(struct list_head *free_me) { - struct pnfs_layout_segment *lseg; + struct pnfs_layout_segment *lseg, *tmp; - while (!list_empty(tmp_list)) { - lseg = list_entry(tmp_list->next, struct pnfs_layout_segment, - pls_list); - dprintk("%s calling put_lseg on %p\n", __func__, lseg); + list_for_each_entry_safe(lseg, tmp, free_me, pls_list) { list_del(&lseg->pls_list); - put_lseg(lseg); + free_lseg(lseg); } } @@ -285,7 +326,8 @@ pnfs_destroy_layout(struct nfs_inode *nfsi) spin_lock(&nfsi->vfs_inode.i_lock); lo = nfsi->layout; if (lo) { - pnfs_clear_lseg_list(lo, &tmp_list); + set_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags); + mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY); /* Matched by refcount set to 1 in alloc_init_layout_hdr */ put_layout_hdr_locked(lo); } @@ -477,9 +519,12 @@ pnfs_find_alloc_layout(struct inode *ino) dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout); assert_spin_locked(&ino->i_lock); - if (nfsi->layout) - return nfsi->layout; - + if (nfsi->layout) { + if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags)) + return NULL; + else + return nfsi->layout; + } spin_unlock(&ino->i_lock); new = alloc_init_layout_hdr(ino); spin_lock(&ino->i_lock); @@ -520,7 +565,8 @@ pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode) assert_spin_locked(&lo->plh_inode->i_lock); list_for_each_entry(lseg, &lo->plh_segs, pls_list) { - if (is_matching_lseg(lseg, iomode)) { + if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) && + is_matching_lseg(lseg, iomode)) { ret = lseg; break; } @@ -529,7 +575,7 @@ pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode) } dprintk("%s:Return lseg %p ref %d\n", - __func__, ret, ret ? atomic_read(&ret->pls_refcount.refcount) : 0); + __func__, ret, ret ? 
atomic_read(&ret->pls_refcount) : 0); return ret; } diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 10937203d236..787253e6fca3 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -30,10 +30,15 @@ #ifndef FS_NFS_PNFS_H #define FS_NFS_PNFS_H +enum { + NFS_LSEG_VALID = 0, /* cleared when lseg is recalled/returned */ +}; + struct pnfs_layout_segment { struct list_head pls_list; struct pnfs_layout_range pls_range; - struct kref pls_refcount; + atomic_t pls_refcount; + unsigned long pls_flags; struct pnfs_layout_hdr *pls_layout; }; @@ -44,6 +49,7 @@ struct pnfs_layout_segment { enum { NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */ NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */ + NFS_LAYOUT_DESTROYED, /* no new use of layout allowed */ }; /* Per-layout driver specific registration structure */ -- cgit v1.2.2 From c31663d4a1fac5ce1954d656cbcf80eb883b814a Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:24 +0000 Subject: pnfs: layoutget rpc code cleanup No functional changes, just some code minor code rearrangement and comments. Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 28e175e74de2..5bee453d36d6 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5293,10 +5293,14 @@ static void nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) { struct nfs4_layoutget *lgp = calldata; - struct inode *ino = lgp->args.inode; - struct nfs_server *server = NFS_SERVER(ino); + struct nfs_server *server = NFS_SERVER(lgp->args.inode); dprintk("--> %s\n", __func__); + /* Note the is a race here, where a CB_LAYOUTRECALL can come in + * right now covering the LAYOUTGET we are about to send. + * However, that is not so catastrophic, and there seems + * to be no way to prevent it completely. + */ if (nfs4_setup_sequence(server, &lgp->args.seq_args, &lgp->res.seq_res, 0, task)) return; @@ -5379,13 +5383,10 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp) if (IS_ERR(task)) return PTR_ERR(task); status = nfs4_wait_for_completion_rpc_task(task); - if (status != 0) - goto out; - status = task->tk_status; - if (status != 0) - goto out; - status = pnfs_layout_process(lgp); -out: + if (status == 0) + status = task->tk_status; + if (status == 0) + status = pnfs_layout_process(lgp); rpc_put_task(task); dprintk("<-- %s status=%d\n", __func__, status); return status; -- cgit v1.2.2 From cf7d63f1f9895713551df2e6d18b006f8af26e91 Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:25 +0000 Subject: pnfs: serialize LAYOUTGET(openstateid) We shouldn't send a LAYOUTGET(openstateid) unless all outstanding RPCs using the previous stateid are completed. This requires choosing the stateid to encode earlier, so we can abort if one is not available (we want to use the open stateid, but a LAYOUTGET is already out using it), and adding a count of the number of outstanding rpc calls using layout state (which for now consist solely of LAYOUTGETs). 
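The serialization rule above can be read as a small decision procedure: with no cached segments a LAYOUTGET must carry the open stateid, and that is only safe when no other LAYOUTGET is in flight; once segments (and thus a layout stateid) exist, concurrent LAYOUTGETs are fine. A simplified stand-alone sketch of that rule follows; the toy_* types and the helper name are illustrative only, not the kernel API.

#include <stdio.h>

struct toy_stateid { unsigned char data[16]; };

struct toy_layout_hdr {
	int nsegs;			/* cached layout segments */
	int other_layoutgets;		/* LAYOUTGETs already on the wire */
	struct toy_stateid layout_stateid;
	struct toy_stateid open_stateid;
};

/* Returns 0 and fills *dst on success, -1 if the caller must back off
 * because the open stateid is needed but another LAYOUTGET is still
 * using the previous stateid. */
static int toy_choose_layoutget_stateid(const struct toy_layout_hdr *lo,
					struct toy_stateid *dst)
{
	if (lo->nsegs == 0) {
		if (lo->other_layoutgets > 0)
			return -1;		/* retry later (EAGAIN) */
		*dst = lo->open_stateid;	/* first layout for this file */
	} else {
		*dst = lo->layout_stateid;	/* normal case */
	}
	return 0;
}

int main(void)
{
	struct toy_layout_hdr lo = { .nsegs = 0, .other_layoutgets = 1 };
	struct toy_stateid sid;

	printf("second LAYOUTGET while the first is out: %s\n",
	       toy_choose_layoutget_stateid(&lo, &sid) ? "deferred" : "sent");
	return 0;
}

In the patch itself this decision is made in pnfs_choose_layoutget_stateid() under the inode lock, and the in-flight count is the new plh_outstanding field.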
Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 7 ++++++- fs/nfs/nfs4xdr.c | 5 +---- fs/nfs/pnfs.c | 24 +++++++++++++++++++----- fs/nfs/pnfs.h | 1 + 4 files changed, 27 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 5bee453d36d6..a3549ce72ab2 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5304,6 +5304,12 @@ nfs4_layoutget_prepare(struct rpc_task *task, void *calldata) if (nfs4_setup_sequence(server, &lgp->args.seq_args, &lgp->res.seq_res, 0, task)) return; + if (pnfs_choose_layoutget_stateid(&lgp->args.stateid, + NFS_I(lgp->args.inode)->layout, + lgp->args.ctx->state)) { + rpc_exit(task, NFS4_OK); + return; + } rpc_call_start(task); } @@ -5338,7 +5344,6 @@ static void nfs4_layoutget_release(void *calldata) struct nfs4_layoutget *lgp = calldata; dprintk("--> %s\n", __func__); - put_layout_hdr(lgp->args.inode); if (lgp->res.layout.buf != NULL) free_page((unsigned long) lgp->res.layout.buf); put_nfs_open_context(lgp->args.ctx); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 4e28242360d6..3cbdd0c80a2d 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1787,7 +1787,6 @@ encode_layoutget(struct xdr_stream *xdr, const struct nfs4_layoutget_args *args, struct compound_hdr *hdr) { - nfs4_stateid stateid; __be32 *p; p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE); @@ -1798,9 +1797,7 @@ encode_layoutget(struct xdr_stream *xdr, p = xdr_encode_hyper(p, args->range.offset); p = xdr_encode_hyper(p, args->range.length); p = xdr_encode_hyper(p, args->minlength); - pnfs_choose_layoutget_stateid(&stateid, NFS_I(args->inode)->layout, - args->ctx->state); - p = xdr_encode_opaque_fixed(p, &stateid.data, NFS4_STATEID_SIZE); + p = xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE); *p = cpu_to_be32(args->maxcount); dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n", diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 212cbc22c59d..59ed68bf79fa 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -371,6 +371,14 @@ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid)); } +/* lget is set to 1 if called from inside send_layoutget call chain */ +static bool +pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, int lget) +{ + return (list_empty(&lo->plh_segs) && + (atomic_read(&lo->plh_outstanding) > lget)); +} + int pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, struct nfs4_state *open_state) @@ -379,7 +387,9 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, dprintk("--> %s\n", __func__); spin_lock(&lo->plh_inode->i_lock); - if (list_empty(&lo->plh_segs)) { + if (pnfs_layoutgets_blocked(lo, 1)) { + status = -EAGAIN; + } else if (list_empty(&lo->plh_segs)) { int seq; do { @@ -414,10 +424,8 @@ send_layoutget(struct pnfs_layout_hdr *lo, BUG_ON(ctx == NULL); lgp = kzalloc(sizeof(*lgp), GFP_KERNEL); - if (lgp == NULL) { - put_layout_hdr(lo->plh_inode); + if (lgp == NULL) return NULL; - } lgp->args.minlength = NFS4_MAX_UINT64; lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE; lgp->args.range.iomode = iomode; @@ -613,10 +621,16 @@ pnfs_update_layout(struct inode *ino, if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags)) goto out_unlock; - get_layout_hdr_locked(lo); /* Matched in nfs4_layoutget_release */ + if (pnfs_layoutgets_blocked(lo, 0)) + goto out_unlock; + atomic_inc(&lo->plh_outstanding); + + get_layout_hdr_locked(lo); spin_unlock(&ino->i_lock); lseg = 
send_layoutget(lo, ctx, iomode); + atomic_dec(&lo->plh_outstanding); + put_layout_hdr(ino); out: dprintk("%s end, state 0x%lx lseg %p\n", __func__, nfsi->layout->plh_flags, lseg); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 787253e6fca3..698380da24cc 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -69,6 +69,7 @@ struct pnfs_layout_hdr { struct list_head plh_layouts; /* other client layouts */ struct list_head plh_segs; /* layout segments list */ nfs4_stateid plh_stateid; + atomic_t plh_outstanding; /* number of RPCs out */ unsigned long plh_flags; struct inode *plh_inode; }; -- cgit v1.2.2 From 2130ff663633e8a57921779ebfe62fc39d5585ec Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:26 +0000 Subject: pnfs: add layout to client list before sending rpc Since this list will be used to search for layouts to recall, this is necessary to avoid a race where the recall comes in, sees there is nothing in the client list, and prepares to return NOMATCHING, while the LAYOUTGET gets processed before the recall updates the stateid. Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 59ed68bf79fa..c00b673261f9 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -469,14 +469,6 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo, dprintk("%s:Begin\n", __func__); assert_spin_locked(&lo->plh_inode->i_lock); - if (list_empty(&lo->plh_segs)) { - struct nfs_client *clp = NFS_SERVER(lo->plh_inode)->nfs_client; - - spin_lock(&clp->cl_lock); - BUG_ON(!list_empty(&lo->plh_layouts)); - list_add_tail(&lo->plh_layouts, &clp->cl_layouts); - spin_unlock(&clp->cl_lock); - } list_for_each_entry(lp, &lo->plh_segs, pls_list) { if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0) continue; @@ -597,6 +589,7 @@ pnfs_update_layout(struct inode *ino, enum pnfs_iomode iomode) { struct nfs_inode *nfsi = NFS_I(ino); + struct nfs_client *clp = NFS_SERVER(ino)->nfs_client; struct pnfs_layout_hdr *lo; struct pnfs_layout_segment *lseg = NULL; @@ -626,9 +619,27 @@ pnfs_update_layout(struct inode *ino, atomic_inc(&lo->plh_outstanding); get_layout_hdr_locked(lo); + if (list_empty(&lo->plh_segs)) { + /* The lo must be on the clp list if there is any + * chance of a CB_LAYOUTRECALL(FILE) coming in. + */ + spin_lock(&clp->cl_lock); + BUG_ON(!list_empty(&lo->plh_layouts)); + list_add_tail(&lo->plh_layouts, &clp->cl_layouts); + spin_unlock(&clp->cl_lock); + } spin_unlock(&ino->i_lock); lseg = send_layoutget(lo, ctx, iomode); + if (!lseg) { + spin_lock(&ino->i_lock); + if (list_empty(&lo->plh_segs)) { + spin_lock(&clp->cl_lock); + list_del_init(&lo->plh_layouts); + spin_unlock(&clp->cl_lock); + } + spin_unlock(&ino->i_lock); + } atomic_dec(&lo->plh_outstanding); put_layout_hdr(ino); out: -- cgit v1.2.2 From fc1794c5b04f5322bad05385cd91b52ec85aab72 Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:27 +0000 Subject: pnfs: check that partial LAYOUTGET return is ignored Either a bad server reply, or our ignoring of multiple array segments in a reply, can cause a reply to not meet our requirements. Ensure that we ignore such replies. 
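The sanity check described above amounts to a three-part predicate on the returned range: the granted iomode must be at least what was requested, and the layout must cover the whole file (offset 0, maximum length). A hedged stand-alone restatement, with toy_* names and constants standing in for the NFSv4.1 ones:

#include <stdint.h>
#include <stdbool.h>

#define TOY_MAX_UINT64	0xffffffffffffffffULL
enum toy_iomode { TOY_IOMODE_READ = 1, TOY_IOMODE_RW = 2 };

struct toy_range {
	enum toy_iomode iomode;
	uint64_t offset;
	uint64_t length;
};

/* A LAYOUTGET reply is only usable when it is a whole-file layout at
 * least as permissive as the request; otherwise it is dropped and the
 * client behaves as if no layout had been returned. */
bool toy_layoutget_reply_usable(const struct toy_range *asked,
				const struct toy_range *got)
{
	return got->iomode >= asked->iomode &&
	       got->offset == 0 &&
	       got->length == TOY_MAX_UINT64;
}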
Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index c00b673261f9..cd9906415a14 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -660,6 +660,17 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) struct inode *ino = lo->plh_inode; int status = 0; + /* Verify we got what we asked for. + * Note that because the xdr parsing only accepts a single + * element array, this can fail even if the server is behaving + * correctly. + */ + if (lgp->args.range.iomode > res->range.iomode || + res->range.offset != 0 || + res->range.length != NFS4_MAX_UINT64) { + status = -EINVAL; + goto out; + } /* Inject layout blob into I/O device driver */ lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res); if (!lseg || IS_ERR(lseg)) { -- cgit v1.2.2 From cc6e5340b0981feac5a00a992bab6154cb4b1fa1 Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:28 +0000 Subject: pnfs: change lo refcounting to atomic_t This will be required to allow us to grab reference outside of i_lock. While we are at it, make put_layout_hdr take the same argument as all the related functions. Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 48 ++++++++++++++++++++++++++---------------------- fs/nfs/pnfs.h | 4 ++-- 2 files changed, 28 insertions(+), 24 deletions(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index cd9906415a14..32b66468e5db 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -177,34 +177,38 @@ EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver); * pNFS client layout cache */ +/* Need to hold i_lock if caller does not already hold reference */ static void -get_layout_hdr_locked(struct pnfs_layout_hdr *lo) +get_layout_hdr(struct pnfs_layout_hdr *lo) { - assert_spin_locked(&lo->plh_inode->i_lock); - lo->plh_refcount++; + atomic_inc(&lo->plh_refcount); } static void -put_layout_hdr_locked(struct pnfs_layout_hdr *lo) +destroy_layout_hdr(struct pnfs_layout_hdr *lo) { - assert_spin_locked(&lo->plh_inode->i_lock); - BUG_ON(lo->plh_refcount == 0); + dprintk("%s: freeing layout cache %p\n", __func__, lo); + BUG_ON(!list_empty(&lo->plh_layouts)); + NFS_I(lo->plh_inode)->layout = NULL; + kfree(lo); +} - lo->plh_refcount--; - if (!lo->plh_refcount) { - dprintk("%s: freeing layout cache %p\n", __func__, lo); - BUG_ON(!list_empty(&lo->plh_layouts)); - NFS_I(lo->plh_inode)->layout = NULL; - kfree(lo); - } +static void +put_layout_hdr_locked(struct pnfs_layout_hdr *lo) +{ + if (atomic_dec_and_test(&lo->plh_refcount)) + destroy_layout_hdr(lo); } void -put_layout_hdr(struct inode *inode) +put_layout_hdr(struct pnfs_layout_hdr *lo) { - spin_lock(&inode->i_lock); - put_layout_hdr_locked(NFS_I(inode)->layout); - spin_unlock(&inode->i_lock); + struct inode *inode = lo->plh_inode; + + if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) { + destroy_layout_hdr(lo); + spin_unlock(&inode->i_lock); + } } static void @@ -223,7 +227,7 @@ static void free_lseg(struct pnfs_layout_segment *lseg) NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg); /* Matched by get_layout_hdr in pnfs_insert_layout */ - put_layout_hdr(ino); + put_layout_hdr(NFS_I(ino)->layout); } /* The use of tmp_list is necessary because pnfs_curr_ld->free_lseg @@ -490,7 +494,7 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo, __func__, lseg, lseg->pls_range.iomode, lseg->pls_range.offset, lseg->pls_range.length); } - get_layout_hdr_locked(lo); + get_layout_hdr(lo); dprintk("%s:Return\n", 
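For orientation, the decoder added below consumes the CB_LAYOUTRECALL arguments in this order (paraphrasing the decode routine; the field names here are descriptive, not authoritative):

	uint32  layout type
	uint32  iomode            (only meaningful for the FILE case)
	uint32  layoutchanged flag
	uint32  recall type       (RETURN_FILE, RETURN_FSID or RETURN_ALL)
	RETURN_FILE:  filehandle, 64-bit offset, 64-bit length, stateid
	RETURN_FSID:  64-bit fsid major, 64-bit fsid minor
	RETURN_ALL:   no further arguments; any other value yields NFS4ERR_BADXDR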
__func__); } @@ -503,7 +507,7 @@ alloc_init_layout_hdr(struct inode *ino) lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL); if (!lo) return NULL; - lo->plh_refcount = 1; + atomic_set(&lo->plh_refcount, 1); INIT_LIST_HEAD(&lo->plh_layouts); INIT_LIST_HEAD(&lo->plh_segs); lo->plh_inode = ino; @@ -618,7 +622,7 @@ pnfs_update_layout(struct inode *ino, goto out_unlock; atomic_inc(&lo->plh_outstanding); - get_layout_hdr_locked(lo); + get_layout_hdr(lo); if (list_empty(&lo->plh_segs)) { /* The lo must be on the clp list if there is any * chance of a CB_LAYOUTRECALL(FILE) coming in. @@ -641,7 +645,7 @@ pnfs_update_layout(struct inode *ino, spin_unlock(&ino->i_lock); } atomic_dec(&lo->plh_outstanding); - put_layout_hdr(ino); + put_layout_hdr(lo); out: dprintk("%s end, state 0x%lx lseg %p\n", __func__, nfsi->layout->plh_flags, lseg); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 698380da24cc..8aaab56b794f 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -65,7 +65,7 @@ struct pnfs_layoutdriver_type { }; struct pnfs_layout_hdr { - unsigned long plh_refcount; + atomic_t plh_refcount; struct list_head plh_layouts; /* other client layouts */ struct list_head plh_segs; /* layout segments list */ nfs4_stateid plh_stateid; @@ -147,7 +147,7 @@ void unset_pnfs_layoutdriver(struct nfs_server *); int pnfs_layout_process(struct nfs4_layoutget *lgp); void pnfs_destroy_layout(struct nfs_inode *); void pnfs_destroy_all_layouts(struct nfs_client *); -void put_layout_hdr(struct inode *inode); +void put_layout_hdr(struct pnfs_layout_hdr *lo); int pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, struct nfs4_state *open_state); -- cgit v1.2.2 From f2a625616045fe46e1d5fceebdd825f5acdecdb7 Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:29 +0000 Subject: pnfs: CB_LAYOUTRECALL xdr code This is the xdr decoding for CB_LAYOUTRECALL. 
Signed-off-by: Alexandros Batsakis Signed-off-by: Dean Hildebrand Signed-off-by: Marc Eshel Signed-off-by: Andy Adamson Signed-off-by: Benny Halevy Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/callback.h | 19 ++++++++++++++ fs/nfs/callback_proc.c | 6 +++++ fs/nfs/callback_xdr.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 93 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index b678e3e15bd9..f6768ac09190 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -138,6 +138,25 @@ extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy, struct cb_process_state *cps); +struct cb_layoutrecallargs { + struct sockaddr *cbl_addr; + uint32_t cbl_recall_type; + uint32_t cbl_layout_type; + uint32_t cbl_layoutchanged; + union { + struct { + struct nfs_fh cbl_fh; + struct pnfs_layout_range cbl_range; + nfs4_stateid cbl_stateid; + }; + struct nfs_fsid cbl_fsid; + }; +}; + +extern unsigned nfs4_callback_layoutrecall( + struct cb_layoutrecallargs *args, + void *dummy, struct cb_process_state *cps); + extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses); extern void nfs4_cb_take_slot(struct nfs_client *clp); #endif /* CONFIG_NFS_V4_1 */ diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index c1bead2f3e04..c1bb157e94bd 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -107,6 +107,12 @@ int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, const nf #if defined(CONFIG_NFS_V4_1) +__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args, + void *dummy, struct cb_process_state *cps) +{ + return cpu_to_be32(NFS4ERR_NOTSUPP); /* STUB */ +} + int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid) { if (delegation == NULL) diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 7a2d6c5864ca..23112c263f81 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -24,6 +24,7 @@ #define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) #if defined(CONFIG_NFS_V4_1) +#define CB_OP_LAYOUTRECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) #define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \ 4 + 1 + 3) #define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ) @@ -223,6 +224,66 @@ out: #if defined(CONFIG_NFS_V4_1) +static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp, + struct xdr_stream *xdr, + struct cb_layoutrecallargs *args) +{ + __be32 *p; + __be32 status = 0; + uint32_t iomode; + + args->cbl_addr = svc_addr(rqstp); + p = read_buf(xdr, 4 * sizeof(uint32_t)); + if (unlikely(p == NULL)) { + status = htonl(NFS4ERR_BADXDR); + goto out; + } + + args->cbl_layout_type = ntohl(*p++); + /* Depite the spec's xdr, iomode really belongs in the FILE switch, + * as it is unuseable and ignored with the other types. 
+ */ + iomode = ntohl(*p++); + args->cbl_layoutchanged = ntohl(*p++); + args->cbl_recall_type = ntohl(*p++); + + if (args->cbl_recall_type == RETURN_FILE) { + args->cbl_range.iomode = iomode; + status = decode_fh(xdr, &args->cbl_fh); + if (unlikely(status != 0)) + goto out; + + p = read_buf(xdr, 2 * sizeof(uint64_t)); + if (unlikely(p == NULL)) { + status = htonl(NFS4ERR_BADXDR); + goto out; + } + p = xdr_decode_hyper(p, &args->cbl_range.offset); + p = xdr_decode_hyper(p, &args->cbl_range.length); + status = decode_stateid(xdr, &args->cbl_stateid); + if (unlikely(status != 0)) + goto out; + } else if (args->cbl_recall_type == RETURN_FSID) { + p = read_buf(xdr, 2 * sizeof(uint64_t)); + if (unlikely(p == NULL)) { + status = htonl(NFS4ERR_BADXDR); + goto out; + } + p = xdr_decode_hyper(p, &args->cbl_fsid.major); + p = xdr_decode_hyper(p, &args->cbl_fsid.minor); + } else if (args->cbl_recall_type != RETURN_ALL) { + status = htonl(NFS4ERR_BADXDR); + goto out; + } + dprintk("%s: ltype 0x%x iomode %d changed %d recall_type %d\n", + __func__, + args->cbl_layout_type, iomode, + args->cbl_layoutchanged, args->cbl_recall_type); +out: + dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); + return status; +} + static __be32 decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid) { @@ -577,10 +638,10 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op) case OP_CB_SEQUENCE: case OP_CB_RECALL_ANY: case OP_CB_RECALL_SLOT: + case OP_CB_LAYOUTRECALL: *op = &callback_ops[op_nr]; break; - case OP_CB_LAYOUTRECALL: case OP_CB_NOTIFY_DEVICEID: case OP_CB_NOTIFY: case OP_CB_PUSH_DELEG: @@ -783,6 +844,12 @@ static struct callback_op callback_ops[] = { .res_maxsize = CB_OP_RECALL_RES_MAXSZ, }, #if defined(CONFIG_NFS_V4_1) + [OP_CB_LAYOUTRECALL] = { + .process_op = (callback_process_op_t)nfs4_callback_layoutrecall, + .decode_args = + (callback_decode_arg_t)decode_layoutrecall_args, + .res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ, + }, [OP_CB_SEQUENCE] = { .process_op = (callback_process_op_t)nfs4_callback_sequence, .decode_args = (callback_decode_arg_t)decode_cb_sequence_args, -- cgit v1.2.2 From 43f1b3da8b35d706d6c47880fc211d2519b4a587 Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:30 +0000 Subject: pnfs: add CB_LAYOUTRECALL handling This is the heart of the wave 2 submission. Add the code to trigger drain and forget of any afected layouts. In addition, we set a "barrier", below which any LAYOUTGET reply is ignored. This is to compensate for the fact that we do not wait for outstanding LAYOUTGETs to complete as per section 12.5.5.2.1 of RFC 5661. 
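The barrier logic leans on serial-number arithmetic: sequence ids live in a wrapping 32-bit space, so "newer" is decided by the sign of the difference rather than an ordinary compare, and a reply whose stateid seqid is at or below the barrier is thrown away. A self-contained sketch of those two comparisons, assuming nothing beyond standard C (function names are illustrative):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* "a is newer than b" in a wrapping 32-bit sequence space: the signed
 * difference is positive whenever the two values are within 2^31 of
 * each other, which also handles wraparound at 0xffffffff -> 0. */
static bool toy_seqid_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

/* Replies at or below the barrier raced with a recall that has already
 * been processed, so they must be forgotten rather than cached. */
static bool toy_reply_behind_barrier(uint32_t reply_seqid, uint32_t barrier)
{
	return !toy_seqid_after(reply_seqid, barrier);
}

int main(void)
{
	printf("%d\n", toy_seqid_after(5, 3));			/* 1: newer */
	printf("%d\n", toy_seqid_after(3, 0xfffffffeU));	/* 1: newer across the wrap */
	printf("%d\n", toy_reply_behind_barrier(7, 9));		/* 1: reply is ignored */
	return 0;
}

This mirrors the (int)(newseq - oldseq) > 0 test in pnfs_set_layout_stateid() and the barrier check in pnfs_layoutgets_blocked() in the pnfs.c hunks below.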
Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/callback_proc.c | 119 ++++++++++++++++++++++++++++++++++++++++++++++++- fs/nfs/nfs4_fs.h | 1 + fs/nfs/pnfs.c | 83 ++++++++++++++++++++++++++-------- fs/nfs/pnfs.h | 11 +++++ 4 files changed, 194 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index c1bb157e94bd..6619c05b55a0 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -12,6 +12,7 @@ #include "callback.h" #include "delegation.h" #include "internal.h" +#include "pnfs.h" #ifdef NFS_DEBUG #define NFSDBG_FACILITY NFSDBG_CALLBACK @@ -107,10 +108,126 @@ int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, const nf #if defined(CONFIG_NFS_V4_1) +static u32 initiate_file_draining(struct nfs_client *clp, + struct cb_layoutrecallargs *args) +{ + struct pnfs_layout_hdr *lo; + struct inode *ino; + bool found = false; + u32 rv = NFS4ERR_NOMATCHING_LAYOUT; + LIST_HEAD(free_me_list); + + spin_lock(&clp->cl_lock); + list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) { + if (nfs_compare_fh(&args->cbl_fh, + &NFS_I(lo->plh_inode)->fh)) + continue; + ino = igrab(lo->plh_inode); + if (!ino) + continue; + found = true; + /* Without this, layout can be freed as soon + * as we release cl_lock. + */ + get_layout_hdr(lo); + break; + } + spin_unlock(&clp->cl_lock); + if (!found) + return NFS4ERR_NOMATCHING_LAYOUT; + + spin_lock(&ino->i_lock); + if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) || + mark_matching_lsegs_invalid(lo, &free_me_list, + args->cbl_range.iomode)) + rv = NFS4ERR_DELAY; + else + rv = NFS4ERR_NOMATCHING_LAYOUT; + pnfs_set_layout_stateid(lo, &args->cbl_stateid, true); + spin_unlock(&ino->i_lock); + pnfs_free_lseg_list(&free_me_list); + put_layout_hdr(lo); + iput(ino); + return rv; +} + +static u32 initiate_bulk_draining(struct nfs_client *clp, + struct cb_layoutrecallargs *args) +{ + struct pnfs_layout_hdr *lo; + struct inode *ino; + u32 rv = NFS4ERR_NOMATCHING_LAYOUT; + struct pnfs_layout_hdr *tmp; + LIST_HEAD(recall_list); + LIST_HEAD(free_me_list); + struct pnfs_layout_range range = { + .iomode = IOMODE_ANY, + .offset = 0, + .length = NFS4_MAX_UINT64, + }; + + spin_lock(&clp->cl_lock); + list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) { + if ((args->cbl_recall_type == RETURN_FSID) && + memcmp(&NFS_SERVER(lo->plh_inode)->fsid, + &args->cbl_fsid, sizeof(struct nfs_fsid))) + continue; + if (!igrab(lo->plh_inode)) + continue; + get_layout_hdr(lo); + BUG_ON(!list_empty(&lo->plh_bulk_recall)); + list_add(&lo->plh_bulk_recall, &recall_list); + } + spin_unlock(&clp->cl_lock); + list_for_each_entry_safe(lo, tmp, + &recall_list, plh_bulk_recall) { + ino = lo->plh_inode; + spin_lock(&ino->i_lock); + set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags); + if (mark_matching_lsegs_invalid(lo, &free_me_list, range.iomode)) + rv = NFS4ERR_DELAY; + list_del_init(&lo->plh_bulk_recall); + spin_unlock(&ino->i_lock); + put_layout_hdr(lo); + iput(ino); + } + pnfs_free_lseg_list(&free_me_list); + return rv; +} + +static u32 do_callback_layoutrecall(struct nfs_client *clp, + struct cb_layoutrecallargs *args) +{ + u32 res = NFS4ERR_DELAY; + + dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type); + if (test_and_set_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state)) + goto out; + if (args->cbl_recall_type == RETURN_FILE) + res = initiate_file_draining(clp, args); + else + res = initiate_bulk_draining(clp, args); + clear_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state); +out: + dprintk("%s 
returning %i\n", __func__, res); + return res; + +} + __be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args, void *dummy, struct cb_process_state *cps) { - return cpu_to_be32(NFS4ERR_NOTSUPP); /* STUB */ + u32 res; + + dprintk("%s: -->\n", __func__); + + if (cps->clp) + res = do_callback_layoutrecall(cps->clp, args); + else + res = NFS4ERR_OP_NOT_IN_SESSION; + + dprintk("%s: exit with status = %d\n", __func__, res); + return cpu_to_be32(res); } int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid) diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 3b3829c3098f..8f169dc789db 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -44,6 +44,7 @@ enum nfs4_client_state { NFS4CLNT_RECLAIM_REBOOT, NFS4CLNT_RECLAIM_NOGRACE, NFS4CLNT_DELEGRETURN, + NFS4CLNT_LAYOUTRECALL, NFS4CLNT_SESSION_RESET, NFS4CLNT_RECALL_SLOT, }; diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 32b66468e5db..bf4186b8f2fc 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -178,7 +178,7 @@ EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver); */ /* Need to hold i_lock if caller does not already hold reference */ -static void +void get_layout_hdr(struct pnfs_layout_hdr *lo) { atomic_inc(&lo->plh_refcount); @@ -254,6 +254,7 @@ put_lseg_locked(struct pnfs_layout_segment *lseg, /* List does not take a reference, so no need for put here */ list_del_init(&lseg->pls_layout->plh_layouts); spin_unlock(&clp->cl_lock); + clear_bit(NFS_LAYOUT_BULK_RECALL, &lseg->pls_layout->plh_flags); } list_add(&lseg->pls_list, tmp_list); return 1; @@ -287,7 +288,7 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg, /* Returns count of number of matching invalid lsegs remaining in list * after call. */ -static int +int mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, struct list_head *tmp_list, u32 iomode) @@ -310,7 +311,7 @@ mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, return invalid - removed; } -static void +void pnfs_free_lseg_list(struct list_head *free_me) { struct pnfs_layout_segment *lseg, *tmp; @@ -363,23 +364,45 @@ pnfs_destroy_all_layouts(struct nfs_client *clp) } /* update lo->plh_stateid with new if is more recent */ -static void -pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, - const nfs4_stateid *new) +void +pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new, + bool update_barrier) { u32 oldseq, newseq; oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid); newseq = be32_to_cpu(new->stateid.seqid); - if ((int)(newseq - oldseq) > 0) + if ((int)(newseq - oldseq) > 0) { memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid)); + if (update_barrier) { + u32 new_barrier = be32_to_cpu(new->stateid.seqid); + + if ((int)(new_barrier - lo->plh_barrier)) + lo->plh_barrier = new_barrier; + } else { + /* Because of wraparound, we want to keep the barrier + * "close" to the current seqids. It needs to be + * within 2**31 to count as "behind", so if it + * gets too near that limit, give us a litle leeway + * and bring it to within 2**30. + * NOTE - and yes, this is all unsigned arithmetic. 
+ */ + if (unlikely((newseq - lo->plh_barrier) > (3 << 29))) + lo->plh_barrier = newseq - (1 << 30); + } + } } /* lget is set to 1 if called from inside send_layoutget call chain */ static bool -pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, int lget) -{ - return (list_empty(&lo->plh_segs) && +pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid, + int lget) +{ + if ((stateid) && + (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0) + return true; + return test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) || + (list_empty(&lo->plh_segs) && (atomic_read(&lo->plh_outstanding) > lget)); } @@ -391,7 +414,7 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, dprintk("--> %s\n", __func__); spin_lock(&lo->plh_inode->i_lock); - if (pnfs_layoutgets_blocked(lo, 1)) { + if (pnfs_layoutgets_blocked(lo, NULL, 1)) { status = -EAGAIN; } else if (list_empty(&lo->plh_segs)) { int seq; @@ -510,6 +533,7 @@ alloc_init_layout_hdr(struct inode *ino) atomic_set(&lo->plh_refcount, 1); INIT_LIST_HEAD(&lo->plh_layouts); INIT_LIST_HEAD(&lo->plh_segs); + INIT_LIST_HEAD(&lo->plh_bulk_recall); lo->plh_inode = ino; return lo; } @@ -561,7 +585,7 @@ is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode) * lookup range in layout */ static struct pnfs_layout_segment * -pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode) +pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode) { struct pnfs_layout_segment *lseg, *ret = NULL; @@ -606,19 +630,22 @@ pnfs_update_layout(struct inode *ino, goto out_unlock; } - /* Check to see if the layout for the given range already exists */ - lseg = pnfs_has_layout(lo, iomode); - if (lseg) { - dprintk("%s: Using cached lseg %p for iomode %d)\n", - __func__, lseg, iomode); + /* Do we even need to bother with this? */ + if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) || + test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) { + dprintk("%s matches recall, use MDS\n", __func__); goto out_unlock; } + /* Check to see if the layout for the given range already exists */ + lseg = pnfs_find_lseg(lo, iomode); + if (lseg) + goto out_unlock; /* if LAYOUTGET already failed once we don't try again */ if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags)) goto out_unlock; - if (pnfs_layoutgets_blocked(lo, 0)) + if (pnfs_layoutgets_blocked(lo, NULL, 0)) goto out_unlock; atomic_inc(&lo->plh_outstanding); @@ -641,6 +668,7 @@ pnfs_update_layout(struct inode *ino, spin_lock(&clp->cl_lock); list_del_init(&lo->plh_layouts); spin_unlock(&clp->cl_lock); + clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags); } spin_unlock(&ino->i_lock); } @@ -662,6 +690,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) struct nfs4_layoutget_res *res = &lgp->res; struct pnfs_layout_segment *lseg; struct inode *ino = lo->plh_inode; + struct nfs_client *clp = NFS_SERVER(ino)->nfs_client; int status = 0; /* Verify we got what we asked for. @@ -688,16 +717,32 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) } spin_lock(&ino->i_lock); + if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) || + test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) { + dprintk("%s forget reply due to recall\n", __func__); + goto out_forget_reply; + } + + if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) { + dprintk("%s forget reply due to state\n", __func__); + goto out_forget_reply; + } init_lseg(lo, lseg); lseg->pls_range = res->range; *lgp->lsegpp = lseg; pnfs_insert_layout(lo, lseg); /* Done processing layoutget. 
Set the layout stateid */ - pnfs_set_layout_stateid(lo, &res->stateid); + pnfs_set_layout_stateid(lo, &res->stateid, false); spin_unlock(&ino->i_lock); out: return status; + +out_forget_reply: + spin_unlock(&ino->i_lock); + lseg->pls_layout = lo; + NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg); + goto out; } /* diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 8aaab56b794f..f91d0d45551c 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -49,6 +49,7 @@ struct pnfs_layout_segment { enum { NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */ NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */ + NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */ NFS_LAYOUT_DESTROYED, /* no new use of layout allowed */ }; @@ -67,9 +68,11 @@ struct pnfs_layoutdriver_type { struct pnfs_layout_hdr { atomic_t plh_refcount; struct list_head plh_layouts; /* other client layouts */ + struct list_head plh_bulk_recall; /* clnt list of bulk recalls */ struct list_head plh_segs; /* layout segments list */ nfs4_stateid plh_stateid; atomic_t plh_outstanding; /* number of RPCs out */ + u32 plh_barrier; /* ignore lower seqids */ unsigned long plh_flags; struct inode *plh_inode; }; @@ -139,18 +142,26 @@ extern int nfs4_proc_getdeviceinfo(struct nfs_server *server, extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp); /* pnfs.c */ +void get_layout_hdr(struct pnfs_layout_hdr *lo); struct pnfs_layout_segment * pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, enum pnfs_iomode access_type); void set_pnfs_layoutdriver(struct nfs_server *, u32 id); void unset_pnfs_layoutdriver(struct nfs_server *); int pnfs_layout_process(struct nfs4_layoutget *lgp); +void pnfs_free_lseg_list(struct list_head *tmp_list); void pnfs_destroy_layout(struct nfs_inode *); void pnfs_destroy_all_layouts(struct nfs_client *); void put_layout_hdr(struct pnfs_layout_hdr *lo); +void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, + const nfs4_stateid *new, + bool update_barrier); int pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo, struct nfs4_state *open_state); +int mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, + struct list_head *tmp_list, + u32 iomode); static inline int lo_fail_bit(u32 iomode) -- cgit v1.2.2 From 36840370845629e6cb4324d1dd4aff6778670503 Mon Sep 17 00:00:00 2001 From: Alexandros Batsakis Date: Thu, 6 Jan 2011 11:36:31 +0000 Subject: pnfs: update nfs4_callback_recallany to handle layouts While here, update the code a bit. 
Signed-off-by: Alexandros Batsakis Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/callback.h | 8 ++++++++ fs/nfs/callback_proc.c | 29 ++++++++++++++++++++++++++--- 2 files changed, 34 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index f6768ac09190..d3b44f9bd747 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -119,6 +119,14 @@ extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, #define RCA4_TYPE_MASK_RDATA_DLG 0 #define RCA4_TYPE_MASK_WDATA_DLG 1 +#define RCA4_TYPE_MASK_DIR_DLG 2 +#define RCA4_TYPE_MASK_FILE_LAYOUT 3 +#define RCA4_TYPE_MASK_BLK_LAYOUT 4 +#define RCA4_TYPE_MASK_OBJ_LAYOUT_MIN 8 +#define RCA4_TYPE_MASK_OBJ_LAYOUT_MAX 9 +#define RCA4_TYPE_MASK_OTHER_LAYOUT_MIN 12 +#define RCA4_TYPE_MASK_OTHER_LAYOUT_MAX 15 +#define RCA4_TYPE_MASK_ALL 0xf31f struct cb_recallanyargs { struct sockaddr *craa_addr; diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 6619c05b55a0..4bb91cb2620d 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -230,6 +230,17 @@ __be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args, return cpu_to_be32(res); } +static void pnfs_recall_all_layouts(struct nfs_client *clp) +{ + struct cb_layoutrecallargs args; + + /* Pretend we got a CB_LAYOUTRECALL(ALL) */ + memset(&args, 0, sizeof(args)); + args.cbl_recall_type = RETURN_ALL; + /* FIXME we ignore errors, what should we do? */ + do_callback_layoutrecall(clp, &args); +} + int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid) { if (delegation == NULL) @@ -421,29 +432,41 @@ out: return status; } +static bool +validate_bitmap_values(unsigned long mask) +{ + return (mask & ~RCA4_TYPE_MASK_ALL) == 0; +} + __be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy, struct cb_process_state *cps) { __be32 status; fmode_t flags = 0; - status = htonl(NFS4ERR_OP_NOT_IN_SESSION); + status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION); if (!cps->clp) /* set in cb_sequence */ goto out; dprintk("NFS: RECALL_ANY callback request from %s\n", rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR)); + status = cpu_to_be32(NFS4ERR_INVAL); + if (!validate_bitmap_values(args->craa_type_mask)) + goto out; + + status = cpu_to_be32(NFS4_OK); if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *) &args->craa_type_mask)) flags = FMODE_READ; if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *) &args->craa_type_mask)) flags |= FMODE_WRITE; - + if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *) + &args->craa_type_mask)) + pnfs_recall_all_layouts(cps->clp); if (flags) nfs_expire_all_delegation_types(cps->clp, flags); - status = htonl(NFS4_OK); out: dprintk("%s: exit with status = %d\n", __func__, ntohl(status)); return status; -- cgit v1.2.2 From f7e8917a67980924651a9e244510e63ef05c7755 Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Thu, 6 Jan 2011 11:36:32 +0000 Subject: pnfs: layout roc code A layout can request return-on-close. How this interacts with the forgetful model of never sending LAYOUTRETURNS is a bit ambiguous. We forget any layouts marked roc, and wait for them to be completely forgotten before continuing with the close. In addition, to compensate for races with any inflight LAYOUTGETs, and the fact that we do not get any layout stateid back from the server, we set the barrier to the worst case scenario of current_seqid + number of outstanding LAYOUTGETS. 
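The worst-case barrier mentioned above is straightforward arithmetic: if the layout stateid currently carries seqid N and K LAYOUTGETs are still in flight, the server may legitimately hand back seqids up to N + K, so ignoring everything at or below N + K covers every reply that can still arrive. A minimal illustration (the function name is made up for this sketch):

#include <stdint.h>
#include <stdio.h>

/* CLOSE returns no layout stateid to act as a barrier, so pick the
 * pessimistic one: current seqid plus one per outstanding LAYOUTGET,
 * since each of those may bump the seqid before its reply lands. */
static uint32_t toy_roc_worst_case_barrier(uint32_t current_seqid,
					   unsigned int outstanding_layoutgets)
{
	return current_seqid + outstanding_layoutgets;
}

int main(void)
{
	/* seqid 12 with 3 LAYOUTGETs in flight: ignore replies up to 15 */
	printf("barrier = %u\n", toy_roc_worst_case_barrier(12, 3));
	return 0;
}

In the patch this corresponds to the *barrier = current_seqid + atomic_read(&lo->plh_outstanding) assignment in the pnfs_roc_drain() hunk below.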
Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 12 ++++++++ fs/nfs/nfs4_fs.h | 2 +- fs/nfs/nfs4proc.c | 21 +++++++++++-- fs/nfs/nfs4state.c | 7 +++-- fs/nfs/pnfs.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++++++++- fs/nfs/pnfs.h | 29 ++++++++++++++++++ 6 files changed, 151 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 11eb9934c747..684b67771199 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -244,6 +244,11 @@ static void nfs_cb_idr_remove_locked(struct nfs_client *clp) idr_remove(&cb_ident_idr, clp->cl_cb_ident); } +static void pnfs_init_server(struct nfs_server *server) +{ + rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC"); +} + #else static void nfs4_shutdown_client(struct nfs_client *clp) { @@ -256,6 +261,11 @@ void nfs_cleanup_cb_ident_idr(void) static void nfs_cb_idr_remove_locked(struct nfs_client *clp) { } + +static void pnfs_init_server(struct nfs_server *server) +{ +} + #endif /* CONFIG_NFS_V4 */ /* @@ -1024,6 +1034,8 @@ static struct nfs_server *nfs_alloc_server(void) return NULL; } + pnfs_init_server(server); + return server; } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 8f169dc789db..18d64cb5985b 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -236,7 +236,7 @@ extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *); extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *); extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *); extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *); -extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait); +extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, struct nfs4_fs_locations *fs_locations, struct page *page); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index a3549ce72ab2..88f590feeb72 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1839,6 +1839,8 @@ struct nfs4_closedata { struct nfs_closeres res; struct nfs_fattr fattr; unsigned long timestamp; + bool roc; + u32 roc_barrier; }; static void nfs4_free_closedata(void *data) @@ -1846,6 +1848,8 @@ static void nfs4_free_closedata(void *data) struct nfs4_closedata *calldata = data; struct nfs4_state_owner *sp = calldata->state->owner; + if (calldata->roc) + pnfs_roc_release(calldata->state->inode); nfs4_put_open_state(calldata->state); nfs_free_seqid(calldata->arg.seqid); nfs4_put_state_owner(sp); @@ -1878,6 +1882,9 @@ static void nfs4_close_done(struct rpc_task *task, void *data) */ switch (task->tk_status) { case 0: + if (calldata->roc) + pnfs_roc_set_barrier(state->inode, + calldata->roc_barrier); nfs_set_open_stateid(state, &calldata->res.stateid, 0); renew_lease(server, calldata->timestamp); nfs4_close_clear_stateid_flags(state, @@ -1930,8 +1937,15 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) return; } - if (calldata->arg.fmode == 0) + if (calldata->arg.fmode == 0) { task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE]; + if (calldata->roc && + pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) { + rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq, + task, NULL); + return; + } + } nfs_fattr_init(calldata->res.fattr); calldata->timestamp = jiffies; @@ -1959,7 +1973,7 @@ static const struct 
rpc_call_ops nfs4_close_ops = { * * NOTE: Caller must be holding the sp->so_owner semaphore! */ -int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait) +int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_closedata *calldata; @@ -1994,6 +2008,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, i calldata->res.fattr = &calldata->fattr; calldata->res.seqid = calldata->arg.seqid; calldata->res.server = server; + calldata->roc = roc; path_get(path); calldata->path = *path; @@ -2011,6 +2026,8 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, i out_free_calldata: kfree(calldata); out: + if (roc) + pnfs_roc_release(state->inode); nfs4_put_open_state(state); nfs4_put_state_owner(sp); return status; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 6891dedd80f1..286084f148e3 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -606,8 +606,11 @@ static void __nfs4_close(struct path *path, struct nfs4_state *state, if (!call_close) { nfs4_put_open_state(state); nfs4_put_state_owner(owner); - } else - nfs4_do_close(path, state, gfp_mask, wait); + } else { + bool roc = pnfs_roc(state->inode); + + nfs4_do_close(path, state, gfp_mask, wait, roc); + } } void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode) diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index bf4186b8f2fc..bc4089769735 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -256,6 +256,7 @@ put_lseg_locked(struct pnfs_layout_segment *lseg, spin_unlock(&clp->cl_lock); clear_bit(NFS_LAYOUT_BULK_RECALL, &lseg->pls_layout->plh_flags); } + rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq); list_add(&lseg->pls_list, tmp_list); return 1; } @@ -401,7 +402,8 @@ pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid, if ((stateid) && (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0) return true; - return test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) || + return lo->plh_block_lgets || + test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) || (list_empty(&lo->plh_segs) && (atomic_read(&lo->plh_outstanding) > lget)); } @@ -474,6 +476,83 @@ send_layoutget(struct pnfs_layout_hdr *lo, return lseg; } +bool pnfs_roc(struct inode *ino) +{ + struct pnfs_layout_hdr *lo; + struct pnfs_layout_segment *lseg, *tmp; + LIST_HEAD(tmp_list); + bool found = false; + + spin_lock(&ino->i_lock); + lo = NFS_I(ino)->layout; + if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) || + test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) + goto out_nolayout; + list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list) + if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { + mark_lseg_invalid(lseg, &tmp_list); + found = true; + } + if (!found) + goto out_nolayout; + lo->plh_block_lgets++; + get_layout_hdr(lo); /* matched in pnfs_roc_release */ + spin_unlock(&ino->i_lock); + pnfs_free_lseg_list(&tmp_list); + return true; + +out_nolayout: + spin_unlock(&ino->i_lock); + return false; +} + +void pnfs_roc_release(struct inode *ino) +{ + struct pnfs_layout_hdr *lo; + + spin_lock(&ino->i_lock); + lo = NFS_I(ino)->layout; + lo->plh_block_lgets--; + put_layout_hdr_locked(lo); + spin_unlock(&ino->i_lock); +} + +void pnfs_roc_set_barrier(struct inode *ino, u32 barrier) +{ + struct pnfs_layout_hdr *lo; + + spin_lock(&ino->i_lock); + lo = NFS_I(ino)->layout; + if ((int)(barrier - lo->plh_barrier) > 0) + 
lo->plh_barrier = barrier; + spin_unlock(&ino->i_lock); +} + +bool pnfs_roc_drain(struct inode *ino, u32 *barrier) +{ + struct nfs_inode *nfsi = NFS_I(ino); + struct pnfs_layout_segment *lseg; + bool found = false; + + spin_lock(&ino->i_lock); + list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) + if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { + found = true; + break; + } + if (!found) { + struct pnfs_layout_hdr *lo = nfsi->layout; + u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid); + + /* Since close does not return a layout stateid for use as + * a barrier, we choose the worst-case barrier. + */ + *barrier = current_seqid + atomic_read(&lo->plh_outstanding); + } + spin_unlock(&ino->i_lock); + return found; +} + /* * Compare two layout segments for sorting into layout cache. * We want to preferentially return RW over RO layouts, so ensure those @@ -732,6 +811,11 @@ pnfs_layout_process(struct nfs4_layoutget *lgp) *lgp->lsegpp = lseg; pnfs_insert_layout(lo, lseg); + if (res->return_on_close) { + set_bit(NFS_LSEG_ROC, &lseg->pls_flags); + set_bit(NFS_LAYOUT_ROC, &lo->plh_flags); + } + /* Done processing layoutget. Set the layout stateid */ pnfs_set_layout_stateid(lo, &res->stateid, false); spin_unlock(&ino->i_lock); diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index f91d0d45551c..e2612ea0cbed 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -32,6 +32,7 @@ enum { NFS_LSEG_VALID = 0, /* cleared when lseg is recalled/returned */ + NFS_LSEG_ROC, /* roc bit received from server */ }; struct pnfs_layout_segment { @@ -50,6 +51,7 @@ enum { NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */ NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */ NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */ + NFS_LAYOUT_ROC, /* some lseg had roc bit set */ NFS_LAYOUT_DESTROYED, /* no new use of layout allowed */ }; @@ -72,6 +74,7 @@ struct pnfs_layout_hdr { struct list_head plh_segs; /* layout segments list */ nfs4_stateid plh_stateid; atomic_t plh_outstanding; /* number of RPCs out */ + unsigned long plh_block_lgets; /* block LAYOUTGET if >0 */ u32 plh_barrier; /* ignore lower seqids */ unsigned long plh_flags; struct inode *plh_inode; @@ -162,6 +165,10 @@ int pnfs_choose_layoutget_stateid(nfs4_stateid *dst, int mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, struct list_head *tmp_list, u32 iomode); +bool pnfs_roc(struct inode *ino); +void pnfs_roc_release(struct inode *ino); +void pnfs_roc_set_barrier(struct inode *ino, u32 barrier); +bool pnfs_roc_drain(struct inode *ino, u32 *barrier); static inline int lo_fail_bit(u32 iomode) @@ -193,6 +200,28 @@ pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, return NULL; } +static inline bool +pnfs_roc(struct inode *ino) +{ + return false; +} + +static inline void +pnfs_roc_release(struct inode *ino) +{ +} + +static inline void +pnfs_roc_set_barrier(struct inode *ino, u32 barrier) +{ +} + +static inline bool +pnfs_roc_drain(struct inode *ino, u32 *barrier) +{ + return false; +} + static inline void set_pnfs_layoutdriver(struct nfs_server *s, u32 id) { } -- cgit v1.2.2 From fca5238ef3232cd0cf4bf0457e751b3bb20912a9 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 24 Dec 2010 01:32:32 +0000 Subject: NFS: Allow walking nfs_client.cl_superblocks list outside client.c We're about to move some fields from struct nfs_client to struct nfs_server. There is a many-to-one relationship between nfs_servers and nfs_clients. 
After these fields are moved to the nfs_server struct, to visit all of the data in these fields that is owned by one nfs_client, code will need to visit each nfs_server on the cl_superblocks list for that nfs_client. To serialize changes to the cl_superblocks list during these little expeditions, protect the list with RCU. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 44 +++++++++++++++++++++++++------------------- fs/nfs/nfs4renewd.c | 9 +++++++-- 2 files changed, 32 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 684b67771199..32b5fbfab35e 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1003,6 +1003,27 @@ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_serve target->options = source->options; } +static void nfs_server_insert_lists(struct nfs_server *server) +{ + struct nfs_client *clp = server->nfs_client; + + spin_lock(&nfs_client_lock); + list_add_tail_rcu(&server->client_link, &clp->cl_superblocks); + list_add_tail(&server->master_link, &nfs_volume_list); + spin_unlock(&nfs_client_lock); + +} + +static void nfs_server_remove_lists(struct nfs_server *server) +{ + spin_lock(&nfs_client_lock); + list_del_rcu(&server->client_link); + list_del(&server->master_link); + spin_unlock(&nfs_client_lock); + + synchronize_rcu(); +} + /* * Allocate and initialise a server record */ @@ -1046,11 +1067,8 @@ void nfs_free_server(struct nfs_server *server) { dprintk("--> nfs_free_server()\n"); + nfs_server_remove_lists(server); unset_pnfs_layoutdriver(server); - spin_lock(&nfs_client_lock); - list_del(&server->client_link); - list_del(&server->master_link); - spin_unlock(&nfs_client_lock); if (server->destroy != NULL) server->destroy(server); @@ -1125,11 +1143,7 @@ struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data, (unsigned long long) server->fsid.major, (unsigned long long) server->fsid.minor); - spin_lock(&nfs_client_lock); - list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); - list_add_tail(&server->master_link, &nfs_volume_list); - spin_unlock(&nfs_client_lock); - + nfs_server_insert_lists(server); server->mount_time = jiffies; nfs_free_fattr(fattr); return server; @@ -1454,11 +1468,7 @@ static int nfs4_server_common_setup(struct nfs_server *server, if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) server->namelen = NFS4_MAXNAMLEN; - spin_lock(&nfs_client_lock); - list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); - list_add_tail(&server->master_link, &nfs_volume_list); - spin_unlock(&nfs_client_lock); - + nfs_server_insert_lists(server); server->mount_time = jiffies; out: nfs_free_fattr(fattr); @@ -1663,11 +1673,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source, if (error < 0) goto out_free_server; - spin_lock(&nfs_client_lock); - list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks); - list_add_tail(&server->master_link, &nfs_volume_list); - spin_unlock(&nfs_client_lock); - + nfs_server_insert_lists(server); server->mount_time = jiffies; nfs_free_fattr(fattr_fsinfo); diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index 72b6c580af13..cde5650ee5a2 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -63,9 +63,14 @@ nfs4_renew_state(struct work_struct *work) ops = clp->cl_mvops->state_renewal_ops; dprintk("%s: start\n", __func__); - /* Are there any active superblocks? 
*/ - if (list_empty(&clp->cl_superblocks)) + + rcu_read_lock(); + if (list_empty(&clp->cl_superblocks)) { + rcu_read_unlock(); goto out; + } + rcu_read_unlock(); + spin_lock(&clp->cl_lock); lease = clp->cl_lease_time; last = clp->cl_last_renewal; -- cgit v1.2.2 From 24d292b894273495f9664bb495e575f8cb7e8cac Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 24 Dec 2010 01:32:43 +0000 Subject: NFS: Move cl_state_owners and related fields to the nfs_server struct NFSv4 migration needs to reassociate state owners from the source to the destination nfs_server data structures. To make that easier, move the cl_state_owners field to the nfs_server struct. cl_openowner_id and cl_lockowner_id accompany this move, as they are used in conjunction with cl_state_owners. The cl_lock field in the parent nfs_client continues to protect all three of these fields. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/nfs4_fs.h | 2 +- fs/nfs/nfs4state.c | 251 ++++++++++++++++++++++++++++++++++++++--------------- 2 files changed, 181 insertions(+), 72 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 18d64cb5985b..7a7474073148 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -110,7 +110,7 @@ struct nfs_unique_id { struct nfs4_state_owner { struct nfs_unique_id so_owner_id; struct nfs_server *so_server; - struct rb_node so_client_node; + struct rb_node so_server_node; struct rpc_cred *so_cred; /* Associated cred */ diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 286084f148e3..2336d532cf66 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -105,14 +105,17 @@ static void nfs4_clear_machine_cred(struct nfs_client *clp) put_rpccred(cred); } -struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp) +static struct rpc_cred * +nfs4_get_renew_cred_server_locked(struct nfs_server *server) { + struct rpc_cred *cred = NULL; struct nfs4_state_owner *sp; struct rb_node *pos; - struct rpc_cred *cred = NULL; - for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { - sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); + for (pos = rb_first(&server->state_owners); + pos != NULL; + pos = rb_next(pos)) { + sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); if (list_empty(&sp->so_states)) continue; cred = get_rpccred(sp->so_cred); @@ -121,6 +124,28 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp) return cred; } +/** + * nfs4_get_renew_cred_locked - Acquire credential for a renew operation + * @clp: client state handle + * + * Returns an rpc_cred with reference count bumped, or NULL. + * Caller must hold clp->cl_lock. 
+ */ +struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp) +{ + struct rpc_cred *cred = NULL; + struct nfs_server *server; + + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { + cred = nfs4_get_renew_cred_server_locked(server); + if (cred != NULL) + break; + } + rcu_read_unlock(); + return cred; +} + #if defined(CONFIG_NFS_V4_1) static int nfs41_setup_state_renewal(struct nfs_client *clp) @@ -231,28 +256,56 @@ struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp) #endif /* CONFIG_NFS_V4_1 */ -struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) +static struct rpc_cred * +nfs4_get_setclientid_cred_server(struct nfs_server *server) { + struct nfs_client *clp = server->nfs_client; + struct rpc_cred *cred = NULL; struct nfs4_state_owner *sp; struct rb_node *pos; + + spin_lock(&clp->cl_lock); + pos = rb_first(&server->state_owners); + if (pos != NULL) { + sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); + cred = get_rpccred(sp->so_cred); + } + spin_unlock(&clp->cl_lock); + return cred; +} + +/** + * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation + * @clp: client state handle + * + * Returns an rpc_cred with reference count bumped, or NULL. + */ +struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp) +{ + struct nfs_server *server; struct rpc_cred *cred; spin_lock(&clp->cl_lock); cred = nfs4_get_machine_cred_locked(clp); + spin_unlock(&clp->cl_lock); if (cred != NULL) goto out; - pos = rb_first(&clp->cl_state_owners); - if (pos != NULL) { - sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); - cred = get_rpccred(sp->so_cred); + + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { + cred = nfs4_get_setclientid_cred_server(server); + if (cred != NULL) + break; } + rcu_read_unlock(); + out: - spin_unlock(&clp->cl_lock); return cred; } -static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new, - __u64 minval, int maxbits) +static void nfs_alloc_unique_id_locked(struct rb_root *root, + struct nfs_unique_id *new, + __u64 minval, int maxbits) { struct rb_node **p, *parent; struct nfs_unique_id *pos; @@ -307,16 +360,15 @@ static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id) } static struct nfs4_state_owner * -nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred) +nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred) { - struct nfs_client *clp = server->nfs_client; - struct rb_node **p = &clp->cl_state_owners.rb_node, + struct rb_node **p = &server->state_owners.rb_node, *parent = NULL; struct nfs4_state_owner *sp, *res = NULL; while (*p != NULL) { parent = *p; - sp = rb_entry(parent, struct nfs4_state_owner, so_client_node); + sp = rb_entry(parent, struct nfs4_state_owner, so_server_node); if (server < sp->so_server) { p = &parent->rb_left; @@ -340,24 +392,17 @@ nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred) } static struct nfs4_state_owner * -nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new) +nfs4_insert_state_owner_locked(struct nfs4_state_owner *new) { - struct rb_node **p = &clp->cl_state_owners.rb_node, + struct nfs_server *server = new->so_server; + struct rb_node **p = &server->state_owners.rb_node, *parent = NULL; struct nfs4_state_owner *sp; while (*p != NULL) { parent = *p; - sp = rb_entry(parent, struct nfs4_state_owner, so_client_node); + sp = rb_entry(parent, struct 
nfs4_state_owner, so_server_node); - if (new->so_server < sp->so_server) { - p = &parent->rb_left; - continue; - } - if (new->so_server > sp->so_server) { - p = &parent->rb_right; - continue; - } if (new->so_cred < sp->so_cred) p = &parent->rb_left; else if (new->so_cred > sp->so_cred) @@ -367,18 +412,21 @@ nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new) return sp; } } - nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64); - rb_link_node(&new->so_client_node, parent, p); - rb_insert_color(&new->so_client_node, &clp->cl_state_owners); + nfs_alloc_unique_id_locked(&server->openowner_id, + &new->so_owner_id, 1, 64); + rb_link_node(&new->so_server_node, parent, p); + rb_insert_color(&new->so_server_node, &server->state_owners); return new; } static void -nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp) +nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp) { - if (!RB_EMPTY_NODE(&sp->so_client_node)) - rb_erase(&sp->so_client_node, &clp->cl_state_owners); - nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id); + struct nfs_server *server = sp->so_server; + + if (!RB_EMPTY_NODE(&sp->so_server_node)) + rb_erase(&sp->so_server_node, &server->state_owners); + nfs_free_unique_id(&server->openowner_id, &sp->so_owner_id); } /* @@ -407,23 +455,32 @@ nfs4_alloc_state_owner(void) static void nfs4_drop_state_owner(struct nfs4_state_owner *sp) { - if (!RB_EMPTY_NODE(&sp->so_client_node)) { - struct nfs_client *clp = sp->so_server->nfs_client; + if (!RB_EMPTY_NODE(&sp->so_server_node)) { + struct nfs_server *server = sp->so_server; + struct nfs_client *clp = server->nfs_client; spin_lock(&clp->cl_lock); - rb_erase(&sp->so_client_node, &clp->cl_state_owners); - RB_CLEAR_NODE(&sp->so_client_node); + rb_erase(&sp->so_server_node, &server->state_owners); + RB_CLEAR_NODE(&sp->so_server_node); spin_unlock(&clp->cl_lock); } } -struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred) +/** + * nfs4_get_state_owner - Look up a state owner given a credential + * @server: nfs_server to search + * @cred: RPC credential to match + * + * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL. 
+ */ +struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, + struct rpc_cred *cred) { struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp, *new; spin_lock(&clp->cl_lock); - sp = nfs4_find_state_owner(server, cred); + sp = nfs4_find_state_owner_locked(server, cred); spin_unlock(&clp->cl_lock); if (sp != NULL) return sp; @@ -433,7 +490,7 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct new->so_server = server; new->so_cred = cred; spin_lock(&clp->cl_lock); - sp = nfs4_insert_state_owner(clp, new); + sp = nfs4_insert_state_owner_locked(new); spin_unlock(&clp->cl_lock); if (sp == new) get_rpccred(cred); @@ -444,6 +501,11 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct return sp; } +/** + * nfs4_put_state_owner - Release a nfs4_state_owner + * @sp: state owner data to release + * + */ void nfs4_put_state_owner(struct nfs4_state_owner *sp) { struct nfs_client *clp = sp->so_server->nfs_client; @@ -451,7 +513,7 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp) if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock)) return; - nfs4_remove_state_owner(clp, sp); + nfs4_remove_state_owner_locked(sp); spin_unlock(&clp->cl_lock); rpc_destroy_wait_queue(&sp->so_sequence.wait); put_rpccred(cred); @@ -657,7 +719,8 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_p static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type) { struct nfs4_lock_state *lsp; - struct nfs_client *clp = state->owner->so_server->nfs_client; + struct nfs_server *server = state->owner->so_server; + struct nfs_client *clp = server->nfs_client; lsp = kzalloc(sizeof(*lsp), GFP_NOFS); if (lsp == NULL) @@ -681,7 +744,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f return NULL; } spin_lock(&clp->cl_lock); - nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64); + nfs_alloc_unique_id_locked(&server->lockowner_id, &lsp->ls_id, 1, 64); spin_unlock(&clp->cl_lock); INIT_LIST_HEAD(&lsp->ls_locks); return lsp; @@ -689,10 +752,11 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f static void nfs4_free_lock_state(struct nfs4_lock_state *lsp) { - struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client; + struct nfs_server *server = lsp->ls_state->owner->so_server; + struct nfs_client *clp = server->nfs_client; spin_lock(&clp->cl_lock); - nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id); + nfs_free_unique_id(&server->lockowner_id, &lsp->ls_id); spin_unlock(&clp->cl_lock); rpc_destroy_wait_queue(&lsp->ls_sequence.wait); kfree(lsp); @@ -1138,15 +1202,19 @@ static void nfs4_clear_open_state(struct nfs4_state *state) } } -static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state)) +static void nfs4_reset_seqids(struct nfs_server *server, + int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state)) { + struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp; struct rb_node *pos; struct nfs4_state *state; - /* Reset all sequence ids to zero */ - for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { - sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); + spin_lock(&clp->cl_lock); + for (pos = rb_first(&server->state_owners); + pos != NULL; + pos = rb_next(pos)) { + sp = rb_entry(pos, struct 
nfs4_state_owner, so_server_node); sp->so_seqid.flags = 0; spin_lock(&sp->so_lock); list_for_each_entry(state, &sp->so_states, open_states) { @@ -1155,6 +1223,18 @@ static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_re } spin_unlock(&sp->so_lock); } + spin_unlock(&clp->cl_lock); +} + +static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, + int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state)) +{ + struct nfs_server *server; + + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) + nfs4_reset_seqids(server, mark_reclaim); + rcu_read_unlock(); } static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp) @@ -1172,25 +1252,41 @@ static void nfs4_reclaim_complete(struct nfs_client *clp, (void)ops->reclaim_complete(clp); } -static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp) +static void nfs4_clear_reclaim_server(struct nfs_server *server) { + struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp; struct rb_node *pos; struct nfs4_state *state; - if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) - return 0; - - for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { - sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); + spin_lock(&clp->cl_lock); + for (pos = rb_first(&server->state_owners); + pos != NULL; + pos = rb_next(pos)) { + sp = rb_entry(pos, struct nfs4_state_owner, so_server_node); spin_lock(&sp->so_lock); list_for_each_entry(state, &sp->so_states, open_states) { - if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags)) + if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, + &state->flags)) continue; nfs4_state_mark_reclaim_nograce(clp, state); } spin_unlock(&sp->so_lock); } + spin_unlock(&clp->cl_lock); +} + +static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp) +{ + struct nfs_server *server; + + if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) + return 0; + + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) + nfs4_clear_reclaim_server(server); + rcu_read_unlock(); nfs_delegation_reap_unclaimed(clp); return 1; @@ -1262,27 +1358,40 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error) static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops) { + struct nfs4_state_owner *sp; + struct nfs_server *server; struct rb_node *pos; int status = 0; restart: - spin_lock(&clp->cl_lock); - for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) { - struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node); - if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags)) - continue; - atomic_inc(&sp->so_count); - spin_unlock(&clp->cl_lock); - status = nfs4_reclaim_open_state(sp, ops); - if (status < 0) { - set_bit(ops->owner_flag_bit, &sp->so_flags); + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { + spin_lock(&clp->cl_lock); + for (pos = rb_first(&server->state_owners); + pos != NULL; + pos = rb_next(pos)) { + sp = rb_entry(pos, + struct nfs4_state_owner, so_server_node); + if (!test_and_clear_bit(ops->owner_flag_bit, + &sp->so_flags)) + continue; + atomic_inc(&sp->so_count); + spin_unlock(&clp->cl_lock); + rcu_read_unlock(); + + status = nfs4_reclaim_open_state(sp, ops); + if (status < 0) { + set_bit(ops->owner_flag_bit, &sp->so_flags); + nfs4_put_state_owner(sp); + return 
nfs4_recovery_handle_error(clp, status); + } + nfs4_put_state_owner(sp); - return nfs4_recovery_handle_error(clp, status); + goto restart; } - nfs4_put_state_owner(sp); - goto restart; + spin_unlock(&clp->cl_lock); } - spin_unlock(&clp->cl_lock); + rcu_read_unlock(); return status; } -- cgit v1.2.2 From dda4b225623f316335052828c24a16e2da313f8f Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 24 Dec 2010 01:32:54 +0000 Subject: NFS: Introduce nfs_detach_delegations() Clean up: Refactor code that takes clp->cl_lock and calls nfs_detach_delegations_locked() into its own function. While we're changing the call sites, get rid of the second parameter and the logic in nfs_detach_delegations_locked() that uses it, since callers always set that parameter of nfs_detach_delegations_locked() to NULL. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/delegation.c | 41 ++++++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 1fd62fc49be3..521d71b81825 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -175,9 +175,9 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation return inode; } -static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, - const nfs4_stateid *stateid, - struct nfs_client *clp) +static struct nfs_delegation * +nfs_detach_delegation_locked(struct nfs_inode *nfsi, + struct nfs_client *clp) { struct nfs_delegation *delegation = rcu_dereference_protected(nfsi->delegation, @@ -185,22 +185,29 @@ static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfs if (delegation == NULL) goto nomatch; + spin_lock(&delegation->lock); - if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data, - sizeof(delegation->stateid.data)) != 0) - goto nomatch_unlock; list_del_rcu(&delegation->super_list); delegation->inode = NULL; nfsi->delegation_state = 0; rcu_assign_pointer(nfsi->delegation, NULL); spin_unlock(&delegation->lock); return delegation; -nomatch_unlock: - spin_unlock(&delegation->lock); nomatch: return NULL; } +static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi, + struct nfs_client *clp) +{ + struct nfs_delegation *delegation; + + spin_lock(&clp->cl_lock); + delegation = nfs_detach_delegation_locked(nfsi, clp); + spin_unlock(&clp->cl_lock); + return delegation; +} + /* * Set up a delegation on an inode */ @@ -246,7 +253,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct delegation = NULL; goto out; } - freeme = nfs_detach_delegation_locked(nfsi, NULL, clp); + freeme = nfs_detach_delegation_locked(nfsi, clp); } list_add_rcu(&delegation->super_list, &clp->cl_delegations); nfsi->delegation_state = delegation->type; @@ -307,9 +314,7 @@ restart: inode = nfs_delegation_grab_inode(delegation); if (inode == NULL) continue; - spin_lock(&clp->cl_lock); - delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp); - spin_unlock(&clp->cl_lock); + delegation = nfs_detach_delegation(NFS_I(inode), clp); rcu_read_unlock(); if (delegation != NULL) { filemap_flush(inode->i_mapping); @@ -338,9 +343,7 @@ void nfs_inode_return_delegation_noreclaim(struct inode *inode) struct nfs_delegation *delegation; if (rcu_access_pointer(nfsi->delegation) != NULL) { - spin_lock(&clp->cl_lock); - delegation = nfs_detach_delegation_locked(nfsi, NULL, clp); - spin_unlock(&clp->cl_lock); + delegation = nfs_detach_delegation(nfsi, clp); 
if (delegation != NULL) nfs_do_return_delegation(inode, delegation, 0); } @@ -354,9 +357,7 @@ int nfs_inode_return_delegation(struct inode *inode) int err = 0; if (rcu_access_pointer(nfsi->delegation) != NULL) { - spin_lock(&clp->cl_lock); - delegation = nfs_detach_delegation_locked(nfsi, NULL, clp); - spin_unlock(&clp->cl_lock); + delegation = nfs_detach_delegation(nfsi, clp); if (delegation != NULL) { nfs_wb_all(inode); err = __nfs_inode_return_delegation(inode, delegation, 1); @@ -530,9 +531,7 @@ restart: inode = nfs_delegation_grab_inode(delegation); if (inode == NULL) continue; - spin_lock(&clp->cl_lock); - delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp); - spin_unlock(&clp->cl_lock); + delegation = nfs_detach_delegation(NFS_I(inode), clp); rcu_read_unlock(); if (delegation != NULL) nfs_free_delegation(delegation); -- cgit v1.2.2 From d3978bb325510f0a26ebd92f211b36c5f98b2306 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 24 Dec 2010 01:33:04 +0000 Subject: NFS: Move cl_delegations to the nfs_server struct Delegations are per-inode, not per-nfs_client. When a server file system is migrated, delegations on the client must be moved from the source to the destination nfs_server. Make it easier to manage a mount point's delegation list across a migration event by moving the list to the nfs_server struct. Clean up: I added documenting comments to public functions I changed in this patch. For consistency I added comments to all the other public functions in fs/nfs/delegation.c. Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/client.c | 2 +- fs/nfs/delegation.c | 337 ++++++++++++++++++++++++++++++++++++++-------------- fs/nfs/delegation.h | 1 + fs/nfs/nfs4renewd.c | 2 +- 4 files changed, 250 insertions(+), 92 deletions(-) (limited to 'fs') diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 32b5fbfab35e..192f2f860265 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -172,7 +172,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_ if (err) goto error_cleanup; - INIT_LIST_HEAD(&clp->cl_delegations); spin_lock_init(&clp->cl_lock); INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); @@ -1040,6 +1039,7 @@ static struct nfs_server *nfs_alloc_server(void) /* Zero out the NFS state stuff */ INIT_LIST_HEAD(&server->client_link); INIT_LIST_HEAD(&server->master_link); + INIT_LIST_HEAD(&server->delegations); atomic_set(&server->active, 0); diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 521d71b81825..364e4328f392 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -40,11 +40,23 @@ static void nfs_free_delegation(struct nfs_delegation *delegation) call_rcu(&delegation->rcu, nfs_free_delegation_callback); } +/** + * nfs_mark_delegation_referenced - set delegation's REFERENCED flag + * @delegation: delegation to process + * + */ void nfs_mark_delegation_referenced(struct nfs_delegation *delegation) { set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags); } +/** + * nfs_have_delegation - check if inode has a delegation + * @inode: inode to check + * @flags: delegation types to check for + * + * Returns one if inode has the indicated delegation, otherwise zero. 
+ */ int nfs_have_delegation(struct inode *inode, fmode_t flags) { struct nfs_delegation *delegation; @@ -119,10 +131,15 @@ again: return 0; } -/* - * Set up a delegation on an inode +/** + * nfs_inode_reclaim_delegation - process a delegation reclaim request + * @inode: inode to process + * @cred: credential to use for request + * @res: new delegation state from server + * */ -void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res) +void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, + struct nfs_openres *res) { struct nfs_delegation *delegation; struct rpc_cred *oldcred = NULL; @@ -177,11 +194,11 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation static struct nfs_delegation * nfs_detach_delegation_locked(struct nfs_inode *nfsi, - struct nfs_client *clp) + struct nfs_server *server) { struct nfs_delegation *delegation = rcu_dereference_protected(nfsi->delegation, - lockdep_is_held(&clp->cl_lock)); + lockdep_is_held(&server->nfs_client->cl_lock)); if (delegation == NULL) goto nomatch; @@ -198,22 +215,29 @@ nomatch: } static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi, - struct nfs_client *clp) + struct nfs_server *server) { + struct nfs_client *clp = server->nfs_client; struct nfs_delegation *delegation; spin_lock(&clp->cl_lock); - delegation = nfs_detach_delegation_locked(nfsi, clp); + delegation = nfs_detach_delegation_locked(nfsi, server); spin_unlock(&clp->cl_lock); return delegation; } -/* - * Set up a delegation on an inode +/** + * nfs_inode_set_delegation - set up a delegation on an inode + * @inode: inode to which delegation applies + * @cred: cred to use for subsequent delegation processing + * @res: new delegation state from server + * + * Returns zero on success, or a negative errno value. */ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res) { - struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; + struct nfs_server *server = NFS_SERVER(inode); + struct nfs_client *clp = server->nfs_client; struct nfs_inode *nfsi = NFS_I(inode); struct nfs_delegation *delegation, *old_delegation; struct nfs_delegation *freeme = NULL; @@ -234,7 +258,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct spin_lock(&clp->cl_lock); old_delegation = rcu_dereference_protected(nfsi->delegation, - lockdep_is_held(&clp->cl_lock)); + lockdep_is_held(&clp->cl_lock)); if (old_delegation != NULL) { if (memcmp(&delegation->stateid, &old_delegation->stateid, sizeof(old_delegation->stateid)) == 0 && @@ -253,9 +277,9 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct delegation = NULL; goto out; } - freeme = nfs_detach_delegation_locked(nfsi, clp); + freeme = nfs_detach_delegation_locked(nfsi, server); } - list_add_rcu(&delegation->super_list, &clp->cl_delegations); + list_add_rcu(&delegation->super_list, &server->delegations); nfsi->delegation_state = delegation->type; rcu_assign_pointer(nfsi->delegation, delegation); delegation = NULL; @@ -297,67 +321,85 @@ out: return err; } -/* - * Return all delegations that have been marked for return +/** + * nfs_client_return_marked_delegations - return previously marked delegations + * @clp: nfs_client to process + * + * Returns zero on success, or a negative errno value. 
*/ int nfs_client_return_marked_delegations(struct nfs_client *clp) { struct nfs_delegation *delegation; + struct nfs_server *server; struct inode *inode; int err = 0; restart: rcu_read_lock(); - list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { - if (!test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) - continue; - inode = nfs_delegation_grab_inode(delegation); - if (inode == NULL) - continue; - delegation = nfs_detach_delegation(NFS_I(inode), clp); - rcu_read_unlock(); - if (delegation != NULL) { - filemap_flush(inode->i_mapping); - err = __nfs_inode_return_delegation(inode, delegation, 0); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { + list_for_each_entry_rcu(delegation, &server->delegations, + super_list) { + if (!test_and_clear_bit(NFS_DELEGATION_RETURN, + &delegation->flags)) + continue; + inode = nfs_delegation_grab_inode(delegation); + if (inode == NULL) + continue; + delegation = nfs_detach_delegation(NFS_I(inode), + server); + rcu_read_unlock(); + + if (delegation != NULL) { + filemap_flush(inode->i_mapping); + err = __nfs_inode_return_delegation(inode, + delegation, 0); + } + iput(inode); + if (!err) + goto restart; + set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); + return err; } - iput(inode); - if (!err) - goto restart; - set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); - return err; } rcu_read_unlock(); return 0; } -/* - * This function returns the delegation without reclaiming opens - * or protecting against delegation reclaims. - * It is therefore really only safe to be called from - * nfs4_clear_inode() +/** + * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens + * @inode: inode to process + * + * Does not protect against delegation reclaims, therefore really only safe + * to be called from nfs4_clear_inode(). */ void nfs_inode_return_delegation_noreclaim(struct inode *inode) { - struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; + struct nfs_server *server = NFS_SERVER(inode); struct nfs_inode *nfsi = NFS_I(inode); struct nfs_delegation *delegation; if (rcu_access_pointer(nfsi->delegation) != NULL) { - delegation = nfs_detach_delegation(nfsi, clp); + delegation = nfs_detach_delegation(nfsi, server); if (delegation != NULL) nfs_do_return_delegation(inode, delegation, 0); } } +/** + * nfs_inode_return_delegation - synchronously return a delegation + * @inode: inode to process + * + * Returns zero on success, or a negative errno value. 
+ */ int nfs_inode_return_delegation(struct inode *inode) { - struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; + struct nfs_server *server = NFS_SERVER(inode); struct nfs_inode *nfsi = NFS_I(inode); struct nfs_delegation *delegation; int err = 0; if (rcu_access_pointer(nfsi->delegation) != NULL) { - delegation = nfs_detach_delegation(nfsi, clp); + delegation = nfs_detach_delegation(nfsi, server); if (delegation != NULL) { nfs_wb_all(inode); err = __nfs_inode_return_delegation(inode, delegation, 1); @@ -366,46 +408,61 @@ int nfs_inode_return_delegation(struct inode *inode) return err; } -static void nfs_mark_return_delegation(struct nfs_client *clp, struct nfs_delegation *delegation) +static void nfs_mark_return_delegation(struct nfs_delegation *delegation) { + struct nfs_client *clp = NFS_SERVER(delegation->inode)->nfs_client; + set_bit(NFS_DELEGATION_RETURN, &delegation->flags); set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state); } -/* - * Return all delegations associated to a super block +/** + * nfs_super_return_all_delegations - return delegations for one superblock + * @sb: sb to process + * */ void nfs_super_return_all_delegations(struct super_block *sb) { - struct nfs_client *clp = NFS_SB(sb)->nfs_client; + struct nfs_server *server = NFS_SB(sb); + struct nfs_client *clp = server->nfs_client; struct nfs_delegation *delegation; if (clp == NULL) return; + rcu_read_lock(); - list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { + list_for_each_entry_rcu(delegation, &server->delegations, super_list) { spin_lock(&delegation->lock); - if (delegation->inode != NULL && delegation->inode->i_sb == sb) - set_bit(NFS_DELEGATION_RETURN, &delegation->flags); + set_bit(NFS_DELEGATION_RETURN, &delegation->flags); spin_unlock(&delegation->lock); } rcu_read_unlock(); + if (nfs_client_return_marked_delegations(clp) != 0) nfs4_schedule_state_manager(clp); } -static -void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, fmode_t flags) +static void nfs_mark_return_all_delegation_types(struct nfs_server *server, + fmode_t flags) { struct nfs_delegation *delegation; - rcu_read_lock(); - list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { + list_for_each_entry_rcu(delegation, &server->delegations, super_list) { if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE)) continue; if (delegation->type & flags) - nfs_mark_return_delegation(clp, delegation); + nfs_mark_return_delegation(delegation); } +} + +static void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, + fmode_t flags) +{ + struct nfs_server *server; + + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) + nfs_mark_return_all_delegation_types(server, flags); rcu_read_unlock(); } @@ -420,19 +477,32 @@ static void nfs_delegation_run_state_manager(struct nfs_client *clp) nfs4_schedule_state_manager(clp); } +/** + * nfs_expire_all_delegation_types + * @clp: client to process + * @flags: delegation types to expire + * + */ void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags) { nfs_client_mark_return_all_delegation_types(clp, flags); nfs_delegation_run_state_manager(clp); } +/** + * nfs_expire_all_delegations + * @clp: client to process + * + */ void nfs_expire_all_delegations(struct nfs_client *clp) { nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE); } -/* - * Return all delegations following an NFS4ERR_CB_PATH_DOWN error. 
+/** + * nfs_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN + * @clp: client to process + * */ void nfs_handle_cb_pathdown(struct nfs_client *clp) { @@ -441,29 +511,43 @@ void nfs_handle_cb_pathdown(struct nfs_client *clp) nfs_client_mark_return_all_delegations(clp); } -static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *clp) +static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server) { struct nfs_delegation *delegation; - rcu_read_lock(); - list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { + list_for_each_entry_rcu(delegation, &server->delegations, super_list) { if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags)) continue; - nfs_mark_return_delegation(clp, delegation); + nfs_mark_return_delegation(delegation); } - rcu_read_unlock(); } +/** + * nfs_expire_unreferenced_delegations - Eliminate unused delegations + * @clp: nfs_client to process + * + */ void nfs_expire_unreferenced_delegations(struct nfs_client *clp) { - nfs_client_mark_return_unreferenced_delegations(clp); + struct nfs_server *server; + + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) + nfs_mark_return_unreferenced_delegations(server); + rcu_read_unlock(); + nfs_delegation_run_state_manager(clp); } -/* - * Asynchronous delegation recall! +/** + * nfs_async_inode_return_delegation - asynchronously return a delegation + * @inode: inode to process + * @stateid: state ID information from CB_RECALL arguments + * + * Returns zero on success, or a negative errno value. */ -int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid) +int nfs_async_inode_return_delegation(struct inode *inode, + const nfs4_stateid *stateid) { struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; struct nfs_delegation *delegation; @@ -475,22 +559,21 @@ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *s rcu_read_unlock(); return -ENOENT; } - - nfs_mark_return_delegation(clp, delegation); + nfs_mark_return_delegation(delegation); rcu_read_unlock(); + nfs_delegation_run_state_manager(clp); return 0; } -/* - * Retrieve the inode associated with a delegation - */ -struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle) +static struct inode * +nfs_delegation_find_inode_server(struct nfs_server *server, + const struct nfs_fh *fhandle) { struct nfs_delegation *delegation; struct inode *res = NULL; - rcu_read_lock(); - list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { + + list_for_each_entry_rcu(delegation, &server->delegations, super_list) { spin_lock(&delegation->lock); if (delegation->inode != NULL && nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) { @@ -500,47 +583,121 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs if (res != NULL) break; } + return res; +} + +/** + * nfs_delegation_find_inode - retrieve the inode associated with a delegation + * @clp: client state handle + * @fhandle: filehandle from a delegation recall + * + * Returns pointer to inode matching "fhandle," or NULL if a matching inode + * cannot be found. 
+ */ +struct inode *nfs_delegation_find_inode(struct nfs_client *clp, + const struct nfs_fh *fhandle) +{ + struct nfs_server *server; + struct inode *res = NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { + res = nfs_delegation_find_inode_server(server, fhandle); + if (res != NULL) + break; + } rcu_read_unlock(); return res; } -/* - * Mark all delegations as needing to be reclaimed +static void nfs_delegation_mark_reclaim_server(struct nfs_server *server) +{ + struct nfs_delegation *delegation; + + list_for_each_entry_rcu(delegation, &server->delegations, super_list) + set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); +} + +/** + * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed + * @clp: nfs_client to process + * */ void nfs_delegation_mark_reclaim(struct nfs_client *clp) { - struct nfs_delegation *delegation; + struct nfs_server *server; + rcu_read_lock(); - list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) - set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) + nfs_delegation_mark_reclaim_server(server); rcu_read_unlock(); } -/* - * Reap all unclaimed delegations after reboot recovery is done +/** + * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done + * @clp: nfs_client to process + * */ void nfs_delegation_reap_unclaimed(struct nfs_client *clp) { struct nfs_delegation *delegation; + struct nfs_server *server; struct inode *inode; + restart: rcu_read_lock(); - list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) { - if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) - continue; - inode = nfs_delegation_grab_inode(delegation); - if (inode == NULL) - continue; - delegation = nfs_detach_delegation(NFS_I(inode), clp); - rcu_read_unlock(); - if (delegation != NULL) - nfs_free_delegation(delegation); - iput(inode); - goto restart; + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { + list_for_each_entry_rcu(delegation, &server->delegations, + super_list) { + if (test_bit(NFS_DELEGATION_NEED_RECLAIM, + &delegation->flags) == 0) + continue; + inode = nfs_delegation_grab_inode(delegation); + if (inode == NULL) + continue; + delegation = nfs_detach_delegation(NFS_I(inode), + server); + rcu_read_unlock(); + + if (delegation != NULL) + nfs_free_delegation(delegation); + iput(inode); + goto restart; + } } rcu_read_unlock(); } +/** + * nfs_delegations_present - check for existence of delegations + * @clp: client state handle + * + * Returns one if there are any nfs_delegation structures attached + * to this nfs_client. + */ +int nfs_delegations_present(struct nfs_client *clp) +{ + struct nfs_server *server; + int ret = 0; + + rcu_read_lock(); + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) + if (!list_empty(&server->delegations)) { + ret = 1; + break; + } + rcu_read_unlock(); + return ret; +} + +/** + * nfs4_copy_delegation_stateid - Copy inode's state ID information + * @dst: stateid data structure to fill in + * @inode: inode to check + * + * Returns one and fills in "dst->data" * if inode had a delegation, + * otherwise zero is returned. 
+ */ int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode) { struct nfs_inode *nfsi = NFS_I(inode); diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 2026304bda19..d9322e490c56 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -44,6 +44,7 @@ void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags); void nfs_expire_unreferenced_delegations(struct nfs_client *clp); void nfs_handle_cb_pathdown(struct nfs_client *clp); int nfs_client_return_marked_delegations(struct nfs_client *clp); +int nfs_delegations_present(struct nfs_client *clp); void nfs_delegation_mark_reclaim(struct nfs_client *clp); void nfs_delegation_reap_unclaimed(struct nfs_client *clp); diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c index cde5650ee5a2..402143d75fc5 100644 --- a/fs/nfs/nfs4renewd.c +++ b/fs/nfs/nfs4renewd.c @@ -80,7 +80,7 @@ nfs4_renew_state(struct work_struct *work) cred = ops->get_state_renewal_cred_locked(clp); spin_unlock(&clp->cl_lock); if (cred == NULL) { - if (list_empty(&clp->cl_delegations)) { + if (!nfs_delegations_present(clp)) { set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); goto out; } -- cgit v1.2.2 From d035c36c58dd9183ad6aa7875dea89893faedb55 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 21 Dec 2010 10:45:27 -0500 Subject: NFSv4: Ensure continued open and lockowner name uniqueness In order to enable migration support, we will want to move some of the structures that are subject to migration into the struct nfs_server. In particular, if we are to move the state_owner and state_owner_id to being a per-filesystem structure, then we should label the resulting open/lock owners with a per-filesytem label to ensure global uniqueness. This patch does so by adding the super block s_dev to the open/lock owner name. 
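(To make the wire-format effect concrete: the owner opaque sent on OPEN and LOCK grows from 16 to 20 bytes, because the 32-bit s_dev is now encoded between the fixed "open id:"/"lock id:" tag and the 64-bit owner id, which is also why the reserved XDR buffer sizes in the hunks below change from 28 to 32. A minimal userspace sketch of the resulting layout follows; the helper names here are invented for illustration and are not kernel interfaces.)

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Illustrative layout only, mirroring the post-patch encoding:
 * clientid(8) | opaque len = 20 | "lock id:"(8) | s_dev(4) | id(8).
 * Before this patch the opaque was 16 bytes (no s_dev field).
 */
static void put32(uint8_t **p, uint32_t v)
{
        (*p)[0] = v >> 24; (*p)[1] = v >> 16; (*p)[2] = v >> 8; (*p)[3] = v;
        *p += 4;
}

static void put64(uint8_t **p, uint64_t v)
{
        put32(p, (uint32_t)(v >> 32));
        put32(p, (uint32_t)v);
}

static size_t pack_lock_owner(uint8_t *buf, uint64_t clientid,
                              uint32_t s_dev, uint64_t id)
{
        uint8_t *p = buf;

        put64(&p, clientid);
        put32(&p, 20);                  /* opaque length: was 16 before s_dev */
        memcpy(p, "lock id:", 8);       /* fixed tag */
        p += 8;
        put32(&p, s_dev);               /* new per-filesystem label */
        put64(&p, id);                  /* per-owner id */
        return (size_t)(p - buf);       /* 32 bytes total, up from 28 */
}

int main(void)
{
        uint8_t buf[32];

        printf("encoded %zu bytes\n",
               pack_lock_owner(buf, 0x1122334455667788ULL, 7, 42));
        return 0;
}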
Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 3 +++ fs/nfs/nfs4xdr.c | 14 ++++++++------ 2 files changed, 11 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 88f590feeb72..f2b92f6a7efb 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3779,6 +3779,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock goto out; lsp = request->fl_u.nfs4_fl.owner; arg.lock_owner.id = lsp->ls_id.id; + arg.lock_owner.s_dev = server->s_dev; status = nfs4_call_sync(server, &msg, &arg, &res, 1); switch (status) { case 0: @@ -4024,6 +4025,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, p->arg.lock_stateid = &lsp->ls_stateid; p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; p->arg.lock_owner.id = lsp->ls_id.id; + p->arg.lock_owner.s_dev = server->s_dev; p->res.lock_seqid = p->arg.lock_seqid; p->lsp = lsp; p->server = server; @@ -4428,6 +4430,7 @@ void nfs4_release_lockowner(const struct nfs4_lock_state *lsp) return; args->lock_owner.clientid = server->nfs_client->cl_clientid; args->lock_owner.id = lsp->ls_id.id; + args->lock_owner.s_dev = server->s_dev; msg.rpc_argp = args; rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args); } diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 3cbdd0c80a2d..8e496887ec61 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -71,8 +71,8 @@ static int nfs4_stat_to_errno(int); /* lock,open owner id: * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) */ -#define open_owner_id_maxsz (1 + 4) -#define lock_owner_id_maxsz (1 + 4) +#define open_owner_id_maxsz (1 + 1 + 4) +#define lock_owner_id_maxsz (1 + 1 + 4) #define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) #define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) #define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) @@ -1088,10 +1088,11 @@ static void encode_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lo { __be32 *p; - p = reserve_space(xdr, 28); + p = reserve_space(xdr, 32); p = xdr_encode_hyper(p, lowner->clientid); - *p++ = cpu_to_be32(16); + *p++ = cpu_to_be32(20); p = xdr_encode_opaque_fixed(p, "lock id:", 8); + *p++ = cpu_to_be32(lowner->s_dev); xdr_encode_hyper(p, lowner->id); } @@ -1210,10 +1211,11 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena *p++ = cpu_to_be32(OP_OPEN); *p = cpu_to_be32(arg->seqid->sequence->counter); encode_share_access(xdr, arg->fmode); - p = reserve_space(xdr, 28); + p = reserve_space(xdr, 32); p = xdr_encode_hyper(p, arg->clientid); - *p++ = cpu_to_be32(16); + *p++ = cpu_to_be32(20); p = xdr_encode_opaque_fixed(p, "open id:", 8); + *p++ = cpu_to_be32(arg->server->s_dev); xdr_encode_hyper(p, arg->id); } -- cgit v1.2.2 From 6c23a9681c0fe7fb7dd331b39dda11926f43746e Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 7 Jan 2011 08:43:37 +0100 Subject: block: add internal hd part table references We can't use krefs since it's apparently restricted to very basic reference counting. This reverts commit e4a683c8. 
Signed-off-by: Jens Axboe --- fs/partitions/check.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 48209f58522b..011520df71ae 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c @@ -381,10 +381,8 @@ static void delete_partition_rcu_cb(struct rcu_head *head) put_device(part_to_dev(part)); } -void __delete_partition(struct kref *ref) +void __delete_partition(struct hd_struct *part) { - struct hd_struct *part = container_of(ref, struct hd_struct, ref); - call_rcu(&part->rcu_head, delete_partition_rcu_cb); } @@ -406,7 +404,7 @@ void delete_partition(struct gendisk *disk, int partno) kobject_put(part->holder_dir); device_del(part_to_dev(part)); - kref_put(&part->ref, __delete_partition); + hd_struct_put(part); } static ssize_t whole_disk_show(struct device *dev, @@ -505,7 +503,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, if (!dev_get_uevent_suppress(ddev)) kobject_uevent(&pdev->kobj, KOBJ_ADD); - kref_init(&p->ref); + hd_ref_init(p); return p; out_free_info: -- cgit v1.2.2 From e70d84501b630e390b4242fb4ac629b694c11484 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 27 Dec 2010 11:55:08 -0800 Subject: ocfs2: fix build for OCFS2_FS_STATS not enabled When CONFIG_OCFS2_FS_STATS is not enabled: fs/ocfs2/cluster/tcp.c:1254: error: implicit declaration of function 'o2net_update_recv_stats' Signed-off-by: Randy Dunlap Cc: Mark Fasheh Cc: Joel Becker Cc: ocfs2-devel@oss.oracle.com Signed-off-by: Joel Becker --- fs/ocfs2/cluster/tcp.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index bc2309554d0b..3b11cb1e38fc 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -257,6 +257,8 @@ static void o2net_update_recv_stats(struct o2net_sock_container *sc) # define o2net_update_send_stats(a, b) +# define o2net_update_recv_stats(sc) + #endif /* CONFIG_OCFS2_FS_STATS */ static inline int o2net_reconnect_delay(void) -- cgit v1.2.2 From cc548166b2aadba7a566fb0a42884a9d2ff684b0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 3 Jan 2011 09:00:20 +0300 Subject: ocfs2/cluster: dereferencing before checking in nst_seq_show() In the original code, we dereferenced "nst" before checking that it was non-NULL. I moved the check forward and pulled the code in an indent level. Signed-off-by: Dan Carpenter Signed-off-by: Joel Becker --- fs/ocfs2/cluster/netdebug.c | 47 +++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c index 61df89cedded..3a5835904b3d 100644 --- a/fs/ocfs2/cluster/netdebug.c +++ b/fs/ocfs2/cluster/netdebug.c @@ -133,36 +133,37 @@ static int nst_seq_show(struct seq_file *seq, void *v) spin_lock(&o2net_debug_lock); nst = next_nst(dummy_nst); + if (!nst) + goto out; now = ktime_get(); sock = ktime_to_us(ktime_sub(now, nst->st_sock_time)); send = ktime_to_us(ktime_sub(now, nst->st_send_time)); status = ktime_to_us(ktime_sub(now, nst->st_status_time)); - if (nst != NULL) { - /* get_task_comm isn't exported. oh well. 
*/ - seq_printf(seq, "%p:\n" - " pid: %lu\n" - " tgid: %lu\n" - " process name: %s\n" - " node: %u\n" - " sc: %p\n" - " message id: %d\n" - " message type: %u\n" - " message key: 0x%08x\n" - " sock acquiry: %lld usecs ago\n" - " send start: %lld usecs ago\n" - " wait start: %lld usecs ago\n", - nst, (unsigned long)task_pid_nr(nst->st_task), - (unsigned long)nst->st_task->tgid, - nst->st_task->comm, nst->st_node, - nst->st_sc, nst->st_id, nst->st_msg_type, - nst->st_msg_key, - (long long)sock, - (long long)send, - (long long)status); - } + /* get_task_comm isn't exported. oh well. */ + seq_printf(seq, "%p:\n" + " pid: %lu\n" + " tgid: %lu\n" + " process name: %s\n" + " node: %u\n" + " sc: %p\n" + " message id: %d\n" + " message type: %u\n" + " message key: 0x%08x\n" + " sock acquiry: %lld usecs ago\n" + " send start: %lld usecs ago\n" + " wait start: %lld usecs ago\n", + nst, (unsigned long)task_pid_nr(nst->st_task), + (unsigned long)nst->st_task->tgid, + nst->st_task->comm, nst->st_node, + nst->st_sc, nst->st_id, nst->st_msg_type, + nst->st_msg_key, + (long long)sock, + (long long)send, + (long long)status); +out: spin_unlock(&o2net_debug_lock); return 0; -- cgit v1.2.2 From aecf58661961a553c254cf14536f70349127affb Mon Sep 17 00:00:00 2001 From: Tao Ma Date: Thu, 23 Dec 2010 15:30:44 +0800 Subject: ocfs2: Remove unused truncate function from alloc.c Tristan Ye has done some refactoring against our truncate process, so some functions like ocfs2_prepare_truncate and ocfs2_free_truncate_context are no use and we'd better remove them. Signed-off-by: Tao Ma Signed-off-by: Joel Becker --- fs/ocfs2/alloc.c | 74 -------------------------------------------------------- fs/ocfs2/alloc.h | 4 --- 2 files changed, 78 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 8ec418dd9e36..e4984e259cb6 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -565,7 +565,6 @@ static inline int ocfs2_et_sanity_check(struct ocfs2_extent_tree *et) return ret; } -static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc); static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt, struct ocfs2_extent_block *eb); static void ocfs2_adjust_rightmost_records(handle_t *handle, @@ -7141,64 +7140,6 @@ bail: return status; } -/* - * Expects the inode to already be locked. 
- */ -int ocfs2_prepare_truncate(struct ocfs2_super *osb, - struct inode *inode, - struct buffer_head *fe_bh, - struct ocfs2_truncate_context **tc) -{ - int status; - unsigned int new_i_clusters; - struct ocfs2_dinode *fe; - struct ocfs2_extent_block *eb; - struct buffer_head *last_eb_bh = NULL; - - mlog_entry_void(); - - *tc = NULL; - - new_i_clusters = ocfs2_clusters_for_bytes(osb->sb, - i_size_read(inode)); - fe = (struct ocfs2_dinode *) fe_bh->b_data; - - mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size =" - "%llu\n", le32_to_cpu(fe->i_clusters), new_i_clusters, - (unsigned long long)le64_to_cpu(fe->i_size)); - - *tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL); - if (!(*tc)) { - status = -ENOMEM; - mlog_errno(status); - goto bail; - } - ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc); - - if (fe->id2.i_list.l_tree_depth) { - status = ocfs2_read_extent_block(INODE_CACHE(inode), - le64_to_cpu(fe->i_last_eb_blk), - &last_eb_bh); - if (status < 0) { - mlog_errno(status); - goto bail; - } - eb = (struct ocfs2_extent_block *) last_eb_bh->b_data; - } - - (*tc)->tc_last_eb_bh = last_eb_bh; - - status = 0; -bail: - if (status < 0) { - if (*tc) - ocfs2_free_truncate_context(*tc); - *tc = NULL; - } - mlog_exit_void(); - return status; -} - /* * 'start' is inclusive, 'end' is not. */ @@ -7273,18 +7214,3 @@ out_commit: out: return ret; } - -static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc) -{ - /* - * The caller is responsible for completing deallocation - * before freeing the context. - */ - if (tc->tc_dealloc.c_first_suballocator != NULL) - mlog(ML_NOTICE, - "Truncate completion has non-empty dealloc context\n"); - - brelse(tc->tc_last_eb_bh); - - kfree(tc); -} diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 55762b554b99..3bd08a03251c 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -228,10 +228,6 @@ struct ocfs2_truncate_context { int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, u64 range_start, u64 range_end); -int ocfs2_prepare_truncate(struct ocfs2_super *osb, - struct inode *inode, - struct buffer_head *fe_bh, - struct ocfs2_truncate_context **tc); int ocfs2_commit_truncate(struct ocfs2_super *osb, struct inode *inode, struct buffer_head *di_bh); -- cgit v1.2.2 From 197a1eeb7f89c1d2ba93713398e9655899aa973e Mon Sep 17 00:00:00 2001 From: Steve French Date: Sun, 9 Jan 2011 23:26:56 +0000 Subject: [CIFS] Fix minor merge conflict in fs/cifs/dir.c Signed-off-by: Steve French --- fs/cifs/dir.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'fs') diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index ce8014345258..db2a58c00f7b 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -724,17 +724,10 @@ static int cifs_ci_compare(const struct dentry *parent, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { -<<<<<<< HEAD - struct nls_table *codepage = CIFS_SB(dentry->d_inode->i_sb)->local_nls; - - if ((a->len == b->len) && - (nls_strnicmp(codepage, a->name, b->name, a->len) == 0)) -======= struct nls_table *codepage = CIFS_SB(pinode->i_sb)->local_nls; if ((name->len == len) && (nls_strnicmp(codepage, name->name, str, len) == 0)) ->>>>>>> 0c21e3aaf6ae85bee804a325aa29c325209180fd return 0; return 1; } -- cgit v1.2.2 From a0f8b4fb4cab4bc32caaf34fc0a0c9d5dd369186 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 7 Jan 2011 11:30:28 -0500 Subject: cifs: remove unnecessary locking around sequence_number The server->sequence_number is already protected 
by the srv_mutex. The GlobalMid_lock is unneeded here. Reviewed-by: Shirish Pargaonkar Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsencrypt.c | 6 ++---- fs/cifs/cifsglob.h | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index f856732161ab..66f3d50d0676 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -72,6 +72,7 @@ static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, return 0; } +/* must be called with server->srv_mutex held */ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, __u32 *pexpected_response_sequence_number) { @@ -84,14 +85,12 @@ int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0) return rc; - spin_lock(&GlobalMid_Lock); cifs_pdu->Signature.Sequence.SequenceNumber = cpu_to_le32(server->sequence_number); cifs_pdu->Signature.Sequence.Reserved = 0; *pexpected_response_sequence_number = server->sequence_number++; server->sequence_number++; - spin_unlock(&GlobalMid_Lock); rc = cifs_calculate_signature(cifs_pdu, server, smb_signature); if (rc) @@ -149,6 +148,7 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec, return rc; } +/* must be called with server->srv_mutex held */ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server, __u32 *pexpected_response_sequence_number) { @@ -162,14 +162,12 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server, if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0) return rc; - spin_lock(&GlobalMid_Lock); cifs_pdu->Signature.Sequence.SequenceNumber = cpu_to_le32(server->sequence_number); cifs_pdu->Signature.Sequence.Reserved = 0; *pexpected_response_sequence_number = server->sequence_number++; server->sequence_number++; - spin_unlock(&GlobalMid_Lock); rc = cifs_calc_signature2(iov, n_vec, server, smb_signature); if (rc) diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index dfd2d46275ab..e6590e69fb0e 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -207,7 +207,7 @@ struct TCP_Server_Info { char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */ /* 16th byte of RFC1001 workstation name is always null */ char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; - __u32 sequence_number; /* needed for CIFS PDU signature */ + __u32 sequence_number; /* for signing, protected by srv_mutex */ struct session_key session_key; unsigned long lstrp; /* when we got last response from this server */ u16 dialect; /* dialect index that server chose */ -- cgit v1.2.2 From 1397f2ee4be65542fdc3460c7e8b6317779ea680 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 7 Jan 2011 11:30:28 -0500 Subject: cifs: replace some hardcoded values with preprocessor constants A number of places that deal with RFC1001/1002 negotiations have bare "15" or "16" values. Replace them with RFC_1001_NAME_LEN and RFC_1001_NAME_LEN_WITH_NULL. The patch also cleans up some checkpatch warnings for code surrounding the changes. This should apply cleanly on top of the patch to remove Local_System_Name. 
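As a point of reference for the constants introduced here, the client NetBIOS name is kept as 15 significant characters, space-padded and upper-cased, with a trailing NUL in the 16-byte buffer. A stand-alone userspace approximation of that padding logic (illustrative only — the constant values match the patch, but format_netbios_name() is a made-up helper, not a cifs function):

    #include <ctype.h>
    #include <string.h>

    #define RFC1001_NAME_LEN            15
    #define RFC1001_NAME_LEN_WITH_NULL  (RFC1001_NAME_LEN + 1)

    /* Space-pad and upper-case a nodename into a 16-byte NetBIOS name. */
    static void format_netbios_name(char dst[RFC1001_NAME_LEN_WITH_NULL],
                                    const char *nodename)
    {
        size_t i;

        memset(dst, ' ', RFC1001_NAME_LEN);
        for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++)
            dst[i] = toupper((unsigned char)nodename[i]);
        dst[RFC1001_NAME_LEN] = '\0';
    }

Using the named lengths everywhere makes it harder for the padding, the copy loop and the truncation warning to disagree about the buffer size.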
Reported-and-Reviwed-by: Shirish Pargaonkar Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 41f002fb4a04..5e7a7bcc39a6 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -64,8 +64,8 @@ struct smb_vol { char *UNC; char *UNCip; char *iocharset; /* local code page for mapping to and from Unicode */ - char source_rfc1001_name[16]; /* netbios name of client */ - char target_rfc1001_name[16]; /* netbios name of server for Win9x/ME */ + char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */ + char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */ uid_t cred_uid; uid_t linux_uid; gid_t linux_gid; @@ -816,11 +816,11 @@ cifs_parse_mount_options(char *options, const char *devname, * informational, only used for servers that do not support * port 445 and it can be overridden at mount time */ - memset(vol->source_rfc1001_name, 0x20, 15); - for (i = 0; i < strnlen(nodename, 15); i++) + memset(vol->source_rfc1001_name, 0x20, RFC1001_NAME_LEN); + for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++) vol->source_rfc1001_name[i] = toupper(nodename[i]); - vol->source_rfc1001_name[15] = 0; + vol->source_rfc1001_name[RFC1001_NAME_LEN] = 0; /* null target name indicates to use *SMBSERVR default called name if we end up sending RFC1001 session initialize */ vol->target_rfc1001_name[0] = 0; @@ -1167,22 +1167,22 @@ cifs_parse_mount_options(char *options, const char *devname, if (!value || !*value || (*value == ' ')) { cFYI(1, "invalid (empty) netbiosname"); } else { - memset(vol->source_rfc1001_name, 0x20, 15); - for (i = 0; i < 15; i++) { - /* BB are there cases in which a comma can be - valid in this workstation netbios name (and need - special handling)? */ - - /* We do not uppercase netbiosname for user */ + memset(vol->source_rfc1001_name, 0x20, + RFC1001_NAME_LEN); + /* + * FIXME: are there cases in which a comma can + * be valid in workstation netbios name (and + * need special handling)? 
+ */ + for (i = 0; i < RFC1001_NAME_LEN; i++) { + /* don't ucase netbiosname for user */ if (value[i] == 0) break; - else - vol->source_rfc1001_name[i] = - value[i]; + vol->source_rfc1001_name[i] = value[i]; } /* The string has 16th byte zero still from set at top of the function */ - if ((i == 15) && (value[i] != 0)) + if (i == RFC1001_NAME_LEN && value[i] != 0) printk(KERN_WARNING "CIFS: netbiosname" " longer than 15 truncated.\n"); } @@ -1192,7 +1192,8 @@ cifs_parse_mount_options(char *options, const char *devname, cFYI(1, "empty server netbiosname specified"); } else { /* last byte, type, is 0x20 for servr type */ - memset(vol->target_rfc1001_name, 0x20, 16); + memset(vol->target_rfc1001_name, 0x20, + RFC1001_NAME_LEN_WITH_NULL); for (i = 0; i < 15; i++) { /* BB are there cases in which a comma can be @@ -1209,7 +1210,7 @@ cifs_parse_mount_options(char *options, const char *devname, } /* The string has 16th byte zero still from set at top of the function */ - if ((i == 15) && (value[i] != 0)) + if (i == RFC1001_NAME_LEN && value[i] != 0) printk(KERN_WARNING "CIFS: server net" "biosname longer than 15 truncated.\n"); } -- cgit v1.2.2 From b4d6fcf13f417464c13c6fde46e87c495ba6b6ee Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 7 Jan 2011 11:30:28 -0500 Subject: cifs: move "ntlmssp" and "local_leases" options out of experimental code I see no real need to leave these sorts of options under an EXPERIMENTAL ifdef. Since you need a mount option to turn this code on, that only blows out the testing matrix. local_leases has been under the EXPERIMENTAL tag for some time, but it's only the mount option that's under this label. Move it out from under this tag. The NTLMSSP code is also under EXPERIMENTAL, but it needs a mount option to turn it on, and in the future any distro will reasonably want this enabled. Go ahead and move it out from under the EXPERIMENTAL tag. 
Signed-off-by: Jeff Layton Acked-by: Suresh Jayaraman Signed-off-by: Steve French --- fs/cifs/cifssmb.c | 5 +-- fs/cifs/connect.c | 4 -- fs/cifs/sess.c | 114 ++++++++++++++++++++++++++---------------------------- 3 files changed, 55 insertions(+), 68 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 67acfb3acad2..2f6795e524d3 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -401,15 +401,12 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_KRB5) { cFYI(1, "Kerberos only mechanism, enable extended security"); pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; - } -#ifdef CONFIG_CIFS_EXPERIMENTAL - else if ((secFlags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP) + } else if ((secFlags & CIFSSEC_MUST_NTLMSSP) == CIFSSEC_MUST_NTLMSSP) pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; else if ((secFlags & CIFSSEC_AUTH_MASK) == CIFSSEC_MAY_NTLMSSP) { cFYI(1, "NTLMSSP only mechanism, enable extended security"); pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC; } -#endif count = 0; for (i = 0; i < CIFS_NUM_PROT; i++) { diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 5e7a7bcc39a6..a65d311d163a 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -984,13 +984,11 @@ cifs_parse_mount_options(char *options, const char *devname, return 1; } else if (strnicmp(value, "krb5", 4) == 0) { vol->secFlg |= CIFSSEC_MAY_KRB5; -#ifdef CONFIG_CIFS_EXPERIMENTAL } else if (strnicmp(value, "ntlmsspi", 8) == 0) { vol->secFlg |= CIFSSEC_MAY_NTLMSSP | CIFSSEC_MUST_SIGN; } else if (strnicmp(value, "ntlmssp", 7) == 0) { vol->secFlg |= CIFSSEC_MAY_NTLMSSP; -#endif } else if (strnicmp(value, "ntlmv2i", 7) == 0) { vol->secFlg |= CIFSSEC_MAY_NTLMV2 | CIFSSEC_MUST_SIGN; @@ -1341,10 +1339,8 @@ cifs_parse_mount_options(char *options, const char *devname, vol->no_psx_acl = 0; } else if (strnicmp(data, "noacl", 5) == 0) { vol->no_psx_acl = 1; -#ifdef CONFIG_CIFS_EXPERIMENTAL } else if (strnicmp(data, "locallease", 6) == 0) { vol->local_lease = 1; -#endif } else if (strnicmp(data, "sign", 4) == 0) { vol->secFlg |= CIFSSEC_MUST_SIGN; } else if (strnicmp(data, "seal", 4) == 0) { diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 54d9f76deff9..eb746486e49e 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -420,7 +420,6 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, return 0; } -#ifdef CONFIG_CIFS_EXPERIMENTAL /* BB Move to ntlmssp.c eventually */ /* We do not malloc the blob, it is passed in pbuffer, because @@ -564,7 +563,6 @@ setup_ntlmv2_ret: *buflen = tmp - pbuffer; return rc; } -#endif int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, @@ -806,74 +804,70 @@ ssetup_ntlmssp_authenticate: rc = -ENOSYS; goto ssetup_exit; #endif /* CONFIG_CIFS_UPCALL */ - } else { -#ifdef CONFIG_CIFS_EXPERIMENTAL - if (type == RawNTLMSSP) { - if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) { - cERROR(1, "NTLMSSP requires Unicode support"); - rc = -ENOSYS; + } else if (type == RawNTLMSSP) { + if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) { + cERROR(1, "NTLMSSP requires Unicode support"); + rc = -ENOSYS; + goto ssetup_exit; + } + + cFYI(1, "ntlmssp session setup phase %d", phase); + pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; + capabilities |= CAP_EXTENDED_SECURITY; + pSMB->req.Capabilities |= cpu_to_le32(capabilities); + switch(phase) { + case NtLmNegotiate: + build_ntlmssp_negotiate_blob( + pSMB->req.SecurityBlob, ses); + iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE); + iov[1].iov_base = pSMB->req.SecurityBlob; + 
pSMB->req.SecurityBlobLength = + cpu_to_le16(sizeof(NEGOTIATE_MESSAGE)); + break; + case NtLmAuthenticate: + /* + * 5 is an empirical value, large enough to hold + * authenticate message plus max 10 of av paris, + * domain, user, workstation names, flags, etc. + */ + ntlmsspblob = kzalloc( + 5*sizeof(struct _AUTHENTICATE_MESSAGE), + GFP_KERNEL); + if (!ntlmsspblob) { + cERROR(1, "Can't allocate NTLMSSP blob"); + rc = -ENOMEM; goto ssetup_exit; } - cFYI(1, "ntlmssp session setup phase %d", phase); - pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC; - capabilities |= CAP_EXTENDED_SECURITY; - pSMB->req.Capabilities |= cpu_to_le32(capabilities); - if (phase == NtLmNegotiate) { - build_ntlmssp_negotiate_blob( - pSMB->req.SecurityBlob, ses); - iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE); - iov[1].iov_base = pSMB->req.SecurityBlob; - pSMB->req.SecurityBlobLength = - cpu_to_le16(sizeof(NEGOTIATE_MESSAGE)); - } else if (phase == NtLmAuthenticate) { - /* 5 is an empirical value, large enought to - * hold authenticate message, max 10 of - * av paris, doamin,user,workstation mames, - * flags etc.. - */ - ntlmsspblob = kzalloc( - 5*sizeof(struct _AUTHENTICATE_MESSAGE), - GFP_KERNEL); - if (!ntlmsspblob) { - cERROR(1, "Can't allocate NTLMSSP"); - rc = -ENOMEM; - goto ssetup_exit; - } - - rc = build_ntlmssp_auth_blob(ntlmsspblob, - &blob_len, ses, nls_cp); - if (rc) - goto ssetup_exit; - iov[1].iov_len = blob_len; - iov[1].iov_base = ntlmsspblob; - pSMB->req.SecurityBlobLength = - cpu_to_le16(blob_len); - /* Make sure that we tell the server that we - are using the uid that it just gave us back - on the response (challenge) */ - smb_buf->Uid = ses->Suid; - } else { - cERROR(1, "invalid phase %d", phase); - rc = -ENOSYS; + rc = build_ntlmssp_auth_blob(ntlmsspblob, + &blob_len, ses, nls_cp); + if (rc) goto ssetup_exit; - } - /* unicode strings must be word aligned */ - if ((iov[0].iov_len + iov[1].iov_len) % 2) { - *bcc_ptr = 0; - bcc_ptr++; - } - unicode_oslm_strings(&bcc_ptr, nls_cp); - } else { - cERROR(1, "secType %d not supported!", type); + iov[1].iov_len = blob_len; + iov[1].iov_base = ntlmsspblob; + pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len); + /* + * Make sure that we tell the server that we are using + * the uid that it just gave us back on the response + * (challenge) + */ + smb_buf->Uid = ses->Suid; + break; + default: + cERROR(1, "invalid phase %d", phase); rc = -ENOSYS; goto ssetup_exit; } -#else + /* unicode strings must be word aligned */ + if ((iov[0].iov_len + iov[1].iov_len) % 2) { + *bcc_ptr = 0; + bcc_ptr++; + } + unicode_oslm_strings(&bcc_ptr, nls_cp); + } else { cERROR(1, "secType %d not supported!", type); rc = -ENOSYS; goto ssetup_exit; -#endif } iov[2].iov_base = str_area; -- cgit v1.2.2 From ca40b714b8f9f20118b7071cb7cf49954166dbdf Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 7 Jan 2011 11:30:29 -0500 Subject: cifs: show "acl" in DebugData Features when it's compiled in ...and while we're at it, reduce the number of calls into the seq_* functions by prepending spaces to strings. 
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifs_debug.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 103ab8b605b0..ede98300a8cd 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -119,29 +119,27 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) "Display Internal CIFS Data Structures for Debugging\n" "---------------------------------------------------\n"); seq_printf(m, "CIFS Version %s\n", CIFS_VERSION); - seq_printf(m, "Features: "); + seq_printf(m, "Features:"); #ifdef CONFIG_CIFS_DFS_UPCALL - seq_printf(m, "dfs"); - seq_putc(m, ' '); + seq_printf(m, " dfs"); #endif #ifdef CONFIG_CIFS_FSCACHE - seq_printf(m, "fscache"); - seq_putc(m, ' '); + seq_printf(m, " fscache"); #endif #ifdef CONFIG_CIFS_WEAK_PW_HASH - seq_printf(m, "lanman"); - seq_putc(m, ' '); + seq_printf(m, " lanman"); #endif #ifdef CONFIG_CIFS_POSIX - seq_printf(m, "posix"); - seq_putc(m, ' '); + seq_printf(m, " posix"); #endif #ifdef CONFIG_CIFS_UPCALL - seq_printf(m, "spnego"); - seq_putc(m, ' '); + seq_printf(m, " spnego"); #endif #ifdef CONFIG_CIFS_XATTR - seq_printf(m, "xattr"); + seq_printf(m, " xattr"); +#endif +#ifdef CONFIG_CIFS_ACL + seq_printf(m, " acl"); #endif seq_putc(m, '\n'); seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid); -- cgit v1.2.2 From d44a9fe2c8af3fee8edb203e9b11e507851c50fa Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 7 Jan 2011 11:30:29 -0500 Subject: cifs: switch cifs_open and cifs_create to use CIFSSMBUnixSetFileInfo We call CIFSSMBUnixSetPathInfo in these functions, but we have a filehandle since an open was just done. Switch these functions to use CIFSSMBUnixSetFileInfo instead. In practice, these codepaths are only used if posix opens are broken. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/dir.c | 6 ++---- fs/cifs/file.c | 6 ++---- 2 files changed, 4 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index db2a58c00f7b..2e773825835e 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -293,10 +293,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, args.uid = NO_CHANGE_64; args.gid = NO_CHANGE_64; } - CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, - cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); + CIFSSMBUnixSetFileInfo(xid, tcon, &args, fileHandle, + current->tgid); } else { /* BB implement mode setting via Windows security descriptors e.g. */ diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 97ddbf2fdfc3..d843631c028d 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -424,10 +424,8 @@ int cifs_open(struct inode *inode, struct file *file) .mtime = NO_CHANGE_64, .device = 0, }; - CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args, - cifs_sb->local_nls, - cifs_sb->mnt_cifs_flags & - CIFS_MOUNT_MAP_SPECIAL_CHR); + CIFSSMBUnixSetFileInfo(xid, tcon, &args, netfid, + pCifsFile->pid); } out: -- cgit v1.2.2 From 20054bd65703f7504a9daceabc2a060828fde36c Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 7 Jan 2011 11:30:27 -0500 Subject: cifs: use CreationTime like an i_generation field Reduce false inode collisions by using the CreationTime like an i_generation field. This way, even if the server ends up reusing a uniqueid after a delete/create cycle, we can avoid matching the inode incorrectly. 
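The underlying idea is the one local filesystems express with i_generation: a cached inode is treated as the same object only if both its server-assigned unique id and its creation time agree, so an id recycled by a delete/create cycle no longer aliases the stale inode. A stand-alone sketch of that matching rule (illustrative only — the structure and function names below are invented for the example, not the cifs ones):

    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/stat.h>

    struct cached_inode {
        uint64_t uniqueid;    /* server inode number; may be reused */
        uint64_t createtime;  /* creation time, used as a generation */
        mode_t   mode;
    };

    /* Match only when the id, the "generation" and the file type agree. */
    static bool inode_matches(const struct cached_inode *ino,
                              uint64_t uniqueid, uint64_t createtime,
                              mode_t mode)
    {
        if (ino->uniqueid != uniqueid)
            return false;
        if (ino->createtime != createtime)  /* uniqueid was recycled */
            return false;
        return (ino->mode & S_IFMT) == (mode & S_IFMT);
    }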
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 2 ++ fs/cifs/cifsglob.h | 2 ++ fs/cifs/inode.c | 6 ++++++ fs/cifs/readdir.c | 1 + 4 files changed, 11 insertions(+) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 5abfedaa5e78..5e7075d5f139 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -329,6 +329,8 @@ cifs_alloc_inode(struct super_block *sb) cifs_inode->invalid_mapping = false; cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */ cifs_inode->server_eof = 0; + cifs_inode->uniqueid = 0; + cifs_inode->createtime = 0; /* Can not set i_flags here - they get immediately overwritten to zero by the VFS */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index e6590e69fb0e..606ca8bb7102 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -453,6 +453,7 @@ struct cifsInodeInfo { bool invalid_mapping:1; /* pagecache is invalid */ u64 server_eof; /* current file size on server */ u64 uniqueid; /* server inode number */ + u64 createtime; /* creation time on server */ #ifdef CONFIG_CIFS_FSCACHE struct fscache_cookie *fscache; #endif @@ -573,6 +574,7 @@ struct cifs_fattr { u64 cf_uniqueid; u64 cf_eof; u64 cf_bytes; + u64 cf_createtime; uid_t cf_uid; gid_t cf_gid; umode_t cf_mode; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index a853a89857a5..0c7e36910e31 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -518,6 +518,7 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, fattr->cf_eof = le64_to_cpu(info->EndOfFile); fattr->cf_bytes = le64_to_cpu(info->AllocationSize); + fattr->cf_createtime = le64_to_cpu(info->CreationTime); if (fattr->cf_cifsattrs & ATTR_DIRECTORY) { fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode; @@ -779,6 +780,10 @@ cifs_find_inode(struct inode *inode, void *opaque) if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid) return 0; + /* use createtime like an i_generation field */ + if (CIFS_I(inode)->createtime != fattr->cf_createtime) + return 0; + /* don't match inode of different type */ if ((inode->i_mode & S_IFMT) != (fattr->cf_mode & S_IFMT)) return 0; @@ -796,6 +801,7 @@ cifs_init_inode(struct inode *inode, void *opaque) struct cifs_fattr *fattr = (struct cifs_fattr *) opaque; CIFS_I(inode)->uniqueid = fattr->cf_uniqueid; + CIFS_I(inode)->createtime = fattr->cf_createtime; return 0; } diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index ec5b68e3b928..76b1b37c9e6b 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -160,6 +160,7 @@ cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info, fattr->cf_cifsattrs = le32_to_cpu(info->ExtFileAttributes); fattr->cf_eof = le64_to_cpu(info->EndOfFile); fattr->cf_bytes = le64_to_cpu(info->AllocationSize); + fattr->cf_createtime = le64_to_cpu(info->CreationTime); fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime); fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime); fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime); -- cgit v1.2.2 From b004a5eb0babec7ef91558f73315ef49e5a1f285 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 9 Nov 2010 16:35:21 -0800 Subject: fs/nilfs2/super.c: Use printf extension %pV Using %pV reduces the number of printk calls and eliminates any possible message interleaving from other printk calls. 
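For context, %pV is the printk extension that expands a struct va_format (a format string plus a pointer to a captured va_list) in place, which is what allows the prefix, the caller-supplied format and the trailing newline to be emitted by a single printk. A minimal kernel-style sketch of the pattern (hypothetical helper, shown only to illustrate the mechanism the patch switches to):

    #include <linux/kernel.h>

    /* Emit a prefixed warning in one printk call via %pV. */
    static void my_warn(const char *prefix, const char *fmt, ...)
    {
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        /* real code would also annotate the function with __printf(2, 3) */
        printk(KERN_WARNING "%s: %pV\n", prefix, &vaf);
        va_end(args);
    }

Because the whole message is now one printk call, output from other CPUs can no longer be interleaved between the prefix and the body.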
Signed-off-by: Joe Perches Signed-off-by: Ryusuke Konishi --- fs/nilfs2/super.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index e2dcc9c733f7..f0ab826251c0 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -111,12 +111,17 @@ void nilfs_error(struct super_block *sb, const char *function, const char *fmt, ...) { struct nilfs_sb_info *sbi = NILFS_SB(sb); + struct va_format vaf; va_list args; va_start(args, fmt); - printk(KERN_CRIT "NILFS error (device %s): %s: ", sb->s_id, function); - vprintk(fmt, args); - printk("\n"); + + vaf.fmt = fmt; + vaf.va = &args; + + printk(KERN_CRIT "NILFS error (device %s): %s: %pV\n", + sb->s_id, function, &vaf); + va_end(args); if (!(sb->s_flags & MS_RDONLY)) { @@ -136,13 +141,17 @@ void nilfs_error(struct super_block *sb, const char *function, void nilfs_warning(struct super_block *sb, const char *function, const char *fmt, ...) { + struct va_format vaf; va_list args; va_start(args, fmt); - printk(KERN_WARNING "NILFS warning (device %s): %s: ", - sb->s_id, function); - vprintk(fmt, args); - printk("\n"); + + vaf.fmt = fmt; + vaf.va = &args; + + printk(KERN_WARNING "NILFS warning (device %s): %s: %pV\n", + sb->s_id, function, &vaf); + va_end(args); } -- cgit v1.2.2 From e828949e5b42bfd234ee537cdb7c5e3a577958a3 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Fri, 19 Nov 2010 15:26:20 +0900 Subject: nilfs2: call nilfs_error inside bmap routines Some functions using nilfs bmap routines can wrongly return invalid argument error (i.e. -EINVAL) that bmap returns as an internal code for btree corruption. This fixes the issue by catching and converting the internal EINVAL to EIO and calling nilfs_error function inside bmap routines. 
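The shape of the fix is a small translation helper at the bmap boundary: the internal code for a corrupt btree (-EINVAL) is logged once and rewritten to -EIO, so callers can no longer confuse it with a genuine bad-argument error. A generic, self-contained sketch of that pattern (illustrative only; the function names here are invented, and fprintf stands in for nilfs_error()):

    #include <errno.h>
    #include <stdio.h>

    /* Convert the internal "corrupt metadata" code to -EIO at the boundary. */
    static int convert_internal_error(int err, const char *who)
    {
        if (err == -EINVAL) {
            fprintf(stderr, "%s: broken mapping detected\n", who);
            err = -EIO;
        }
        return err;
    }

    /* Every public entry point funnels its result through the helper. */
    static int public_lookup(int (*raw_lookup)(unsigned long key),
                             unsigned long key)
    {
        return convert_internal_error(raw_lookup(key), __func__);
    }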
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/bmap.c | 45 +++++++++++++++++++++++++++++++++++---------- fs/nilfs2/ifile.c | 11 +++-------- fs/nilfs2/inode.c | 19 +++++-------------- fs/nilfs2/mdt.c | 6 ------ fs/nilfs2/segment.c | 30 +++++------------------------- 5 files changed, 48 insertions(+), 63 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index 8b782b062baa..4b7aeb34cc75 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -38,6 +38,19 @@ struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap) return nilfs_dat_inode(NILFS_I_NILFS(bmap->b_inode)); } +static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap, + const char *fname, int err) +{ + struct inode *inode = bmap->b_inode; + + if (err == -EINVAL) { + nilfs_error(inode->i_sb, fname, + "broken bmap (inode number=%lu)\n", inode->i_ino); + err = -EIO; + } + return err; +} + /** * nilfs_bmap_lookup_at_level - find a data block or node block * @bmap: bmap @@ -66,8 +79,10 @@ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level, down_read(&bmap->b_sem); ret = bmap->b_ops->bop_lookup(bmap, key, level, ptrp); - if (ret < 0) + if (ret < 0) { + ret = nilfs_bmap_convert_error(bmap, __func__, ret); goto out; + } if (NILFS_BMAP_USE_VBN(bmap)) { ret = nilfs_dat_translate(nilfs_bmap_get_dat(bmap), *ptrp, &blocknr); @@ -88,7 +103,8 @@ int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp, down_read(&bmap->b_sem); ret = bmap->b_ops->bop_lookup_contig(bmap, key, ptrp, maxblocks); up_read(&bmap->b_sem); - return ret; + + return nilfs_bmap_convert_error(bmap, __func__, ret); } static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) @@ -144,7 +160,8 @@ int nilfs_bmap_insert(struct nilfs_bmap *bmap, down_write(&bmap->b_sem); ret = nilfs_bmap_do_insert(bmap, key, rec); up_write(&bmap->b_sem); - return ret; + + return nilfs_bmap_convert_error(bmap, __func__, ret); } static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key) @@ -180,9 +197,12 @@ int nilfs_bmap_last_key(struct nilfs_bmap *bmap, unsigned long *key) down_read(&bmap->b_sem); ret = bmap->b_ops->bop_last_key(bmap, &lastkey); - if (!ret) - *key = lastkey; up_read(&bmap->b_sem); + + if (ret < 0) + ret = nilfs_bmap_convert_error(bmap, __func__, ret); + else + *key = lastkey; return ret; } @@ -210,7 +230,8 @@ int nilfs_bmap_delete(struct nilfs_bmap *bmap, unsigned long key) down_write(&bmap->b_sem); ret = nilfs_bmap_do_delete(bmap, key); up_write(&bmap->b_sem); - return ret; + + return nilfs_bmap_convert_error(bmap, __func__, ret); } static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, unsigned long key) @@ -261,7 +282,8 @@ int nilfs_bmap_truncate(struct nilfs_bmap *bmap, unsigned long key) down_write(&bmap->b_sem); ret = nilfs_bmap_do_truncate(bmap, key); up_write(&bmap->b_sem); - return ret; + + return nilfs_bmap_convert_error(bmap, __func__, ret); } /** @@ -300,7 +322,8 @@ int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh) down_write(&bmap->b_sem); ret = bmap->b_ops->bop_propagate(bmap, bh); up_write(&bmap->b_sem); - return ret; + + return nilfs_bmap_convert_error(bmap, __func__, ret); } /** @@ -344,7 +367,8 @@ int nilfs_bmap_assign(struct nilfs_bmap *bmap, down_write(&bmap->b_sem); ret = bmap->b_ops->bop_assign(bmap, bh, blocknr, binfo); up_write(&bmap->b_sem); - return ret; + + return nilfs_bmap_convert_error(bmap, __func__, ret); } /** @@ -373,7 +397,8 @@ int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level) 
down_write(&bmap->b_sem); ret = bmap->b_ops->bop_mark(bmap, key, level); up_write(&bmap->b_sem); - return ret; + + return nilfs_bmap_convert_error(bmap, __func__, ret); } /** diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index 9f8a2da67f90..bfc73d3a30ed 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c @@ -149,14 +149,9 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino, } err = nilfs_palloc_get_entry_block(ifile, ino, 0, out_bh); - if (unlikely(err)) { - if (err == -EINVAL) - nilfs_error(sb, __func__, "ifile is broken"); - else - nilfs_warning(sb, __func__, - "unable to read inode: %lu", - (unsigned long) ino); - } + if (unlikely(err)) + nilfs_warning(sb, __func__, "unable to read inode: %lu", + (unsigned long) ino); return err; } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 77b48c8fab17..550b1788981e 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -96,11 +96,6 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, inode->i_ino, (unsigned long long)blkoff); err = 0; - } else if (err == -EINVAL) { - nilfs_error(inode->i_sb, __func__, - "broken bmap (inode=%lu)\n", - inode->i_ino); - err = -EIO; } nilfs_transaction_abort(inode->i_sb); goto out; @@ -629,7 +624,7 @@ static void nilfs_truncate_bmap(struct nilfs_inode_info *ii, if (!test_bit(NILFS_I_BMAP, &ii->i_state)) return; - repeat: +repeat: ret = nilfs_bmap_last_key(ii->i_bmap, &b); if (ret == -ENOENT) return; @@ -646,14 +641,10 @@ static void nilfs_truncate_bmap(struct nilfs_inode_info *ii, nilfs_bmap_truncate(ii->i_bmap, b) == 0)) goto repeat; - failed: - if (ret == -EINVAL) - nilfs_error(ii->vfs_inode.i_sb, __func__, - "bmap is broken (ino=%lu)", ii->vfs_inode.i_ino); - else - nilfs_warning(ii->vfs_inode.i_sb, __func__, - "failed to truncate bmap (ino=%lu, err=%d)", - ii->vfs_inode.i_ino, ret); +failed: + nilfs_warning(ii->vfs_inode.i_sb, __func__, + "failed to truncate bmap (ino=%lu, err=%d)", + ii->vfs_inode.i_ino, ret); } void nilfs_truncate(struct inode *inode) diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 39a5b84e2c9f..f5d4b184eaf9 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -237,8 +237,6 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block, * * %-ENOENT - the specified block does not exist (hole block) * - * %-EINVAL - bmap is broken. (the caller should call nilfs_error()) - * * %-EROFS - Read only filesystem (for create mode) */ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create, @@ -273,8 +271,6 @@ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create, * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error - * - * %-EINVAL - bmap is broken. (the caller should call nilfs_error()) */ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block) { @@ -350,8 +346,6 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block) * %-EIO - I/O error * * %-ENOENT - the specified block does not exist (hole block) - * - * %-EINVAL - bmap is broken. 
(the caller should call nilfs_error()) */ int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block) { diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 687d090cea34..d3d2f4396f72 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -504,17 +504,6 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci, return err; } -static int nilfs_handle_bmap_error(int err, const char *fname, - struct inode *inode, struct super_block *sb) -{ - if (err == -EINVAL) { - nilfs_error(sb, fname, "broken bmap (inode=%lu)\n", - inode->i_ino); - err = -EIO; - } - return err; -} - /* * Callback functions that enumerate, mark, and collect dirty blocks */ @@ -524,9 +513,8 @@ static int nilfs_collect_file_data(struct nilfs_sc_info *sci, int err; err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh); - if (unlikely(err < 0)) - return nilfs_handle_bmap_error(err, __func__, inode, - sci->sc_super); + if (err < 0) + return err; err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(struct nilfs_binfo_v)); @@ -539,13 +527,7 @@ static int nilfs_collect_file_node(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode) { - int err; - - err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh); - if (unlikely(err < 0)) - return nilfs_handle_bmap_error(err, __func__, inode, - sci->sc_super); - return 0; + return nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh); } static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci, @@ -588,9 +570,8 @@ static int nilfs_collect_dat_data(struct nilfs_sc_info *sci, int err; err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh); - if (unlikely(err < 0)) - return nilfs_handle_bmap_error(err, __func__, inode, - sci->sc_super); + if (err < 0) + return err; err = nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64)); if (!err) @@ -1563,7 +1544,6 @@ nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci, return 0; failed_bmap: - err = nilfs_handle_bmap_error(err, __func__, inode, sci->sc_super); return err; } -- cgit v1.2.2 From 27e6c7a3ce29ae5fa5bec4ed5917f8508bfac120 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 26 Dec 2010 16:28:28 +0900 Subject: nilfs2: mark buffer heads as delayed until the data is written to disk Nilfs does not allocate new blocks on disk until they are actually written to. To implement fiemap, we need to deal with such blocks. To allow successive fiemap patch to distinguish mapped but unallocated regions, this marks buffer heads of those new blocks as delayed and clears the flag after the blocks are written to disk. 
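The "delay" bit used here is the ordinary buffer_head flag that delayed-allocation filesystems rely on: it marks a block that is mapped into the page cache but has no disk address yet, and it is cleared once writeback assigns one. Schematically, the two halves of the patch do something like the following (kernel-style sketch, heavily simplified from the actual hunks):

    #include <linux/buffer_head.h>

    /* get_block side: a freshly allocated-on-write block has no disk address yet. */
    static void mark_block_delayed(struct buffer_head *bh)
    {
        set_buffer_new(bh);
        set_buffer_delay(bh);
    }

    /* writeback side: once the log writer has placed the block on disk. */
    static void mark_block_written(struct buffer_head *bh)
    {
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
        clear_buffer_delay(bh);
    }

The fiemap patch that follows can then treat buffer_delay() buffers as "mapped but unallocated" and report them as FIEMAP_EXTENT_DELALLOC.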
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/inode.c | 1 + fs/nilfs2/segment.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 550b1788981e..1a546a86d7a7 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -104,6 +104,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, nilfs_transaction_commit(inode->i_sb); /* never fails */ /* Error handling should be detailed */ set_buffer_new(bh_result); + set_buffer_delay(bh_result); map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed to proper value */ } else if (ret == -ENOENT) { diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index d3d2f4396f72..efc7d0a1bbf7 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1763,6 +1763,7 @@ static void nilfs_clear_copied_buffers(struct list_head *list, int err) if (!err) { set_buffer_uptodate(bh); clear_buffer_dirty(bh); + clear_buffer_delay(bh); clear_buffer_nilfs_volatile(bh); } brelse(bh); /* for b_assoc_buffers */ @@ -1889,6 +1890,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) b_assoc_buffers) { set_buffer_uptodate(bh); clear_buffer_dirty(bh); + clear_buffer_delay(bh); clear_buffer_nilfs_volatile(bh); clear_buffer_nilfs_redirected(bh); if (bh == segbuf->sb_super_root) { -- cgit v1.2.2 From 622daaff0a8975fb5c5b95f24f3234550ba32e92 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 26 Dec 2010 16:38:43 +0900 Subject: nilfs2: fiemap support This adds fiemap to nilfs. Two new functions, nilfs_fiemap and nilfs_find_uncommitted_extent are added. nilfs_fiemap() implements the fiemap inode operation, and nilfs_find_uncommitted_extent() helps to get a range of data blocks whose physical location has not been determined. nilfs_fiemap() collects extent information by looping through nilfs_bmap_lookup_contig and nilfs_find_uncommitted_extent routines. 
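At its core the extent walk is a run-length merge over the block mapping: contiguous physical blocks are accumulated into one extent, a hole or a discontinuity flushes the current extent to fiemap_fill_next_extent(), and ranges that are still delayed are reported separately with FIEMAP_EXTENT_DELALLOC. The toy program below shows only the merge-and-flush part of that loop (illustrative sketch — printf stands in for fiemap_fill_next_extent(), and the delayed-allocation and end-of-file handling of the real function are omitted):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy block map: phys[i] == 0 means block i is a hole. */
    static void emit_extents(const uint64_t *phys, uint64_t nblocks,
                             unsigned blkbits)
    {
        uint64_t logical = 0, start_phys = 0, len = 0;

        for (uint64_t b = 0; b < nblocks; b++) {
            if (len && phys[b] == start_phys + len) {
                len++;                      /* extend the current extent */
                continue;
            }
            if (len)                        /* flush the finished extent */
                printf("extent: logical %llu phys %llu len %llu\n",
                       (unsigned long long)(logical << blkbits),
                       (unsigned long long)(start_phys << blkbits),
                       (unsigned long long)(len << blkbits));
            len = 0;
            if (phys[b]) {                  /* start a new extent */
                logical = b;
                start_phys = phys[b];
                len = 1;
            }
        }
        if (len)                            /* flush the trailing extent */
            printf("extent: logical %llu phys %llu len %llu\n",
                   (unsigned long long)(logical << blkbits),
                   (unsigned long long)(start_phys << blkbits),
                   (unsigned long long)(len << blkbits));
    }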
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/file.c | 1 + fs/nilfs2/inode.c | 131 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ fs/nilfs2/namei.c | 1 + fs/nilfs2/nilfs.h | 2 + fs/nilfs2/page.c | 84 ++++++++++++++++++++++++++++++++++ fs/nilfs2/page.h | 3 ++ 6 files changed, 222 insertions(+) (limited to 'fs') diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index c9a30d7ff6fc..2f560c9fb808 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c @@ -155,6 +155,7 @@ const struct inode_operations nilfs_file_inode_operations = { .truncate = nilfs_truncate, .setattr = nilfs_setattr, .permission = nilfs_permission, + .fiemap = nilfs_fiemap, }; /* end of file */ diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 1a546a86d7a7..b2a815033ee3 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -916,3 +916,134 @@ void nilfs_dirty_inode(struct inode *inode) nilfs_mark_inode_dirty(inode); nilfs_transaction_commit(inode->i_sb); /* never fails */ } + +int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + __u64 start, __u64 len) +{ + struct the_nilfs *nilfs = NILFS_I_NILFS(inode); + __u64 logical = 0, phys = 0, size = 0; + __u32 flags = 0; + loff_t isize; + sector_t blkoff, end_blkoff; + sector_t delalloc_blkoff; + unsigned long delalloc_blklen; + unsigned int blkbits = inode->i_blkbits; + int ret, n; + + ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC); + if (ret) + return ret; + + mutex_lock(&inode->i_mutex); + + isize = i_size_read(inode); + + blkoff = start >> blkbits; + end_blkoff = (start + len - 1) >> blkbits; + + delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff, + &delalloc_blkoff); + + do { + __u64 blkphy; + unsigned int maxblocks; + + if (delalloc_blklen && blkoff == delalloc_blkoff) { + if (size) { + /* End of the current extent */ + ret = fiemap_fill_next_extent( + fieinfo, logical, phys, size, flags); + if (ret) + break; + } + if (blkoff > end_blkoff) + break; + + flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC; + logical = blkoff << blkbits; + phys = 0; + size = delalloc_blklen << blkbits; + + blkoff = delalloc_blkoff + delalloc_blklen; + delalloc_blklen = nilfs_find_uncommitted_extent( + inode, blkoff, &delalloc_blkoff); + continue; + } + + /* + * Limit the number of blocks that we look up so as + * not to get into the next delayed allocation extent. 
+ */ + maxblocks = INT_MAX; + if (delalloc_blklen) + maxblocks = min_t(sector_t, delalloc_blkoff - blkoff, + maxblocks); + blkphy = 0; + + down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); + n = nilfs_bmap_lookup_contig( + NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks); + up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); + + if (n < 0) { + int past_eof; + + if (unlikely(n != -ENOENT)) + break; /* error */ + + /* HOLE */ + blkoff++; + past_eof = ((blkoff << blkbits) >= isize); + + if (size) { + /* End of the current extent */ + + if (past_eof) + flags |= FIEMAP_EXTENT_LAST; + + ret = fiemap_fill_next_extent( + fieinfo, logical, phys, size, flags); + if (ret) + break; + size = 0; + } + if (blkoff > end_blkoff || past_eof) + break; + } else { + if (size) { + if (phys && blkphy << blkbits == phys + size) { + /* The current extent goes on */ + size += n << blkbits; + } else { + /* Terminate the current extent */ + ret = fiemap_fill_next_extent( + fieinfo, logical, phys, size, + flags); + if (ret || blkoff > end_blkoff) + break; + + /* Start another extent */ + flags = FIEMAP_EXTENT_MERGED; + logical = blkoff << blkbits; + phys = blkphy << blkbits; + size = n << blkbits; + } + } else { + /* Start a new extent */ + flags = FIEMAP_EXTENT_MERGED; + logical = blkoff << blkbits; + phys = blkphy << blkbits; + size = n << blkbits; + } + blkoff += n; + } + cond_resched(); + } while (true); + + /* If ret is 1 then we just hit the end of the extent array */ + if (ret == 1) + ret = 0; + + mutex_unlock(&inode->i_mutex); + return ret; +} diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 6e9557ecf161..98034271cd02 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -577,6 +577,7 @@ const struct inode_operations nilfs_dir_inode_operations = { .rename = nilfs_rename, .setattr = nilfs_setattr, .permission = nilfs_permission, + .fiemap = nilfs_fiemap, }; const struct inode_operations nilfs_special_inode_operations = { diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 0ca98823db59..a0e21363e865 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -264,6 +264,8 @@ extern int nilfs_set_file_dirty(struct nilfs_sb_info *, struct inode *, unsigned); extern int nilfs_mark_inode_dirty(struct inode *); extern void nilfs_dirty_inode(struct inode *); +int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, + __u64 start, __u64 len); /* super.c */ extern struct inode *nilfs_alloc_inode(struct super_block *); diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index a6c3c2e817f8..48a775ec1d2a 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -546,3 +546,87 @@ int __nilfs_clear_page_dirty(struct page *page) } return TestClearPageDirty(page); } + +/** + * nilfs_find_uncommitted_extent - find extent of uncommitted data + * @inode: inode + * @start_blk: start block offset (in) + * @blkoff: start offset of the found extent (out) + * + * This function searches an extent of buffers marked "delayed" which + * starts from a block offset equal to or larger than @start_blk. If + * such an extent was found, this will store the start offset in + * @blkoff and return its length in blocks. Otherwise, zero is + * returned. 
+ */ +unsigned long nilfs_find_uncommitted_extent(struct inode *inode, + sector_t start_blk, + sector_t *blkoff) +{ + unsigned int i; + pgoff_t index; + unsigned int nblocks_in_page; + unsigned long length = 0; + sector_t b; + struct pagevec pvec; + struct page *page; + + if (inode->i_mapping->nrpages == 0) + return 0; + + index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits); + nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits); + + pagevec_init(&pvec, 0); + +repeat: + pvec.nr = find_get_pages_contig(inode->i_mapping, index, PAGEVEC_SIZE, + pvec.pages); + if (pvec.nr == 0) + return length; + + if (length > 0 && pvec.pages[0]->index > index) + goto out; + + b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + i = 0; + do { + page = pvec.pages[i]; + + lock_page(page); + if (page_has_buffers(page)) { + struct buffer_head *bh, *head; + + bh = head = page_buffers(page); + do { + if (b < start_blk) + continue; + if (buffer_delay(bh)) { + if (length == 0) + *blkoff = b; + length++; + } else if (length > 0) { + goto out_locked; + } + } while (++b, bh = bh->b_this_page, bh != head); + } else { + if (length > 0) + goto out_locked; + + b += nblocks_in_page; + } + unlock_page(page); + + } while (++i < pagevec_count(&pvec)); + + index = page->index + 1; + pagevec_release(&pvec); + cond_resched(); + goto repeat; + +out_locked: + unlock_page(page); +out: + pagevec_release(&pvec); + return length; +} diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h index fb9e8a8a2038..622df27cd891 100644 --- a/fs/nilfs2/page.h +++ b/fs/nilfs2/page.h @@ -66,6 +66,9 @@ void nilfs_mapping_init(struct address_space *mapping, struct backing_dev_info *bdi, const struct address_space_operations *aops); unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned); +unsigned long nilfs_find_uncommitted_extent(struct inode *inode, + sector_t start_blk, + sector_t *blkoff); #define NILFS_PAGE_BUG(page, m, a...) \ do { nilfs_page_bug(page); BUG(); } while (0) -- cgit v1.2.2 From ae53a0a2ce2a89f1aa824a5fc6d2099907cfb409 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sun, 26 Dec 2010 23:30:02 +0900 Subject: nilfs2: fix a checkpatch error in page.c Will correct the following checkpatch error: ERROR: trailing whitespace #494: FILE: page.c:494: + $ Signed-off-by: Ryusuke Konishi --- fs/nilfs2/page.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 48a775ec1d2a..0c432416cfef 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -491,7 +491,7 @@ unsigned nilfs_page_count_clean_buffers(struct page *page, } return nc; } - + void nilfs_mapping_init_once(struct address_space *mapping) { memset(mapping, 0, sizeof(*mapping)); -- cgit v1.2.2 From 888da23c2f70ca2e9897b725322a6456285ee9c4 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 27 Dec 2010 00:01:28 +0900 Subject: nilfs2: get rid of loaded flag from nilfs object NILFS_LOADED flag of the nilfs object is not used now, so this will remove it. 
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/the_nilfs.c | 1 - fs/nilfs2/the_nilfs.h | 3 --- 2 files changed, 4 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 0254be2d73c6..95e444319f12 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -329,7 +329,6 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi) printk(KERN_INFO "NILFS: recovery complete.\n"); skip_recovery: - set_nilfs_loaded(nilfs); nilfs_clear_recovery_info(&ri); sbi->s_super->s_flags = s_flags; return 0; diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 69226e14b745..fd85e4c05c6b 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -36,8 +36,6 @@ /* the_nilfs struct */ enum { THE_NILFS_INIT = 0, /* Information from super_block is set */ - THE_NILFS_LOADED, /* Roll-back/roll-forward has done and - the latest checkpoint was loaded */ THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */ THE_NILFS_GC_RUNNING, /* gc process is running */ THE_NILFS_SB_DIRTY, /* super block is dirty */ @@ -178,7 +176,6 @@ static inline int nilfs_##name(struct the_nilfs *nilfs) \ } THE_NILFS_FNS(INIT, init) -THE_NILFS_FNS(LOADED, loaded) THE_NILFS_FNS(DISCONTINUED, discontinued) THE_NILFS_FNS(GC_RUNNING, gc_running) THE_NILFS_FNS(SB_DIRTY, sb_dirty) -- cgit v1.2.2 From a7a8447edef2cfa9b28cc3892993d9fafb362671 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 27 Dec 2010 00:03:02 +0900 Subject: nilfs2: simplify nilfs_mdt_freeze_buffer nilfs_page_get_nth_block() function used in nilfs_mdt_freeze_buffer() always returns a valid buffer head, so its validity check can be removed. Signed-off-by: Ryusuke Konishi --- fs/nilfs2/mdt.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index f5d4b184eaf9..6a0e2a189f60 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -493,31 +493,29 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh) struct buffer_head *bh_frozen; struct page *page; int blkbits = inode->i_blkbits; - int ret = -ENOMEM; page = grab_cache_page(&shadow->frozen_data, bh->b_page->index); if (!page) - return ret; + return -ENOMEM; if (!page_has_buffers(page)) create_empty_buffers(page, 1 << blkbits, 0); bh_frozen = nilfs_page_get_nth_block(page, bh_offset(bh) >> blkbits); - if (bh_frozen) { - if (!buffer_uptodate(bh_frozen)) - nilfs_copy_buffer(bh_frozen, bh); - if (list_empty(&bh_frozen->b_assoc_buffers)) { - list_add_tail(&bh_frozen->b_assoc_buffers, - &shadow->frozen_buffers); - set_buffer_nilfs_redirected(bh); - } else { - brelse(bh_frozen); /* already frozen */ - } - ret = 0; + + if (!buffer_uptodate(bh_frozen)) + nilfs_copy_buffer(bh_frozen, bh); + if (list_empty(&bh_frozen->b_assoc_buffers)) { + list_add_tail(&bh_frozen->b_assoc_buffers, + &shadow->frozen_buffers); + set_buffer_nilfs_redirected(bh); + } else { + brelse(bh_frozen); /* already frozen */ } + unlock_page(page); page_cache_release(page); - return ret; + return 0; } struct buffer_head * -- cgit v1.2.2 From 06df0f999247a3153c3ec284c7ada36ef785eb97 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 27 Dec 2010 00:04:06 +0900 Subject: nilfs2: get rid of nilfs_mount_options structure Only mount_opt member is used in the nilfs_mount_options structure, and we can simplify it. 
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/sb.h | 8 -------- fs/nilfs2/super.c | 6 +++--- 2 files changed, 3 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/sb.h b/fs/nilfs2/sb.h index 35a07157b980..7a17715f215f 100644 --- a/fs/nilfs2/sb.h +++ b/fs/nilfs2/sb.h @@ -27,14 +27,6 @@ #include #include -/* - * Mount options - */ -struct nilfs_mount_options { - unsigned long mount_opt; - __u64 snapshot_cno; -}; - struct the_nilfs; struct nilfs_sc_info; diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index f0ab826251c0..6ea32d9b1b9d 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1019,11 +1019,11 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) struct nilfs_sb_info *sbi = NILFS_SB(sb); struct the_nilfs *nilfs = sbi->s_nilfs; unsigned long old_sb_flags; - struct nilfs_mount_options old_opts; + unsigned long old_mount_opt; int err; old_sb_flags = sb->s_flags; - old_opts.mount_opt = sbi->s_mount_opt; + old_mount_opt = sbi->s_mount_opt; if (!parse_options(data, sb, 1)) { err = -EINVAL; @@ -1092,7 +1092,7 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) restore_opts: sb->s_flags = old_sb_flags; - sbi->s_mount_opt = old_opts.mount_opt; + sbi->s_mount_opt = old_mount_opt; return err; } -- cgit v1.2.2 From bcbc8c648d6cc88f771435d8031c1a13e00945ed Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 27 Dec 2010 00:05:49 +0900 Subject: nilfs2: do not pass sbi to functions which can get it from inode This removes argument for passing nilfs_sb_info structure from nilfs_set_file_dirty and nilfs_load_inode_block functions. We can get a pointer to the structure from inodes. [Stephen Rothwell : fix conflict with commit b74c79e99389cd79b31fcc08f82c24e492e63c7e] Signed-off-by: Ryusuke Konishi --- fs/nilfs2/dir.c | 3 +-- fs/nilfs2/inode.c | 18 ++++++++---------- fs/nilfs2/nilfs.h | 6 ++---- fs/nilfs2/recovery.c | 2 +- 4 files changed, 12 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index cb003c8ee1f6..9d45773b79e6 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -91,7 +91,6 @@ static void nilfs_commit_chunk(struct page *page, unsigned from, unsigned to) { struct inode *dir = mapping->host; - struct nilfs_sb_info *sbi = NILFS_SB(dir->i_sb); loff_t pos = page_offset(page) + from; unsigned len = to - from; unsigned nr_dirty, copied; @@ -103,7 +102,7 @@ static void nilfs_commit_chunk(struct page *page, i_size_write(dir, pos + copied); if (IS_DIRSYNC(dir)) nilfs_set_transaction_flag(NILFS_TI_SYNC); - err = nilfs_set_file_dirty(sbi, dir, nr_dirty); + err = nilfs_set_file_dirty(dir, nr_dirty); WARN_ON(err); /* do not happen */ unlock_page(page); } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index b2a815033ee3..43416470e07b 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -181,10 +181,9 @@ static int nilfs_set_page_dirty(struct page *page) if (ret) { struct inode *inode = page->mapping->host; - struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb); unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits); - nilfs_set_file_dirty(sbi, inode, nr_dirty); + nilfs_set_file_dirty(inode, nr_dirty); } return ret; } @@ -225,7 +224,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping, start + copied); copied = generic_write_end(file, mapping, pos, len, copied, page, fsdata); - nilfs_set_file_dirty(NILFS_SB(inode->i_sb), inode, nr_dirty); + nilfs_set_file_dirty(inode, nr_dirty); err = nilfs_transaction_commit(inode->i_sb); return err ? 
: copied; } @@ -674,7 +673,7 @@ void nilfs_truncate(struct inode *inode) nilfs_set_transaction_flag(NILFS_TI_SYNC); nilfs_mark_inode_dirty(inode); - nilfs_set_file_dirty(NILFS_SB(sb), inode, 0); + nilfs_set_file_dirty(inode, 0); nilfs_transaction_commit(sb); /* May construct a logical segment and may fail in sync mode. But truncate has no return value. */ @@ -792,9 +791,9 @@ int nilfs_permission(struct inode *inode, int mask, unsigned int flags) return generic_permission(inode, mask, flags, NULL); } -int nilfs_load_inode_block(struct nilfs_sb_info *sbi, struct inode *inode, - struct buffer_head **pbh) +int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh) { + struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb); struct nilfs_inode_info *ii = NILFS_I(inode); int err; @@ -835,9 +834,9 @@ int nilfs_inode_dirty(struct inode *inode) return ret; } -int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode, - unsigned nr_dirty) +int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty) { + struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb); struct nilfs_inode_info *ii = NILFS_I(inode); atomic_add(nr_dirty, &sbi->s_nilfs->ns_ndirtyblks); @@ -870,11 +869,10 @@ int nilfs_set_file_dirty(struct nilfs_sb_info *sbi, struct inode *inode, int nilfs_mark_inode_dirty(struct inode *inode) { - struct nilfs_sb_info *sbi = NILFS_SB(inode->i_sb); struct buffer_head *ibh; int err; - err = nilfs_load_inode_block(sbi, inode, &ibh); + err = nilfs_load_inode_block(inode, &ibh); if (unlikely(err)) { nilfs_warning(inode->i_sb, __func__, "failed to reget inode block.\n"); diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index a0e21363e865..981183d01250 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -257,11 +257,9 @@ extern void nilfs_truncate(struct inode *); extern void nilfs_evict_inode(struct inode *); extern int nilfs_setattr(struct dentry *, struct iattr *); int nilfs_permission(struct inode *inode, int mask, unsigned int flags); -extern int nilfs_load_inode_block(struct nilfs_sb_info *, struct inode *, - struct buffer_head **); +int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh); extern int nilfs_inode_dirty(struct inode *); -extern int nilfs_set_file_dirty(struct nilfs_sb_info *, struct inode *, - unsigned); +int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty); extern int nilfs_mark_inode_dirty(struct inode *); extern void nilfs_dirty_inode(struct inode *); int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 5d2711c28da7..3dfcd3b7d389 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -535,7 +535,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, if (unlikely(err)) goto failed_page; - err = nilfs_set_file_dirty(sbi, inode, 1); + err = nilfs_set_file_dirty(inode, 1); if (unlikely(err)) goto failed_page; -- cgit v1.2.2 From 365e215ce1f154e288ff0f7c9acbdf5421f57949 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 27 Dec 2010 00:07:30 +0900 Subject: nilfs2: unfold nilfs_dat_inode function nilfs_dat_inode function was a wrapper to switch between normal dat inode and gcdat, a clone of the dat inode for garbage collection. This function got obsolete when the gcdat inode was removed, and now we can access the dat inode directly from a nilfs object. So, we will unfold the wrapper and remove it. 
Signed-off-by: Ryusuke Konishi --- fs/nilfs2/bmap.c | 2 +- fs/nilfs2/btnode.c | 3 +-- fs/nilfs2/inode.c | 11 +++++------ fs/nilfs2/ioctl.c | 12 +++++------- fs/nilfs2/nilfs.h | 5 ----- fs/nilfs2/segment.c | 11 +++++------ fs/nilfs2/the_nilfs.c | 5 ++--- 7 files changed, 19 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index 4b7aeb34cc75..3ee67c67cc52 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -35,7 +35,7 @@ struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *bmap) { - return nilfs_dat_inode(NILFS_I_NILFS(bmap->b_inode)); + return NILFS_I_NILFS(bmap->b_inode)->ns_dat; } static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap, diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 5115814cb745..388e9e8f5286 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -104,8 +104,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, if (pblocknr == 0) { pblocknr = blocknr; if (inode->i_ino != NILFS_DAT_INO) { - struct inode *dat = - nilfs_dat_inode(NILFS_I_NILFS(inode)); + struct inode *dat = NILFS_I_NILFS(inode)->ns_dat; /* blocknr is a virtual block number */ err = nilfs_dat_translate(dat, blocknr, &pblocknr); diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 43416470e07b..2fd440d8d6b8 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -58,7 +58,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, struct nilfs_inode_info *ii = NILFS_I(inode); __u64 blknum = 0; int err = 0, ret; - struct inode *dat = nilfs_dat_inode(NILFS_I_NILFS(inode)); + struct inode *dat = NILFS_I_NILFS(inode)->ns_dat; unsigned maxblocks = bh_result->b_size >> inode->i_blkbits; down_read(&NILFS_MDT(dat)->mi_sem); @@ -420,13 +420,12 @@ static int __nilfs_read_inode(struct super_block *sb, struct nilfs_root *root, unsigned long ino, struct inode *inode) { - struct nilfs_sb_info *sbi = NILFS_SB(sb); - struct inode *dat = nilfs_dat_inode(sbi->s_nilfs); + struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs; struct buffer_head *bh; struct nilfs_inode *raw_inode; int err; - down_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ + down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh); if (unlikely(err)) goto bad_inode; @@ -456,7 +455,7 @@ static int __nilfs_read_inode(struct super_block *sb, } nilfs_ifile_unmap_inode(root->ifile, ino, bh); brelse(bh); - up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ + up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); nilfs_set_inode_flags(inode); return 0; @@ -465,7 +464,7 @@ static int __nilfs_read_inode(struct super_block *sb, brelse(bh); bad_inode: - up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ + up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); return err; } diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index b185e937a335..496738963fdb 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -233,7 +233,7 @@ nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, int ret; down_read(&nilfs->ns_segctor_sem); - ret = nilfs_dat_get_vinfo(nilfs_dat_inode(nilfs), buf, size, nmembs); + ret = nilfs_dat_get_vinfo(nilfs->ns_dat, buf, size, nmembs); up_read(&nilfs->ns_segctor_sem); return ret; } @@ -242,8 +242,7 @@ static ssize_t nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, void *buf, size_t size, size_t nmembs) { - struct inode *dat = nilfs_dat_inode(nilfs); - struct nilfs_bmap *bmap = NILFS_I(dat)->i_bmap; + struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap; struct nilfs_bdesc *bdescs = buf; int 
ret, i; @@ -421,7 +420,7 @@ static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, size_t nmembs = argv->v_nmembs; int ret; - ret = nilfs_dat_freev(nilfs_dat_inode(nilfs), buf, nmembs); + ret = nilfs_dat_freev(nilfs->ns_dat, buf, nmembs); return (ret < 0) ? ret : nmembs; } @@ -430,8 +429,7 @@ static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) { size_t nmembs = argv->v_nmembs; - struct inode *dat = nilfs_dat_inode(nilfs); - struct nilfs_bmap *bmap = NILFS_I(dat)->i_bmap; + struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap; struct nilfs_bdesc *bdescs = buf; int ret, i; @@ -450,7 +448,7 @@ static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, /* skip dead block */ continue; if (bdescs[i].bd_level == 0) { - ret = nilfs_mdt_mark_block_dirty(dat, + ret = nilfs_mdt_mark_block_dirty(nilfs->ns_dat, bdescs[i].bd_offset); if (ret < 0) { WARN_ON(ret == -ENOENT); diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 981183d01250..777e8fd04304 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -190,11 +190,6 @@ static inline int nilfs_doing_construction(void) return nilfs_test_transaction_flag(NILFS_TI_WRITER); } -static inline struct inode *nilfs_dat_inode(const struct the_nilfs *nilfs) -{ - return nilfs->ns_dat; -} - /* * function prototype */ diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index efc7d0a1bbf7..55ebae5c7f39 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -757,9 +757,8 @@ static int nilfs_test_metadata_dirty(struct the_nilfs *nilfs, ret++; if (nilfs_mdt_fetch_dirty(nilfs->ns_sufile)) ret++; - if (ret || nilfs_doing_gc()) - if (nilfs_mdt_fetch_dirty(nilfs_dat_inode(nilfs))) - ret++; + if ((ret || nilfs_doing_gc()) && nilfs_mdt_fetch_dirty(nilfs->ns_dat)) + ret++; return ret; } @@ -795,7 +794,7 @@ static void nilfs_segctor_clear_metadata_dirty(struct nilfs_sc_info *sci) nilfs_mdt_clear_dirty(sci->sc_root->ifile); nilfs_mdt_clear_dirty(nilfs->ns_cpfile); nilfs_mdt_clear_dirty(nilfs->ns_sufile); - nilfs_mdt_clear_dirty(nilfs_dat_inode(nilfs)); + nilfs_mdt_clear_dirty(nilfs->ns_dat); } static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci) @@ -904,7 +903,7 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci, nilfs->ns_nongc_ctime : sci->sc_seg_ctime); raw_sr->sr_flags = 0; - nilfs_write_inode_common(nilfs_dat_inode(nilfs), (void *)raw_sr + + nilfs_write_inode_common(nilfs->ns_dat, (void *)raw_sr + NILFS_SR_DAT_OFFSET(isz), 1); nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr + NILFS_SR_CPFILE_OFFSET(isz), 1); @@ -1160,7 +1159,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) sci->sc_stage.scnt++; /* Fall through */ case NILFS_ST_DAT: dat_stage: - err = nilfs_segctor_scan_file(sci, nilfs_dat_inode(nilfs), + err = nilfs_segctor_scan_file(sci, nilfs->ns_dat, &nilfs_sc_dat_ops); if (unlikely(err)) break; diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 95e444319f12..ad4ac607cf57 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -650,12 +650,11 @@ int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump, int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks) { - struct inode *dat = nilfs_dat_inode(nilfs); unsigned long ncleansegs; - down_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ + down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile); - up_read(&NILFS_MDT(dat)->mi_sem); /* XXX */ + 
up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); *nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment; return 0; } -- cgit v1.2.2 From 39191628ed169510db2f3f472e1ec14e08f9690f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 8 Jan 2011 19:36:21 -0800 Subject: fs: fix namei.c kernel-doc notation Fix new kernel-doc notation warnings in fs/namei.c and spell ECHILD correctly. Warning(fs/namei.c:218): No description found for parameter 'flags' Warning(fs/namei.c:425): Excess function parameter 'Returns' description in 'nameidata_drop_rcu' Warning(fs/namei.c:478): Excess function parameter 'Returns' description in 'nameidata_dentry_drop_rcu' Warning(fs/namei.c:540): Excess function parameter 'Returns' description in 'nameidata_drop_rcu_last' Signed-off-by: Randy Dunlap Cc: Nick Piggin Signed-off-by: Linus Torvalds --- fs/namei.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 19433cdba011..24ece10470b6 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -202,7 +202,7 @@ static int acl_permission_check(struct inode *inode, int mask, unsigned int flag * @inode: inode to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * @check_acl: optional callback to check for Posix ACLs - * @flags IPERM_FLAG_ flags. + * @flags: IPERM_FLAG_ flags. * * Used to check for read/write/execute permissions on a file. * We use "fsuid" for this, letting us set arbitrary permissions @@ -407,7 +407,7 @@ void path_put_long(struct path *path) /** * nameidata_drop_rcu - drop this nameidata out of rcu-walk * @nd: nameidata pathwalk data to drop - * @Returns: 0 on success, -ECHLID on failure + * Returns: 0 on success, -ECHILD on failure * * Path walking has 2 modes, rcu-walk and ref-walk (see * Documentation/filesystems/path-lookup.txt). __drop_rcu* functions attempt @@ -468,7 +468,7 @@ static inline int nameidata_drop_rcu_maybe(struct nameidata *nd) * nameidata_dentry_drop_rcu - drop nameidata and dentry out of rcu-walk * @nd: nameidata pathwalk data to drop * @dentry: dentry to drop - * @Returns: 0 on success, -ECHLID on failure + * Returns: 0 on success, -ECHILD on failure * * nameidata_dentry_drop_rcu attempts to drop the current nd->path and nd->root, * and dentry into ref-walk. @dentry must be a path found by a do_lookup call on @@ -530,7 +530,7 @@ static inline int nameidata_dentry_drop_rcu_maybe(struct nameidata *nd, struct d /** * nameidata_drop_rcu_last - drop nameidata ending path walk out of rcu-walk * @nd: nameidata pathwalk data to drop - * @Returns: 0 on success, -ECHLID on failure + * Returns: 0 on success, -ECHILD on failure * * nameidata_drop_rcu_last attempts to drop the current nd->path into ref-walk. * nd->path should be the final element of the lookup, so nd->root is discarded. -- cgit v1.2.2 From 57cc7215b70856dc6bae8e55b00ecd7b1d7429b1 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Mon, 10 Jan 2011 08:18:25 +0200 Subject: headers: kobject.h redux Remove kobject.h from files which don't need it, notably, sched.h and fs.h. 
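A minimal illustration of the include-what-you-use rule behind this cleanup (the struct and file names here are hypothetical, not taken from the patch):

/* hypothetical example: a header that embeds a kobject includes
 * <linux/kobject.h> itself instead of relying on fs.h or sched.h
 * to drag it in indirectly. */
#include <linux/kobject.h>

struct example_item {
	struct kobject kobj;	/* needs the full struct definition */
};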
Signed-off-by: Alexey Dobriyan Signed-off-by: Linus Torvalds --- fs/gfs2/incore.h | 1 + fs/nilfs2/super.c | 1 - fs/sysfs/inode.c | 1 + fs/sysfs/sysfs.h | 1 + 4 files changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 8d3d2b4a0a7d..a79790c06275 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -11,6 +11,7 @@ #define __INCORE_DOT_H__ #include +#include #include #include #include diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 6ea32d9b1b9d..70dfdd532b83 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -47,7 +47,6 @@ #include #include #include -#include #include #include #include "nilfs.h" diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c index 30ac27345586..0a12eb89cd32 100644 --- a/fs/sysfs/inode.c +++ b/fs/sysfs/inode.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include "sysfs.h" diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h index ffaaa816bfba..3d28af31d863 100644 --- a/fs/sysfs/sysfs.h +++ b/fs/sysfs/sysfs.h @@ -9,6 +9,7 @@ */ #include +#include #include struct sysfs_open_dirent; -- cgit v1.2.2 From 4f531501e44206862735e81ddf2b70d0dcf6acf6 Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Mon, 10 Jan 2011 12:04:55 -0500 Subject: ext4: fix possible overflow in ext4_trim_fs() When determining last group through ext4_get_group_no_and_offset() the result may be wrong in cases when range->start and range-len are too big, because it may overflow when summing up those two numbers. Fix that by checking range->len and limit its value to ext4_blocks_count(). This commit was tested by myself with expected result. Signed-off-by: Lukas Czerner --- fs/ext4/mballoc.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 46d5414f59c1..7c603a02633e 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4819,6 +4819,7 @@ ext4_grpblk_t ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b, int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) { struct ext4_buddy e4b; + ext4_fsblk_t blocks_count = ext4_blocks_count(EXT4_SB(sb)->s_es); ext4_group_t first_group, last_group; ext4_group_t group, ngroups = ext4_get_groups_count(sb); ext4_grpblk_t cnt = 0, first_block, last_block; @@ -4830,6 +4831,11 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) minlen = range->minlen >> sb->s_blocksize_bits; trimmed = 0; + if (start >= blocks_count) + return -EINVAL; + if (start + len > blocks_count) + len = blocks_count - start; + if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb))) return -EINVAL; -- cgit v1.2.2 From 932596366760e3f0dac9998665af1c49afcc4285 Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Mon, 10 Jan 2011 12:09:59 -0500 Subject: ext4: remove warning message from ext4_issue_discard helper ext4_issue_discard is supposed to be helper for calling discard, however in case that underlying device does not support discard it prints out the warning message and clears the DISCARD t_mount_opt flag. Since it can be (and is) used by others, it should not do anything and let the caller to handle the error case. This commit removes warning message and flag setting from ext4_issue_discard and use it just in place where it is really needed (release_blocks_on_commit). FITRIM ioctl should not set any flags nor it should print out warning messages, so get rid of the warning as well. 
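Condensed from the hunks below, the resulting division of labour looks like this (sketch, not a complete function):

/* the helper only issues the request and reports the result */
return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);

/* only the journal-commit caller turns -EOPNOTSUPP into policy */
ret = ext4_issue_discard(sb, entry->group, entry->start_blk, entry->count);
if (unlikely(ret == -EOPNOTSUPP)) {
	ext4_warning(sb, "discard not supported, disabling");
	clear_opt(sb, DISCARD);
}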
Signed-off-by: Lukas Czerner --- fs/ext4/mballoc.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 7c603a02633e..12b604abc2fe 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2608,18 +2608,12 @@ int ext4_mb_release(struct super_block *sb) static inline int ext4_issue_discard(struct super_block *sb, ext4_group_t block_group, ext4_grpblk_t block, int count) { - int ret; ext4_fsblk_t discard_block; discard_block = block + ext4_group_first_block_no(sb, block_group); trace_ext4_discard_blocks(sb, (unsigned long long) discard_block, count); - ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); - if (ret == -EOPNOTSUPP) { - ext4_warning(sb, "discard not supported, disabling"); - clear_opt(sb, DISCARD); - } - return ret; + return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); } /* @@ -2631,7 +2625,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) struct super_block *sb = journal->j_private; struct ext4_buddy e4b; struct ext4_group_info *db; - int err, count = 0, count2 = 0; + int err, ret, count = 0, count2 = 0; struct ext4_free_data *entry; struct list_head *l, *ltmp; @@ -2641,9 +2635,15 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) mb_debug(1, "gonna free %u blocks in group %u (0x%p):", entry->count, entry->group, entry); - if (test_opt(sb, DISCARD)) - ext4_issue_discard(sb, entry->group, + if (test_opt(sb, DISCARD)) { + ret = ext4_issue_discard(sb, entry->group, entry->start_blk, entry->count); + if (unlikely(ret == -EOPNOTSUPP)) { + ext4_warning(sb, "discard not supported, " + "disabling"); + clear_opt(sb, DISCARD); + } + } err = ext4_mb_load_buddy(sb, entry->group, &e4b); /* we expect to find existing buddy because it's pinned */ @@ -4722,8 +4722,6 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count, ext4_unlock_group(sb, group); ret = ext4_issue_discard(sb, group, start, count); - if (ret) - ext4_std_error(sb, ret); ext4_lock_group(sb, group); mb_free_blocks(NULL, e4b, start, ex.fe_len); -- cgit v1.2.2 From eaeef86718249f5c75b1370f77a9bc11f196a01c Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 10 Jan 2011 12:10:07 -0500 Subject: ext4: clean up ext4_xattr_list()'s error code checking and return strategy Any time you see code that tries to add error codes together, you should want to claw your eyes out... 
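The corrected pattern can be sketched as follows; the helper names are placeholders for the in-inode and block listing functions, and locking is omitted:

ssize_t ret, ret2;

ret2 = list_ibody(dentry, buffer, buffer_size);	/* placeholder name */
if (ret2 < 0)
	return ret2;			/* an errno is returned, never summed */
if (buffer) {
	buffer += ret2;
	buffer_size -= ret2;
}
ret = list_block(dentry, buffer, buffer_size);	/* placeholder name */
if (ret < 0)
	return ret;
return ret + ret2;			/* only successful lengths are added */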
Signed-off-by: "Theodore Ts'o" --- fs/ext4/xattr.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index fa4b899da4b3..ca6ca14a827d 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -427,23 +427,23 @@ cleanup: static int ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) { - int i_error, b_error; + int ret, ret2; down_read(&EXT4_I(dentry->d_inode)->xattr_sem); - i_error = ext4_xattr_ibody_list(dentry, buffer, buffer_size); - if (i_error < 0) { - b_error = 0; - } else { - if (buffer) { - buffer += i_error; - buffer_size -= i_error; - } - b_error = ext4_xattr_block_list(dentry, buffer, buffer_size); - if (b_error < 0) - i_error = 0; + ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size); + if (ret < 0) + goto errout; + if (buffer) { + buffer += ret; + buffer_size -= ret; } + ret = ext4_xattr_block_list(dentry, buffer, buffer_size); + if (ret < 0) + goto errout; + ret += ret2; +errout: up_read(&EXT4_I(dentry->d_inode)->xattr_sem); - return i_error + b_error; + return ret; } /* -- cgit v1.2.2 From 6e9510b0e0de657ca7c7bfb10ced80b4d237dd58 Mon Sep 17 00:00:00 2001 From: Wang Sheng-Hui Date: Mon, 10 Jan 2011 12:10:30 -0500 Subject: ext2,ext3,ext4: clarify comment for extN_xattr_set_handle Signed-off-by: Wang Sheng-Hui Signed-off-by: "Theodore Ts'o" --- fs/ext2/xattr.c | 2 +- fs/ext3/xattr.c | 2 +- fs/ext4/xattr.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index f84700be3274..f3ffbf1cc8d0 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -355,7 +355,7 @@ static void ext2_xattr_update_super_block(struct super_block *sb) /* * ext2_xattr_set() * - * Create, replace or remove an extended attribute for this inode. Buffer + * Create, replace or remove an extended attribute for this inode. Value * is NULL to remove an existing extended attribute, and non-NULL to * either replace an existing extended attribute, or create a new extended * attribute. The flags XATTR_REPLACE and XATTR_CREATE diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c index e69dc6dfaa89..32e6cc23bd9a 100644 --- a/fs/ext3/xattr.c +++ b/fs/ext3/xattr.c @@ -925,7 +925,7 @@ ext3_xattr_ibody_set(handle_t *handle, struct inode *inode, /* * ext3_xattr_set_handle() * - * Create, replace or remove an extended attribute for this inode. Buffer + * Create, replace or remove an extended attribute for this inode. Value * is NULL to remove an existing extended attribute, and non-NULL to * either replace an existing extended attribute, or create a new extended * attribute. The flags XATTR_REPLACE and XATTR_CREATE diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index ca6ca14a827d..fc32176eee39 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -947,7 +947,7 @@ ext4_xattr_ibody_set(handle_t *handle, struct inode *inode, /* * ext4_xattr_set_handle() * - * Create, replace or remove an extended attribute for this inode. Buffer + * Create, replace or remove an extended attribute for this inode. Value * is NULL to remove an existing extended attribute, and non-NULL to * either replace an existing extended attribute, or create a new extended * attribute. 
The flags XATTR_REPLACE and XATTR_CREATE -- cgit v1.2.2 From 1f605b302724120777a1c38743cb20e2c8807333 Mon Sep 17 00:00:00 2001 From: Wang Sheng-Hui Date: Mon, 10 Jan 2011 12:10:37 -0500 Subject: ext2: remove dead code in ext2_xattr_get Reviewed-by: Dan Carpenter Signed-off-by: Wang Sheng-Hui Signed-off-by: "Theodore Ts'o" --- fs/ext2/xattr.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'fs') diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index f3ffbf1cc8d0..c2e4dce984d2 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -199,14 +199,6 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_get", goto found; entry = next; } - /* Check the remaining name entries */ - while (!IS_LAST_ENTRY(entry)) { - struct ext2_xattr_entry *next = - EXT2_XATTR_NEXT(entry); - if ((char *)next >= end) - goto bad_block; - entry = next; - } if (ext2_xattr_cache_insert(bh)) ea_idebug(inode, "cache insert failed"); error = -ENODATA; -- cgit v1.2.2 From 13195184a8bc119dbd2f905db325a453047971cb Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 10 Jan 2011 12:10:44 -0500 Subject: ext4: test the correct variable in ext4_init_pageio() This is a copy and paste error. The intent was to check "io_page_cachep". We tested "io_page_cachep" earlier. Signed-off-by: Dan Carpenter Signed-off-by: "Theodore Ts'o" --- fs/ext4/page-io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 0f5dfe0e83e7..7270dcfca92a 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -44,7 +44,7 @@ int __init ext4_init_pageio(void) if (io_page_cachep == NULL) return -ENOMEM; io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT); - if (io_page_cachep == NULL) { + if (io_end_cachep == NULL) { kmem_cache_destroy(io_page_cachep); return -ENOMEM; } -- cgit v1.2.2 From f9a62d090cf47fae2fe6f6bd8eb9f24482573fd8 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 10 Jan 2011 12:10:50 -0500 Subject: ext4: use IS_ERR() to check for errors in ext4_error_file d_path() returns an ERR_PTR and it doesn't return NULL. This is in ext4_error_file() and no one actually calls ext4_error_file(). Signed-off-by: Dan Carpenter --- fs/ext4/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index c228da112de0..d49e3b1ec41e 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -437,7 +437,7 @@ void ext4_error_file(struct file *file, const char *function, save_error_info(inode->i_sb, function, line); va_start(args, fmt); path = d_path(&(file->f_path), pathname, sizeof(pathname)); - if (!path) + if (IS_ERR(path)) path = "(unknown)"; printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu " -- cgit v1.2.2 From f7c21177af0b32a2cd9ee36189637f0c1f0e1e17 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 10 Jan 2011 12:10:55 -0500 Subject: ext4: Use ext4_error_file() to print the pathname to the corrupted inode Where the file pointer is available, use ext4_error_file() instead of ext4_error_inode(). 
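In the directory-entry checker this becomes a plain dispatch on whether a file pointer was passed in (condensed sketch; the real hunk also reports the offset, inode number and record lengths):

if (filp)
	ext4_error_file(filp, function, line, bh ? bh->b_blocknr : 0,
			"bad entry in directory: %s", error_msg);
else
	ext4_error_inode(dir, function, line, bh ? bh->b_blocknr : 0,
			"bad entry in directory: %s", error_msg);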
Signed-off-by: "Theodore Ts'o" --- fs/ext4/dir.c | 30 ++++++++++++++++++++---------- fs/ext4/ext4.h | 15 ++++++++------- fs/ext4/namei.c | 10 +++++----- fs/ext4/super.c | 28 ++++++++++++++++------------ 4 files changed, 49 insertions(+), 34 deletions(-) (limited to 'fs') diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index bd5d74d06399..164c56092e58 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -66,7 +66,7 @@ static unsigned char get_dtype(struct super_block *sb, int filetype) * Note: this is the opposite of what ext2 and ext3 historically returned... */ int __ext4_check_dir_entry(const char *function, unsigned int line, - struct inode *dir, + struct inode *dir, struct file *filp, struct ext4_dir_entry_2 *de, struct buffer_head *bh, unsigned int offset) @@ -90,12 +90,21 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, else return 0; - ext4_error_inode(dir, function, line, bh->b_blocknr, - "bad entry in directory: %s - " - "offset=%u(%u), inode=%u, rec_len=%d, name_len=%d", - error_msg, (unsigned) (offset%bh->b_size), offset, - le32_to_cpu(de->inode), - rlen, de->name_len); + if (filp) + ext4_error_file(filp, function, line, bh ? bh->b_blocknr : 0, + "bad entry in directory: %s - offset=%u(%u), " + "inode=%u, rec_len=%d, name_len=%d", + error_msg, (unsigned) (offset%bh->b_size), + offset, le32_to_cpu(de->inode), + rlen, de->name_len); + else + ext4_error_inode(dir, function, line, bh ? bh->b_blocknr : 0, + "bad entry in directory: %s - offset=%u(%u), " + "inode=%u, rec_len=%d, name_len=%d", + error_msg, (unsigned) (offset%bh->b_size), + offset, le32_to_cpu(de->inode), + rlen, de->name_len); + return 1; } @@ -158,8 +167,9 @@ static int ext4_readdir(struct file *filp, */ if (!bh) { if (!dir_has_error) { - EXT4_ERROR_INODE(inode, "directory " - "contains a hole at offset %Lu", + EXT4_ERROR_FILE(filp, 0, + "directory contains a " + "hole at offset %llu", (unsigned long long) filp->f_pos); dir_has_error = 1; } @@ -200,7 +210,7 @@ revalidate: while (!error && filp->f_pos < inode->i_size && offset < sb->s_blocksize) { de = (struct ext4_dir_entry_2 *) (bh->b_data + offset); - if (ext4_check_dir_entry(inode, de, + if (ext4_check_dir_entry(inode, filp, de, bh, offset)) { /* * On error, skip the f_pos to the next block diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 8104ab7eb7d4..2a739255ee05 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -62,8 +62,8 @@ #define EXT4_ERROR_INODE_BLOCK(inode, block, fmt, a...) \ ext4_error_inode((inode), __func__, __LINE__, (block), (fmt), ## a) -#define EXT4_ERROR_FILE(file, fmt, a...) \ - ext4_error_file(__func__, __LINE__, (file), (fmt), ## a) +#define EXT4_ERROR_FILE(file, block, fmt, a...) 
\ + ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a) /* data type for block offset of block group */ typedef int ext4_grpblk_t; @@ -1640,11 +1640,12 @@ extern unsigned ext4_init_block_bitmap(struct super_block *sb, /* dir.c */ extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *, + struct file *, struct ext4_dir_entry_2 *, struct buffer_head *, unsigned int); -#define ext4_check_dir_entry(dir, de, bh, offset) \ - unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (de), \ - (bh), (offset))) +#define ext4_check_dir_entry(dir, filp, de, bh, offset) \ + unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \ + (de), (bh), (offset))) extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash, __u32 minor_hash, struct ext4_dir_entry_2 *dirent); @@ -1751,8 +1752,8 @@ extern void ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t, const char *, ...) __attribute__ ((format (printf, 5, 6))); extern void ext4_error_file(struct file *, const char *, unsigned int, - const char *, ...) - __attribute__ ((format (printf, 4, 5))); + ext4_fsblk_t, const char *, ...) + __attribute__ ((format (printf, 5, 6))); extern void __ext4_std_error(struct super_block *, const char *, unsigned int, int); extern void __ext4_abort(struct super_block *, const char *, unsigned int, diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index e275464f7754..96a594d86a19 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -581,7 +581,7 @@ static int htree_dirblock_to_tree(struct file *dir_file, dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0)); for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) { - if (ext4_check_dir_entry(dir, de, bh, + if (ext4_check_dir_entry(dir, NULL, de, bh, (block<i_sb)) + ((char *)de - bh->b_data))) { /* On error, skip the f_pos to the next block. */ @@ -820,7 +820,7 @@ static inline int search_dirblock(struct buffer_head *bh, if ((char *) de + namelen <= dlimit && ext4_match (namelen, name, de)) { /* found a match - just to be sure, do a full check */ - if (ext4_check_dir_entry(dir, de, bh, offset)) + if (ext4_check_dir_entry(dir, NULL, de, bh, offset)) return -1; *res_dir = de; return 1; @@ -1269,7 +1269,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, de = (struct ext4_dir_entry_2 *)bh->b_data; top = bh->b_data + blocksize - reclen; while ((char *) de <= top) { - if (ext4_check_dir_entry(dir, de, bh, offset)) + if (ext4_check_dir_entry(dir, NULL, de, bh, offset)) return -EIO; if (ext4_match(namelen, name, de)) return -EEXIST; @@ -1636,7 +1636,7 @@ static int ext4_delete_entry(handle_t *handle, pde = NULL; de = (struct ext4_dir_entry_2 *) bh->b_data; while (i < bh->b_size) { - if (ext4_check_dir_entry(dir, de, bh, i)) + if (ext4_check_dir_entry(dir, NULL, de, bh, i)) return -EIO; if (de == de_del) { BUFFER_TRACE(bh, "get_write_access"); @@ -1919,7 +1919,7 @@ static int empty_dir(struct inode *inode) } de = (struct ext4_dir_entry_2 *) bh->b_data; } - if (ext4_check_dir_entry(inode, de, bh, offset)) { + if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) { de = (struct ext4_dir_entry_2 *)(bh->b_data + sb->s_blocksize); offset = (offset | (sb->s_blocksize - 1)) + 1; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index d49e3b1ec41e..7728a4ca3d6c 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -406,28 +406,31 @@ void ext4_error_inode(struct inode *inode, const char *function, const char *fmt, ...) 
{ va_list args; + struct va_format vaf; struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; es->s_last_error_ino = cpu_to_le32(inode->i_ino); es->s_last_error_block = cpu_to_le64(block); save_error_info(inode->i_sb, function, line); va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ", inode->i_sb->s_id, function, line, inode->i_ino); if (block) - printk("block %llu: ", block); - printk("comm %s: ", current->comm); - vprintk(fmt, args); - printk("\n"); + printk(KERN_CONT "block %llu: ", block); + printk(KERN_CONT "comm %s: %pV\n", current->comm, &vaf); va_end(args); ext4_handle_error(inode->i_sb); } void ext4_error_file(struct file *file, const char *function, - unsigned int line, const char *fmt, ...) + unsigned int line, ext4_fsblk_t block, + const char *fmt, ...) { va_list args; + struct va_format vaf; struct ext4_super_block *es; struct inode *inode = file->f_dentry->d_inode; char pathname[80], *path; @@ -435,17 +438,18 @@ void ext4_error_file(struct file *file, const char *function, es = EXT4_SB(inode->i_sb)->s_es; es->s_last_error_ino = cpu_to_le32(inode->i_ino); save_error_info(inode->i_sb, function, line); - va_start(args, fmt); path = d_path(&(file->f_path), pathname, sizeof(pathname)); if (IS_ERR(path)) path = "(unknown)"; printk(KERN_CRIT - "EXT4-fs error (device %s): %s:%d: inode #%lu " - "(comm %s path %s): ", - inode->i_sb->s_id, function, line, inode->i_ino, - current->comm, path); - vprintk(fmt, args); - printk("\n"); + "EXT4-fs error (device %s): %s:%d: inode #%lu: ", + inode->i_sb->s_id, function, line, inode->i_ino); + if (block) + printk(KERN_CONT "block %llu: ", block); + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + printk(KERN_CONT "comm %s: path %s: %pV\n", current->comm, path, &vaf); va_end(args); ext4_handle_error(inode->i_sb); -- cgit v1.2.2 From f1dffc4c5431c6bd8972489636573c5cd09ab672 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Mon, 10 Jan 2011 12:11:00 -0500 Subject: ext4: ext4_ext_migrate should use NULL not 0 ext4_ext_migrate() calls ext4_new_inode() and passes 0 instead of a pointer to a struct qstr. This patch uses NULL, to make it obvious to the caller that this was a pointer. Signed-off-by: Eric Paris Signed-off-by: "Theodore Ts'o" --- fs/ext4/migrate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 25f3a974b725..b0a126f23c20 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c @@ -496,7 +496,7 @@ int ext4_ext_migrate(struct inode *inode) goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) * EXT4_INODES_PER_GROUP(inode->i_sb)) + 1; tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode, - S_IFREG, 0, goal); + S_IFREG, NULL, goal); if (IS_ERR(tmp_inode)) { retval = -ENOMEM; ext4_journal_stop(handle); -- cgit v1.2.2 From dabd991f9d8e3232bb4531c920daddac8d10d313 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 10 Jan 2011 12:11:16 -0500 Subject: ext4: add more error checks to ext4_mkdir() Check return value of ext4_journal_get_write_access, ext4_journal_dirty_metadata and ext4_mark_inode_dirty. Move brelse() under 'out_stop' to release bh properly in case of journal error. 
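The shape of the added checking, condensed from the hunk below: every journalling call feeds one error path, and the buffer head is released exactly once.

err = ext4_journal_get_write_access(handle, dir_block);
if (err)
	goto out_clear_inode;
/* ... fill in the "." and ".." entries ... */
err = ext4_handle_dirty_metadata(handle, dir, dir_block);
if (err)
	goto out_clear_inode;
err = ext4_mark_inode_dirty(handle, inode);
if (!err)
	err = ext4_add_entry(handle, dentry, inode);
/* ... */
out_stop:
	brelse(dir_block);	/* dir_block starts out NULL, so this is safe */
	ext4_journal_stop(handle);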
Signed-off-by: Namhyung Kim Signed-off-by: "Theodore Ts'o" --- fs/ext4/namei.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 96a594d86a19..6dfc5b9de3e6 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1789,7 +1789,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, int mode) { handle_t *handle; struct inode *inode; - struct buffer_head *dir_block; + struct buffer_head *dir_block = NULL; struct ext4_dir_entry_2 *de; unsigned int blocksize = dir->i_sb->s_blocksize; int err, retries = 0; @@ -1822,7 +1822,9 @@ retry: if (!dir_block) goto out_clear_inode; BUFFER_TRACE(dir_block, "get_write_access"); - ext4_journal_get_write_access(handle, dir_block); + err = ext4_journal_get_write_access(handle, dir_block); + if (err) + goto out_clear_inode; de = (struct ext4_dir_entry_2 *) dir_block->b_data; de->inode = cpu_to_le32(inode->i_ino); de->name_len = 1; @@ -1839,10 +1841,12 @@ retry: ext4_set_de_type(dir->i_sb, de, S_IFDIR); inode->i_nlink = 2; BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata"); - ext4_handle_dirty_metadata(handle, dir, dir_block); - brelse(dir_block); - ext4_mark_inode_dirty(handle, inode); - err = ext4_add_entry(handle, dentry, inode); + err = ext4_handle_dirty_metadata(handle, dir, dir_block); + if (err) + goto out_clear_inode; + err = ext4_mark_inode_dirty(handle, inode); + if (!err) + err = ext4_add_entry(handle, dentry, inode); if (err) { out_clear_inode: clear_nlink(inode); @@ -1853,10 +1857,13 @@ out_clear_inode: } ext4_inc_count(handle, dir); ext4_update_dx_flag(dir); - ext4_mark_inode_dirty(handle, dir); + err = ext4_mark_inode_dirty(handle, dir); + if (err) + goto out_clear_inode; d_instantiate(dentry, inode); unlock_new_inode(inode); out_stop: + brelse(dir_block); ext4_journal_stop(handle); if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) goto retry; -- cgit v1.2.2 From ad4fb9cafe100a4a9de6e0529015e584d94ac8dc Mon Sep 17 00:00:00 2001 From: Kazuya Mio Date: Mon, 10 Jan 2011 12:12:28 -0500 Subject: ext4: fix 32bit overflow in ext4_ext_find_goal() ext4_ext_find_goal() returns an ideal physical block number that the block allocator tries to allocate first. However, if a required file offset is smaller than the existing extent's one, ext4_ext_find_goal() returns a wrong block number because it may overflow at "block - le32_to_cpu(ex->ee_block)". This patch fixes the problem. ext4_ext_find_goal() will also return a wrong block number in case a file offset of the existing extent is too big. In this case, the ideal physical block number is fixed in ext4_mb_initialize_context(), so it's no problem. 
reproduce: # dd if=/dev/zero of=/mnt/mp1/tmp bs=127M count=1 oflag=sync # dd if=/dev/zero of=/mnt/mp1/file bs=512K count=1 seek=1 oflag=sync # filefrag -v /mnt/mp1/file Filesystem type is: ef53 File size of /mnt/mp1/file is 1048576 (256 blocks, blocksize 4096) ext logical physical expected length flags 0 128 67456 128 eof /mnt/mp1/file: 2 extents found # rm -rf /mnt/mp1/tmp # echo $((512*4096)) > /sys/fs/ext4/loop0/mb_stream_req # dd if=/dev/zero of=/mnt/mp1/file bs=512K count=1 oflag=sync conv=notrunc result (linux-2.6.37-rc2 + ext4 patch queue): # filefrag -v /mnt/mp1/file Filesystem type is: ef53 File size of /mnt/mp1/file is 1048576 (256 blocks, blocksize 4096) ext logical physical expected length flags 0 0 33280 128 1 128 67456 33407 128 eof /mnt/mp1/file: 2 extents found result(apply this patch): # filefrag -v /mnt/mp1/file Filesystem type is: ef53 File size of /mnt/mp1/file is 1048576 (256 blocks, blocksize 4096) ext logical physical expected length flags 0 0 66560 128 1 128 67456 66687 128 eof /mnt/mp1/file: 2 extents found Signed-off-by: Kazuya Mio Signed-off-by: "Theodore Ts'o" --- fs/ext4/extents.c | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 0554c48cb1fd..d53e20f53103 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -117,11 +117,33 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, struct ext4_extent *ex; depth = path->p_depth; - /* try to predict block placement */ + /* + * Try to predict block placement assuming that we are + * filling in a file which will eventually be + * non-sparse --- i.e., in the case of libbfd writing + * an ELF object sections out-of-order but in a way + * the eventually results in a contiguous object or + * executable file, or some database extending a table + * space file. However, this is actually somewhat + * non-ideal if we are writing a sparse file such as + * qemu or KVM writing a raw image file that is going + * to stay fairly sparse, since it will end up + * fragmenting the file system's free space. Maybe we + * should have some hueristics or some way to allow + * userspace to pass a hint to file system, + * especiially if the latter case turns out to be + * common. + */ ex = path[depth].p_ext; - if (ex) - return (ext4_ext_pblock(ex) + - (block - le32_to_cpu(ex->ee_block))); + if (ex) { + ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex); + ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block); + + if (block > ext_block) + return ext_pblk + (block - ext_block); + else + return ext_pblk - (ext_block - block); + } /* it looks like index is empty; * try to find starting block from index itself */ -- cgit v1.2.2 From f232109773ff5b0c840a6761d74940b9cf0d66ec Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 10 Jan 2011 12:12:36 -0500 Subject: ext4: replace i_delalloc_reserved_flag with EXT4_STATE_DELALLOC_RESERVED Remove the short element i_delalloc_reserved_flag from the ext4_inode_info structure and replace it a new bit in i_state_flags. Since we have an ext4_inode_info for every ext4 inode cached in the inode cache, any savings we can produce here is a very good thing from a memory utilization perspective. 
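Condensed from the hunks below, the dedicated field simply becomes one bit handled by the existing inode-state helpers:

/* was: EXT4_I(inode)->i_delalloc_reserved_flag = 1; */
ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

/* was: if (EXT4_I(ar->inode)->i_delalloc_reserved_flag) */
if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
	ar->flags |= EXT4_MB_DELALLOC_RESERVED;

/* was: EXT4_I(inode)->i_delalloc_reserved_flag = 0; */
ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);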
Signed-off-by: "Theodore Ts'o" --- fs/ext4/balloc.c | 3 ++- fs/ext4/ext4.h | 2 +- fs/ext4/inode.c | 6 +++--- fs/ext4/mballoc.c | 5 +++-- fs/ext4/super.c | 1 - 5 files changed, 9 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 14c3af26c671..adf96b822781 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -592,7 +592,8 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, * Account for the allocated meta blocks. We will never * fail EDQUOT for metdata, but we do account for it. */ - if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) { + if (!(*errp) && + ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) { spin_lock(&EXT4_I(inode)->i_block_reservation_lock); EXT4_I(inode)->i_allocated_meta_blocks += ar.len; spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 2a739255ee05..b7ee66ff9962 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -828,7 +828,6 @@ struct ext4_inode_info { unsigned int i_reserved_data_blocks; unsigned int i_reserved_meta_blocks; unsigned int i_allocated_meta_blocks; - unsigned short i_delalloc_reserved_flag; sector_t i_da_metadata_calc_last_lblock; int i_da_metadata_calc_len; @@ -1235,6 +1234,7 @@ enum { EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */ EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/ EXT4_STATE_NEWENTRY, /* File just added to dir */ + EXT4_STATE_DELALLOC_RESERVED, /* blks already reserved for delalloc */ }; #define EXT4_INODE_BIT_FNS(name, field) \ diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index c0fe426d444a..ac08460921aa 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1330,7 +1330,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, * avoid double accounting */ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) - EXT4_I(inode)->i_delalloc_reserved_flag = 1; + ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED); /* * We need to check for EXT4 here because migrate * could have changed the inode type in between @@ -1360,7 +1360,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, ext4_da_update_reserve_space(inode, retval, 1); } if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) - EXT4_I(inode)->i_delalloc_reserved_flag = 0; + ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED); up_write((&EXT4_I(inode)->i_data_sem)); if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { @@ -2249,7 +2249,7 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd) * affects functions in many different parts of the allocation * call path. This flag exists primarily because we don't * want to change *many* call functions, so ext4_map_blocks() - * will set the magic i_delalloc_reserved_flag once the + * will set the EXT4_STATE_DELALLOC_RESERVED flag once the * inode's allocation semaphore is taken. * * If the blocks in questions were delalloc blocks, set diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 12b604abc2fe..d47a80ec231d 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4283,7 +4283,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, * EDQUOT check, as blocks and quotas have been already * reserved when data being copied into pagecache. 
*/ - if (EXT4_I(ar->inode)->i_delalloc_reserved_flag) + if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED)) ar->flags |= EXT4_MB_DELALLOC_RESERVED; else { /* Without delayed allocation we need to verify @@ -4380,7 +4380,8 @@ out: if (inquota && ar->len < inquota) dquot_free_block(ar->inode, inquota - ar->len); if (!ar->len) { - if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) + if (!ext4_test_inode_state(ar->inode, + EXT4_STATE_DELALLOC_RESERVED)) /* release all the reserved blocks if non delalloc */ percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 7728a4ca3d6c..f5960d673e4e 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -828,7 +828,6 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) ei->i_reserved_meta_blocks = 0; ei->i_allocated_meta_blocks = 0; ei->i_da_metadata_calc_len = 0; - ei->i_delalloc_reserved_flag = 0; spin_lock_init(&(ei->i_block_reservation_lock)); #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; -- cgit v1.2.2 From 01f49d0b9d0209dc1194255b11601e4b94447b36 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 10 Jan 2011 12:13:03 -0500 Subject: ext4: use ext4_lblk_t instead of sector_t for logical blocks This fixes a number of places where we used sector_t instead of ext4_lblk_t for logical blocks, which for ext4 are still 32-bit data types. No point wasting space in the ext4_inode_info structure, and requiring 64-bit arithmetic on 32-bit systems, when it isn't necessary. Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 2 +- fs/ext4/ext4_extents.h | 2 +- fs/ext4/extents.c | 2 +- fs/ext4/inode.c | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index b7ee66ff9962..746a59853a07 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -828,7 +828,7 @@ struct ext4_inode_info { unsigned int i_reserved_data_blocks; unsigned int i_reserved_meta_blocks; unsigned int i_allocated_meta_blocks; - sector_t i_da_metadata_calc_last_lblock; + ext4_lblk_t i_da_metadata_calc_last_lblock; int i_da_metadata_calc_len; /* on-disk additional length */ diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index 28ce70fd9cd0..dfdda1766927 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h @@ -278,7 +278,7 @@ static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix, } extern int ext4_ext_calc_metadata_amount(struct inode *inode, - sector_t lblocks); + ext4_lblk_t lblocks); extern int ext4_extent_tree_init(handle_t *, struct inode *); extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int num, diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index d53e20f53103..f1a4354ea3cf 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -266,7 +266,7 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check) * to allocate @blocks * Worse case is one block per extent */ -int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock) +int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) { struct ext4_inode_info *ei = EXT4_I(inode); int idxs, num = 0; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index ac08460921aa..3ae83137cf34 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1091,7 +1091,7 @@ static int ext4_indirect_calc_metadata_amount(struct inode *inode, * Calculate the number of metadata blocks need to reserve * to allocate a block located at @lblock */ -static int ext4_calc_metadata_amount(struct inode *inode, sector_t 
lblock) +static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock) { if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) return ext4_ext_calc_metadata_amount(inode, lblock); @@ -1888,7 +1888,7 @@ static int ext4_journalled_write_end(struct file *file, /* * Reserve a single block located at lblock */ -static int ext4_da_reserve_space(struct inode *inode, sector_t lblock) +static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) { int retries = 0; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); -- cgit v1.2.2 From b05e6ae58a13b56e3e11882c1fc71948c9b29760 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 10 Jan 2011 12:13:26 -0500 Subject: ext4: drop ec_type from the ext4_ext_cache structure We can encode the ec_type information by using ee_len == 0 to denote EXT4_EXT_CACHE_NO, ee_start == 0 to denote EXT4_EXT_CACHE_GAP, and if neither is true, then the cache type must be EXT4_EXT_CACHE_EXTENT. This allows us to reduce the size of ext4_ext_inode by another 8 bytes. (ec_type is 4 bytes, plus another 4 bytes of padding) Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 3 ++- fs/ext4/ext4_extents.h | 6 +----- fs/ext4/extents.c | 37 +++++++++++++++---------------------- 3 files changed, 18 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 746a59853a07..de937fc10503 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -738,12 +738,13 @@ do { \ /* * storage for cached extent + * If ec_len == 0, then the cache is invalid. + * If ec_start == 0, then the cache represents a gap (null mapping) */ struct ext4_ext_cache { ext4_fsblk_t ec_start; ext4_lblk_t ec_block; __u32 ec_len; /* must be 32bit to return holes */ - __u32 ec_type; }; /* diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h index dfdda1766927..2e29abb30f76 100644 --- a/fs/ext4/ext4_extents.h +++ b/fs/ext4/ext4_extents.h @@ -119,10 +119,6 @@ struct ext4_ext_path { * structure for external API */ -#define EXT4_EXT_CACHE_NO 0 -#define EXT4_EXT_CACHE_GAP 1 -#define EXT4_EXT_CACHE_EXTENT 2 - /* * to be called by ext4_ext_walk_space() * negative retcode - error @@ -197,7 +193,7 @@ static inline unsigned short ext_depth(struct inode *inode) static inline void ext4_ext_invalidate_cache(struct inode *inode) { - EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO; + EXT4_I(inode)->i_cached_extent.ec_len = 0; } static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext) diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index f1a4354ea3cf..9081d1060a5f 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -1894,12 +1894,10 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, cbex.ec_block = start; cbex.ec_len = end - start; cbex.ec_start = 0; - cbex.ec_type = EXT4_EXT_CACHE_GAP; } else { cbex.ec_block = le32_to_cpu(ex->ee_block); cbex.ec_len = ext4_ext_get_actual_len(ex); cbex.ec_start = ext4_ext_pblock(ex); - cbex.ec_type = EXT4_EXT_CACHE_EXTENT; } if (unlikely(cbex.ec_len == 0)) { @@ -1939,13 +1937,12 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, static void ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block, - __u32 len, ext4_fsblk_t start, int type) + __u32 len, ext4_fsblk_t start) { struct ext4_ext_cache *cex; BUG_ON(len == 0); spin_lock(&EXT4_I(inode)->i_block_reservation_lock); cex = &EXT4_I(inode)->i_cached_extent; - cex->ec_type = type; cex->ec_block = block; cex->ec_len = len; cex->ec_start = start; @@ -1998,15 +1995,18 @@ ext4_ext_put_gap_in_cache(struct 
inode *inode, struct ext4_ext_path *path, } ext_debug(" -> %u:%lu\n", lblock, len); - ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP); + ext4_ext_put_in_cache(inode, lblock, len, 0); } +/* + * Return 0 if cache is invalid; 1 if the cache is valid + */ static int ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, struct ext4_extent *ex) { struct ext4_ext_cache *cex; - int ret = EXT4_EXT_CACHE_NO; + int ret = 0; /* * We borrow i_block_reservation_lock to protect i_cached_extent @@ -2015,11 +2015,9 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, cex = &EXT4_I(inode)->i_cached_extent; /* has cache valid data? */ - if (cex->ec_type == EXT4_EXT_CACHE_NO) + if (cex->ec_len == 0) goto errout; - BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP && - cex->ec_type != EXT4_EXT_CACHE_EXTENT); if (in_range(block, cex->ec_block, cex->ec_len)) { ex->ee_block = cpu_to_le32(cex->ec_block); ext4_ext_store_pblock(ex, cex->ec_start); @@ -2027,7 +2025,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, ext_debug("%u cached by %u:%u:%llu\n", block, cex->ec_block, cex->ec_len, cex->ec_start); - ret = cex->ec_type; + ret = 1; } errout: spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); @@ -3298,7 +3296,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, struct ext4_extent_header *eh; struct ext4_extent newex, *ex; ext4_fsblk_t newblock; - int err = 0, depth, ret, cache_type; + int err = 0, depth, ret; unsigned int allocated = 0; struct ext4_allocation_request ar; ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; @@ -3307,9 +3305,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, map->m_lblk, map->m_len, inode->i_ino); /* check in cache */ - cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex); - if (cache_type) { - if (cache_type == EXT4_EXT_CACHE_GAP) { + if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) { + if (!newex.ee_start_lo && !newex.ee_start_hi) { if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { /* * block isn't allocated yet and @@ -3318,7 +3315,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, goto out2; } /* we should allocate requested block */ - } else if (cache_type == EXT4_EXT_CACHE_EXTENT) { + } else { /* block is already allocated */ newblock = map->m_lblk - le32_to_cpu(newex.ee_block) @@ -3327,8 +3324,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, allocated = ext4_ext_get_actual_len(&newex) - (map->m_lblk - le32_to_cpu(newex.ee_block)); goto out; - } else { - BUG(); } } @@ -3379,8 +3374,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, /* Do not put uninitialized extent in the cache */ if (!ext4_ext_is_uninitialized(ex)) { ext4_ext_put_in_cache(inode, ee_block, - ee_len, ee_start, - EXT4_EXT_CACHE_EXTENT); + ee_len, ee_start); goto out; } ret = ext4_ext_handle_uninitialized_extents(handle, @@ -3512,8 +3506,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, * when it is _not_ an uninitialized extent. 
*/ if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { - ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock, - EXT4_EXT_CACHE_EXTENT); + ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock); ext4_update_inode_fsync_trans(handle, inode, 1); } else ext4_update_inode_fsync_trans(handle, inode, 0); @@ -3789,7 +3782,7 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, logical = (__u64)newex->ec_block << blksize_bits; - if (newex->ec_type == EXT4_EXT_CACHE_GAP) { + if (newex->ec_start == 0) { pgoff_t offset; struct page *page; struct buffer_head *bh = NULL; -- cgit v1.2.2 From 8a2005d3f84457b7d7d8646ab5195341d9e5f06a Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 10 Jan 2011 12:13:42 -0500 Subject: ext4: reorder ext4_inode_info structure elements to remove unneeded padding By reordering the elements in the ext4_inode_info structure, we can reduce the padding needed on an x86_64 system by 16 bytes. Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index de937fc10503..50e3d24483fb 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -763,10 +763,10 @@ struct ext4_inode_info { * near to their parent directory's inode. */ ext4_group_t i_block_group; + ext4_lblk_t i_dir_start_lookup; unsigned long i_state_flags; /* Dynamic state flags */ unsigned long i_flags; - ext4_lblk_t i_dir_start_lookup; #ifdef CONFIG_EXT4_FS_XATTR /* * Extended attributes can be read independently of the main file @@ -835,7 +835,6 @@ struct ext4_inode_info { /* on-disk additional length */ __u16 i_extra_isize; - spinlock_t i_block_reservation_lock; #ifdef CONFIG_QUOTA /* quota space reservation, managed internally by quota code */ qsize_t i_reserved_quota; @@ -844,9 +843,11 @@ struct ext4_inode_info { /* completed IOs that might need unwritten extents handling */ struct list_head i_completed_io_list; spinlock_t i_completed_io_lock; + atomic_t i_ioend_count; /* Number of outstanding io_end structs */ /* current io_end structure for async DIO write*/ ext4_io_end_t *cur_aio_dio; - atomic_t i_ioend_count; /* Number of outstanding io_end structs */ + + spinlock_t i_block_reservation_lock; /* * Transactions that contain inode's metadata needed to complete -- cgit v1.2.2 From 353eb83c1422c6326eaab30ce044a179c6018169 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 10 Jan 2011 12:18:25 -0500 Subject: ext4: drop i_state_flags on architectures with 64-bit longs We can store the dynamic inode state flags in the high bits of EXT4_I(inode)->i_flags, and eliminate i_state_flags. This saves 8 bytes from the size of ext4_inode_info structure, which when multiplied by the number of the number of in the inode cache, can save a lot of memory. 
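The trick, in outline: with 64-bit longs the dynamic state bits are folded into the upper half of i_flags, so the bit helpers only gain an offset (condensed from the hunk below; only the low 32 bits ever reach the on-disk i_flags):

#if (BITS_PER_LONG < 64)
	set_bit(bit, &EXT4_I(inode)->i_state_flags);	/* separate word */
#else
	set_bit(bit + 32, &EXT4_I(inode)->i_flags);	/* upper 32 bits */
#endif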
Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 28 ++++++++++++++++++++++------ fs/ext4/ialloc.c | 2 +- fs/ext4/inode.c | 4 ++-- 3 files changed, 25 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 50e3d24483fb..2fb531cfd48b 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -764,7 +764,9 @@ struct ext4_inode_info { */ ext4_group_t i_block_group; ext4_lblk_t i_dir_start_lookup; +#if (BITS_PER_LONG < 64) unsigned long i_state_flags; /* Dynamic state flags */ +#endif unsigned long i_flags; #ifdef CONFIG_EXT4_FS_XATTR @@ -1239,22 +1241,36 @@ enum { EXT4_STATE_DELALLOC_RESERVED, /* blks already reserved for delalloc */ }; -#define EXT4_INODE_BIT_FNS(name, field) \ +#define EXT4_INODE_BIT_FNS(name, field, offset) \ static inline int ext4_test_inode_##name(struct inode *inode, int bit) \ { \ - return test_bit(bit, &EXT4_I(inode)->i_##field); \ + return test_bit(bit + (offset), &EXT4_I(inode)->i_##field); \ } \ static inline void ext4_set_inode_##name(struct inode *inode, int bit) \ { \ - set_bit(bit, &EXT4_I(inode)->i_##field); \ + set_bit(bit + (offset), &EXT4_I(inode)->i_##field); \ } \ static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \ { \ - clear_bit(bit, &EXT4_I(inode)->i_##field); \ + clear_bit(bit + (offset), &EXT4_I(inode)->i_##field); \ } -EXT4_INODE_BIT_FNS(flag, flags) -EXT4_INODE_BIT_FNS(state, state_flags) +EXT4_INODE_BIT_FNS(flag, flags, 0) +#if (BITS_PER_LONG < 64) +EXT4_INODE_BIT_FNS(state, state_flags, 0) + +static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) +{ + (ei)->i_state_flags = 0; +} +#else +EXT4_INODE_BIT_FNS(state, flags, 32) + +static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) +{ + /* We depend on the fact that callers will set i_flags */ +} +#endif #else /* Assume that user mode programs are passing in an ext4fs superblock, not * a kernel struct super_block. This will allow us to call the feature-test diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 1ce240a23ebb..eb9097aec6f0 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -1027,7 +1027,7 @@ got: inode->i_generation = sbi->s_next_generation++; spin_unlock(&sbi->s_next_gen_lock); - ei->i_state_flags = 0; + ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ ext4_set_inode_state(inode, EXT4_STATE_NEW); ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 3ae83137cf34..0801ee6a173e 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4868,7 +4868,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) } inode->i_nlink = le16_to_cpu(raw_inode->i_links_count); - ei->i_state_flags = 0; + ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ ei->i_dir_start_lookup = 0; ei->i_dtime = le32_to_cpu(raw_inode->i_dtime); /* We now have enough fields to check if the inode was active or not. 
@@ -5127,7 +5127,7 @@ static int ext4_do_update_inode(handle_t *handle, if (ext4_inode_blocks_set(handle, raw_inode, ei)) goto out_brelse; raw_inode->i_dtime = cpu_to_le32(ei->i_dtime); - raw_inode->i_flags = cpu_to_le32(ei->i_flags); + raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF); if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != cpu_to_le32(EXT4_OS_HURD)) raw_inode->i_file_acl_high = -- cgit v1.2.2 From 8aefcd557d26d0023a36f9ec5afbf55e59f8f26b Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 10 Jan 2011 12:29:43 -0500 Subject: ext4: dynamically allocate the jbd2_inode in ext4_inode_info as necessary Replace the jbd2_inode structure (which is 48 bytes) with a pointer and only allocate the jbd2_inode when it is needed --- that is, when the file system has a journal present and the inode has been opened for writing. This allows us to further slim down the ext4_inode_info structure. Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 2 +- fs/ext4/ext4_jbd2.h | 2 +- fs/ext4/file.c | 22 ++++++++++++++++++++++ fs/ext4/inode.c | 17 ++++++++++++----- fs/ext4/super.c | 16 +++++++--------- fs/jbd2/journal.c | 20 +++++++++++++------- 6 files changed, 56 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 2fb531cfd48b..32b7daa41a42 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -811,7 +811,7 @@ struct ext4_inode_info { */ struct rw_semaphore i_data_sem; struct inode vfs_inode; - struct jbd2_inode jinode; + struct jbd2_inode *jinode; struct ext4_ext_cache i_cached_extent; /* diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index b0bd792c58c5..d8b992e658c1 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h @@ -253,7 +253,7 @@ static inline int ext4_journal_force_commit(journal_t *journal) static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode) { if (ext4_handle_valid(handle)) - return jbd2_journal_file_inode(handle, &EXT4_I(inode)->jinode); + return jbd2_journal_file_inode(handle, EXT4_I(inode)->jinode); return 0; } diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 5a5c55ddceef..bb003dc9ffff 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -104,6 +104,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp) { struct super_block *sb = inode->i_sb; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + struct ext4_inode_info *ei = EXT4_I(inode); struct vfsmount *mnt = filp->f_path.mnt; struct path path; char buf[64], *cp; @@ -127,6 +128,27 @@ static int ext4_file_open(struct inode * inode, struct file * filp) ext4_mark_super_dirty(sb); } } + /* + * Set up the jbd2_inode if we are opening the inode for + * writing and the journal is present + */ + if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) { + struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL); + + spin_lock(&inode->i_lock); + if (!ei->jinode) { + if (!jinode) { + spin_unlock(&inode->i_lock); + return -ENOMEM; + } + ei->jinode = jinode; + jbd2_journal_init_jbd_inode(ei->jinode, inode); + jinode = NULL; + } + spin_unlock(&inode->i_lock); + if (unlikely(jinode != NULL)) + jbd2_free_inode(jinode); + } return dquot_file_open(inode, filp); } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 0801ee6a173e..2693fcda30d8 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -55,10 +55,17 @@ static inline int ext4_begin_ordered_truncate(struct inode *inode, loff_t new_size) { trace_ext4_begin_ordered_truncate(inode, new_size); - return jbd2_journal_begin_ordered_truncate( - EXT4_SB(inode->i_sb)->s_journal, - 
&EXT4_I(inode)->jinode, - new_size); + /* + * If jinode is zero, then we never opened the file for + * writing, so there's no need to call + * jbd2_journal_begin_ordered_truncate() since there's no + * outstanding writes we need to flush. + */ + if (!EXT4_I(inode)->jinode) + return 0; + return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode), + EXT4_I(inode)->jinode, + new_size); } static void ext4_invalidatepage(struct page *page, unsigned long offset); @@ -4054,7 +4061,7 @@ int ext4_block_truncate_page(handle_t *handle, if (ext4_should_journal_data(inode)) { err = ext4_handle_dirty_metadata(handle, inode, bh); } else { - if (ext4_should_order_data(inode)) + if (ext4_should_order_data(inode) && EXT4_I(inode)->jinode) err = ext4_jbd2_file_inode(handle, inode); mark_buffer_dirty(bh); } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index f5960d673e4e..1cd4326c530b 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -818,12 +818,6 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache)); INIT_LIST_HEAD(&ei->i_prealloc_list); spin_lock_init(&ei->i_prealloc_lock); - /* - * Note: We can be called before EXT4_SB(sb)->s_journal is set, - * therefore it can be null here. Don't check it, just initialize - * jinode. - */ - jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode); ei->i_reserved_data_blocks = 0; ei->i_reserved_meta_blocks = 0; ei->i_allocated_meta_blocks = 0; @@ -832,6 +826,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; #endif + ei->jinode = NULL; INIT_LIST_HEAD(&ei->i_completed_io_list); spin_lock_init(&ei->i_completed_io_lock); ei->cur_aio_dio = NULL; @@ -900,9 +895,12 @@ void ext4_clear_inode(struct inode *inode) end_writeback(inode); dquot_drop(inode); ext4_discard_preallocations(inode); - if (EXT4_JOURNAL(inode)) - jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal, - &EXT4_I(inode)->jinode); + if (EXT4_I(inode)->jinode) { + jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode), + EXT4_I(inode)->jinode); + jbd2_free_inode(EXT4_I(inode)->jinode); + EXT4_I(inode)->jinode = NULL; + } } static inline void ext4_show_quota_options(struct seq_file *seq, diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 2447bd86f801..9e4686900f18 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -94,6 +94,7 @@ EXPORT_SYMBOL(jbd2_journal_file_inode); EXPORT_SYMBOL(jbd2_journal_init_jbd_inode); EXPORT_SYMBOL(jbd2_journal_release_jbd_inode); EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate); +EXPORT_SYMBOL(jbd2_inode_cache); static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *); static void __journal_abort_soft (journal_t *journal, int errno); @@ -2286,17 +2287,19 @@ static void __exit jbd2_remove_jbd_stats_proc_entry(void) #endif -struct kmem_cache *jbd2_handle_cache; +struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache; static int __init journal_init_handle_cache(void) { - jbd2_handle_cache = kmem_cache_create("jbd2_journal_handle", - sizeof(handle_t), - 0, /* offset */ - SLAB_TEMPORARY, /* flags */ - NULL); /* ctor */ + jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY); if (jbd2_handle_cache == NULL) { - printk(KERN_EMERG "JBD: failed to create handle cache\n"); + printk(KERN_EMERG "JBD2: failed to create handle cache\n"); + return -ENOMEM; + } + jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0); + if (jbd2_inode_cache == NULL) { + printk(KERN_EMERG "JBD2: failed to create inode cache\n"); + 
kmem_cache_destroy(jbd2_handle_cache); return -ENOMEM; } return 0; @@ -2306,6 +2309,9 @@ static void jbd2_journal_destroy_handle_cache(void) { if (jbd2_handle_cache) kmem_cache_destroy(jbd2_handle_cache); + if (jbd2_inode_cache) + kmem_cache_destroy(jbd2_inode_cache); + } /* -- cgit v1.2.2 From 6c5a6cb998854f3c579ecb2bc1423d302bcb1b76 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 10 Jan 2011 12:30:17 -0500 Subject: ext4: fix uninitialized variable in ext4_register_li_request fs/ext4/super.c: In function 'ext4_register_li_request': fs/ext4/super.c:2936: warning: 'ret' may be used uninitialized in this function It looks buggy to me, too. Cc: Lukas Czerner Cc: stable@kernel.org Signed-off-by: Andrew Morton Signed-off-by: "Theodore Ts'o" --- fs/ext4/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 1cd4326c530b..757cb24c0256 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2922,7 +2922,7 @@ static int ext4_register_li_request(struct super_block *sb, struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_li_request *elr; ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; - int ret; + int ret = 0; if (sbi->s_li_request != NULL) return 0; -- cgit v1.2.2 From ca6e909f9bebe709bc65a3ee605ce32969db0452 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 10 Jan 2011 12:30:39 -0500 Subject: ext4: fix trimming of a single group When ext4_trim_fs() is called to trim a part of a single group, the logic will wrongly set last block of the interval to 'len' instead of 'first_block + len'. Thus a shorter interval is possibly trimmed. Fix it. CC: Lukas Czerner Cc: stable@kernel.org Signed-off-by: Jan Kara Signed-off-by: "Theodore Ts'o" --- fs/ext4/mballoc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index d47a80ec231d..21ee30b86de5 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4860,7 +4860,7 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) if (len >= EXT4_BLOCKS_PER_GROUP(sb)) len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block); else - last_block = len; + last_block = first_block + len; if (e4b.bd_info->bb_free >= minlen) { cnt = ext4_trim_all_free(sb, &e4b, first_block, -- cgit v1.2.2 From b40971426a837e9dc9c66e1b6bbcb3874eafe4e0 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 10 Jan 2011 12:46:59 -0500 Subject: ext4: add error checking to calls to ext4_handle_dirty_metadata() Call ext4_std_error() in various places when we can't bail out cleanly, so the file system can be marked as in error. 
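For illustration (not part of the patch), the error-checking pattern applied throughout the diff below amounts to checking the return value and reporting it via ext4_std_error() whenever the failure cannot be propagated cleanly; a simplified sketch, with the bail-out label depending on the call site:

    err = ext4_handle_dirty_metadata(handle, inode, bh);
    if (unlikely(err)) {
            ext4_std_error(inode->i_sb, err);
            goto bail;      /* hypothetical label; each call site unwinds differently */
    }
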
Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 21 +++++++++++++++---- fs/ext4/namei.c | 32 ++++++++++++++++++++++------ fs/ext4/resize.c | 64 ++++++++++++++++++++++++++++++++++++++++---------------- 3 files changed, 89 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 2693fcda30d8..84b616269265 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4185,6 +4185,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode, { __le32 *p; int flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED; + int err; if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) flags |= EXT4_FREE_BLOCKS_METADATA; @@ -4200,11 +4201,23 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode, if (try_to_extend_transaction(handle, inode)) { if (bh) { BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); - ext4_handle_dirty_metadata(handle, inode, bh); + err = ext4_handle_dirty_metadata(handle, inode, bh); + if (unlikely(err)) { + ext4_std_error(inode->i_sb, err); + return 1; + } + } + err = ext4_mark_inode_dirty(handle, inode); + if (unlikely(err)) { + ext4_std_error(inode->i_sb, err); + return 1; + } + err = ext4_truncate_restart_trans(handle, inode, + blocks_for_truncate(inode)); + if (unlikely(err)) { + ext4_std_error(inode->i_sb, err); + return 1; } - ext4_mark_inode_dirty(handle, inode); - ext4_truncate_restart_trans(handle, inode, - blocks_for_truncate(inode)); if (bh) { BUFFER_TRACE(bh, "retaking write access"); ext4_journal_get_write_access(handle, bh); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 6dfc5b9de3e6..5485390d32c5 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1602,7 +1602,11 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, if (err) goto journal_error; } - ext4_handle_dirty_metadata(handle, inode, frames[0].bh); + err = ext4_handle_dirty_metadata(handle, inode, frames[0].bh); + if (err) { + ext4_std_error(inode->i_sb, err); + goto cleanup; + } } de = do_split(handle, dir, &bh, frame, &hinfo, &err); if (!de) @@ -1630,7 +1634,7 @@ static int ext4_delete_entry(handle_t *handle, { struct ext4_dir_entry_2 *de, *pde; unsigned int blocksize = dir->i_sb->s_blocksize; - int i; + int i, err; i = 0; pde = NULL; @@ -1640,7 +1644,11 @@ static int ext4_delete_entry(handle_t *handle, return -EIO; if (de == de_del) { BUFFER_TRACE(bh, "get_write_access"); - ext4_journal_get_write_access(handle, bh); + err = ext4_journal_get_write_access(handle, bh); + if (unlikely(err)) { + ext4_std_error(dir->i_sb, err); + return err; + } if (pde) pde->rec_len = ext4_rec_len_to_disk( ext4_rec_len_from_disk(pde->rec_len, @@ -1652,7 +1660,11 @@ static int ext4_delete_entry(handle_t *handle, de->inode = 0; dir->i_version++; BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); - ext4_handle_dirty_metadata(handle, dir, bh); + err = ext4_handle_dirty_metadata(handle, dir, bh); + if (unlikely(err)) { + ext4_std_error(dir->i_sb, err); + return err; + } return 0; } i += ext4_rec_len_from_disk(de->rec_len, blocksize); @@ -2414,7 +2426,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, ext4_current_time(new_dir); ext4_mark_inode_dirty(handle, new_dir); BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata"); - ext4_handle_dirty_metadata(handle, new_dir, new_bh); + retval = ext4_handle_dirty_metadata(handle, new_dir, new_bh); + if (unlikely(retval)) { + ext4_std_error(new_dir->i_sb, retval); + goto end_rename; + } brelse(new_bh); new_bh = NULL; } @@ -2466,7 +2482,11 @@ static int 
ext4_rename(struct inode *old_dir, struct dentry *old_dentry, PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) = cpu_to_le32(new_dir->i_ino); BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata"); - ext4_handle_dirty_metadata(handle, old_dir, dir_bh); + retval = ext4_handle_dirty_metadata(handle, old_dir, dir_bh); + if (retval) { + ext4_std_error(old_dir->i_sb, retval); + goto end_rename; + } ext4_dec_count(handle, old_dir); if (new_inode) { /* checked empty_dir above, can't have another parent, diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index dc963929de65..7faf47dde7fb 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -220,7 +220,11 @@ static int setup_new_group_blocks(struct super_block *sb, memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size); set_buffer_uptodate(gdb); unlock_buffer(gdb); - ext4_handle_dirty_metadata(handle, NULL, gdb); + err = ext4_handle_dirty_metadata(handle, NULL, gdb); + if (unlikely(err)) { + brelse(gdb); + goto exit_bh; + } ext4_set_bit(bit, bh->b_data); brelse(gdb); } @@ -253,7 +257,11 @@ static int setup_new_group_blocks(struct super_block *sb, ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8, bh->b_data); - ext4_handle_dirty_metadata(handle, NULL, bh); + err = ext4_handle_dirty_metadata(handle, NULL, bh); + if (unlikely(err)) { + ext4_std_error(sb, err); + goto exit_bh; + } brelse(bh); /* Mark unused entries in inode bitmap used */ ext4_debug("clear inode bitmap %#04llx (+%llu)\n", @@ -265,7 +273,9 @@ static int setup_new_group_blocks(struct super_block *sb, ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8, bh->b_data); - ext4_handle_dirty_metadata(handle, NULL, bh); + err = ext4_handle_dirty_metadata(handle, NULL, bh); + if (unlikely(err)) + ext4_std_error(sb, err); exit_bh: brelse(bh); @@ -417,17 +427,21 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, goto exit_dind; } - if ((err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh))) + err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh); + if (unlikely(err)) goto exit_dind; - if ((err = ext4_journal_get_write_access(handle, *primary))) + err = ext4_journal_get_write_access(handle, *primary); + if (unlikely(err)) goto exit_sbh; - if ((err = ext4_journal_get_write_access(handle, dind))) - goto exit_primary; + err = ext4_journal_get_write_access(handle, dind); + if (unlikely(err)) + ext4_std_error(sb, err); /* ext4_reserve_inode_write() gets a reference on the iloc */ - if ((err = ext4_reserve_inode_write(handle, inode, &iloc))) + err = ext4_reserve_inode_write(handle, inode, &iloc); + if (unlikely(err)) goto exit_dindj; n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *), @@ -449,12 +463,20 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, * reserved inode, and will become GDT blocks (primary and backup). 
*/ data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0; - ext4_handle_dirty_metadata(handle, NULL, dind); - brelse(dind); + err = ext4_handle_dirty_metadata(handle, NULL, dind); + if (unlikely(err)) { + ext4_std_error(sb, err); + goto exit_inode; + } inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9; ext4_mark_iloc_dirty(handle, inode, &iloc); memset((*primary)->b_data, 0, sb->s_blocksize); - ext4_handle_dirty_metadata(handle, NULL, *primary); + err = ext4_handle_dirty_metadata(handle, NULL, *primary); + if (unlikely(err)) { + ext4_std_error(sb, err); + goto exit_inode; + } + brelse(dind); o_group_desc = EXT4_SB(sb)->s_group_desc; memcpy(n_group_desc, o_group_desc, @@ -465,19 +487,19 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, kfree(o_group_desc); le16_add_cpu(&es->s_reserved_gdt_blocks, -1); - ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); + err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); + if (err) + ext4_std_error(sb, err); - return 0; + return err; exit_inode: /* ext4_journal_release_buffer(handle, iloc.bh); */ brelse(iloc.bh); exit_dindj: /* ext4_journal_release_buffer(handle, dind); */ -exit_primary: - /* ext4_journal_release_buffer(handle, *primary); */ exit_sbh: - /* ext4_journal_release_buffer(handle, *primary); */ + /* ext4_journal_release_buffer(handle, EXT4_SB(sb)->s_sbh); */ exit_dind: brelse(dind); exit_bh: @@ -660,7 +682,9 @@ static void update_backups(struct super_block *sb, memset(bh->b_data + size, 0, rest); set_buffer_uptodate(bh); unlock_buffer(bh); - ext4_handle_dirty_metadata(handle, NULL, bh); + err = ext4_handle_dirty_metadata(handle, NULL, bh); + if (unlikely(err)) + ext4_std_error(sb, err); brelse(bh); } if ((err2 = ext4_journal_stop(handle)) && !err) @@ -878,7 +902,11 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) /* Update the global fs size fields */ sbi->s_groups_count++; - ext4_handle_dirty_metadata(handle, NULL, primary); + err = ext4_handle_dirty_metadata(handle, NULL, primary); + if (unlikely(err)) { + ext4_std_error(sb, err); + goto exit_journal; + } /* Update the reserved block counts only once the new group is * active. */ -- cgit v1.2.2 From 3889fd57ea3c58209354862523275774fca9db03 Mon Sep 17 00:00:00 2001 From: Jiaying Zhang Date: Mon, 10 Jan 2011 12:47:05 -0500 Subject: ext4: flush the i_completed_io_list during ext4_truncate Ted first found the bug when running 2.6.36 kernel with dioread_nolock mount option that xfstests #13 complained about wrong file size during fsck. However, the bug exists in the older kernels as well although it is somehow harder to trigger. The problem is that ext4_end_io_work() can happen after we have truncated an inode to a smaller size. Then when ext4_end_io_work() calls ext4_convert_unwritten_extents(), we may reallocate some blocks that have been truncated, so the inode size becomes inconsistent with the allocated blocks. The following patch flushes the i_completed_io_list during truncate to reduce the risk that some pending end_io requests are executed later and convert already truncated blocks to initialized. Note that although the fix helps reduce the problem a lot there may still be a race window between vmtruncate() and ext4_end_io_work(). The fundamental problem is that if vmtruncate() is called without either i_mutex or i_alloc_sem held, it can race with an ongoing write request so that the io_end request is processed later when the corresponding blocks have been truncated. 
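For illustration (not part of the patch), the band-aid described below simply drains any completed-but-unconverted end_io work before the truncate proceeds; a simplified sketch using the ext4_flush_completed_IO() helper this patch exports:

    void ext4_ext_truncate(struct inode *inode)
    {
            /*
             * Finish pending end_io work first so we do not convert
             * already-truncated blocks to initialized later.
             */
            ext4_flush_completed_IO(inode);

            /* ... existing truncate logic continues here ... */
    }
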
Ted and I have discussed the problem offline and we saw a few ways to fix the race completely: a) We guarantee that i_mutex lock and i_alloc_sem write lock are both hold whenever vmtruncate() is called. The i_mutex lock prevents any new write requests from entering writeback and the i_alloc_sem prevents the race from ext4_page_mkwrite(). Currently we hold both locks if vmtruncate() is called from do_truncate(), which is probably the most common case. However, there are places where we may call vmtruncate() without holding either i_mutex or i_alloc_sem. I would like to ask for other people's opinions on what locks are expected to be held before calling vmtruncate(). There seems a disagreement among the callers of that function. b) We change the ext4 write path so that we change the extent tree to contain the newly allocated blocks and update i_size both at the same time --- when the write of the data blocks is completed. c) We add some additional locking to synchronize vmtruncate() and ext4_end_io_work(). This approach may have performance implications so we need to be careful. All of the above proposals may require more substantial changes, so we may consider to take the following patch as a bandaid. Signed-off-by: Jiaying Zhang Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 1 + fs/ext4/extents.c | 6 ++++++ fs/ext4/fsync.c | 4 ++-- 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 32b7daa41a42..bab2387fba43 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1671,6 +1671,7 @@ extern void ext4_htree_free_dir_info(struct dir_private_info *p); /* fsync.c */ extern int ext4_sync_file(struct file *, int); +extern int ext4_flush_completed_IO(struct inode *); /* hash.c */ extern int ext4fs_dirhash(const char *name, int len, struct diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 9081d1060a5f..627f7ae94ae5 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3533,6 +3533,12 @@ void ext4_ext_truncate(struct inode *inode) handle_t *handle; int err = 0; + /* + * finish any pending end_io work so we won't run the risk of + * converting any truncated blocks to initialized later + */ + ext4_flush_completed_IO(inode); + /* * probably first extent we're gonna free will be last in block */ diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index c1a7bc923cf6..7829b287822a 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c @@ -75,7 +75,7 @@ static void dump_completed_IO(struct inode * inode) * to written. * The function return the number of pending IOs on success. */ -static int flush_completed_IO(struct inode *inode) +extern int ext4_flush_completed_IO(struct inode *inode) { ext4_io_end_t *io; struct ext4_inode_info *ei = EXT4_I(inode); @@ -169,7 +169,7 @@ int ext4_sync_file(struct file *file, int datasync) if (inode->i_sb->s_flags & MS_RDONLY) return 0; - ret = flush_completed_IO(inode); + ret = ext4_flush_completed_IO(inode); if (ret < 0) return ret; -- cgit v1.2.2 From a5196f8cdfbf6ccb20f093aaf48852d6d23b4e0b Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 10 Jan 2011 12:47:07 -0500 Subject: ext4: remove ext4_mb_return_to_preallocation() This function was never implemented, except for a BUG_ON which was tripping when ext4 is run without a journal. 
The problem is that although the comment asserts that "truncate (which is the only way to free block) discards all preallocations", ext4_free_blocks() is also called in various error recovery paths when blocks have been allocated, but for various reasons, we were not able to use those data blocks (for example, because we ran out of memory while trying to manipulate the extent tree, or some other similar situation). In addition to the fact that this function isn't implemented except for the incorrect BUG_ON, the single caller of this function, ext4_free_blocks(), doesn't use it all if the journal is enabled. So remove the (stub) function entirely for now. If we decide it's better to add it back, it's only going to be useful with a relatively large number of code changes anyway. Google-Bug-Id: 3236408 Cc: Jiaying Zhang Signed-off-by: "Theodore Ts'o" --- fs/ext4/mballoc.c | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 21ee30b86de5..cd5214f75397 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -3881,19 +3881,6 @@ repeat: } } -/* - * finds all preallocated spaces and return blocks being freed to them - * if preallocated space becomes full (no block is used from the space) - * then the function frees space in buddy - * XXX: at the moment, truncate (which is the only way to free blocks) - * discards all preallocations - */ -static void ext4_mb_return_to_preallocation(struct inode *inode, - struct ext4_buddy *e4b, - sector_t block, int count) -{ - BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list)); -} #ifdef CONFIG_EXT4_DEBUG static void ext4_mb_show_ac(struct ext4_allocation_context *ac) { @@ -4648,7 +4635,6 @@ do_more: ext4_lock_group(sb, block_group); mb_clear_bits(bitmap_bh->b_data, bit, count); mb_free_blocks(inode, &e4b, bit, count); - ext4_mb_return_to_preallocation(inode, &e4b, block, count); } ret = ext4_free_blks_count(sb, gdp) + count; -- cgit v1.2.2 From 1c5b9e9065567876c2d4a7a16d78f0fed154a5bf Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 10 Jan 2011 12:51:28 -0500 Subject: ext4: fix memory leak in ext4_free_branches Commit 40389687 moved a call to ext4_forget() out of ext4_free_branches and let ext4_free_blocks() handle calling bforget(). But that change unfortunately did not replace the call to ext4_forget() with brelse(), which was needed to drop the in-use count of the indirect block's buffer head, which lead to a memory leak when deleting files that used indirect blocks. Fix this. Thanks to Hugh Dickins for pointing this out. Cc: stable@kernel.org Signed-off-by: "Theodore Ts'o" --- fs/ext4/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 84b616269265..e80fc513eacc 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4378,6 +4378,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, (__le32 *) bh->b_data, (__le32 *) bh->b_data + addr_per_block, depth); + brelse(bh); /* * Everything below this this pointer has been -- cgit v1.2.2 From d002ebf1d8daa5a317645b1c4a3a0b7ea2abc9ac Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Mon, 10 Jan 2011 13:03:35 -0500 Subject: ext4: don't pass entire map to check_eofblocks_fl Since check_eofblocks_fl() only uses the m_lblk portion of the map structure, we may as well pass that directly, rather than passing the entire map, which IMHO obfuscates what parameters check_eofblocks_fl() cares about. Not a big deal, but seems tidier and less confusing, to me. 
Signed-off-by: Eric Sandeen Signed-off-by: "Theodore Ts'o" --- fs/ext4/extents.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 627f7ae94ae5..e910720e8bb8 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3102,7 +3102,7 @@ static void unmap_underlying_metadata_blocks(struct block_device *bdev, * Handle EOFBLOCKS_FL flag, clearing it if necessary */ static int check_eofblocks_fl(handle_t *handle, struct inode *inode, - struct ext4_map_blocks *map, + ext4_lblk_t lblk, struct ext4_ext_path *path, unsigned int len) { @@ -3132,7 +3132,7 @@ static int check_eofblocks_fl(handle_t *handle, struct inode *inode, * this turns out to be false, we can bail out from this * function immediately. */ - if (map->m_lblk + len < le32_to_cpu(last_ex->ee_block) + + if (lblk + len < le32_to_cpu(last_ex->ee_block) + ext4_ext_get_actual_len(last_ex)) return 0; /* @@ -3188,8 +3188,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, path); if (ret >= 0) { ext4_update_inode_fsync_trans(handle, inode, 1); - err = check_eofblocks_fl(handle, inode, map, path, - map->m_len); + err = check_eofblocks_fl(handle, inode, map->m_lblk, + path, map->m_len); } else err = ret; goto out2; @@ -3219,7 +3219,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, ret = ext4_ext_convert_to_initialized(handle, inode, map, path); if (ret >= 0) { ext4_update_inode_fsync_trans(handle, inode, 1); - err = check_eofblocks_fl(handle, inode, map, path, map->m_len); + err = check_eofblocks_fl(handle, inode, map->m_lblk, path, + map->m_len); if (err < 0) goto out2; } @@ -3472,7 +3473,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, map->m_flags |= EXT4_MAP_UNINIT; } - err = check_eofblocks_fl(handle, inode, map, path, ar.len); + err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len); if (err) goto out2; -- cgit v1.2.2 From b853b96b1dbdc05fc8eae141a595366d8172962b Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Mon, 22 Nov 2010 12:29:17 +0100 Subject: ext3: Add batched discard support for ext3 Walk through allocation groups and trim all free extents. It can be invoked through FITRIM ioctl on the file system. The main idea is to provide a way to trim the whole file system if needed, since some SSD's may suffer from performance loss after the whole device was filled (it does not mean that fs is full!). It search for free extents in allocation groups specified by Byte range start -> start+len. When the free extent is within this range, blocks are marked as used and then trimmed. Afterwards these blocks are marked as free in per-group bitmap. 
[JK: Fixed up error handling and trimming of a single group] Signed-off-by: Lukas Czerner Reviewed-by: Jan Kara Reviewed-by: Dmitry Monakhov Signed-off-by: Jan Kara --- fs/ext3/balloc.c | 266 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 266 insertions(+) (limited to 'fs') diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index b3db22649426..045995c8ce5a 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c @@ -20,6 +20,7 @@ #include #include #include +#include /* * balloc.c contains the blocks allocation and deallocation routines @@ -39,6 +40,21 @@ #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) +/* + * Calculate the block group number and offset, given a block number + */ +static void ext3_get_group_no_and_offset(struct super_block *sb, + ext3_fsblk_t blocknr, unsigned long *blockgrpp, ext3_grpblk_t *offsetp) +{ + struct ext3_super_block *es = EXT3_SB(sb)->s_es; + + blocknr = blocknr - le32_to_cpu(es->s_first_data_block); + if (offsetp) + *offsetp = blocknr % EXT3_BLOCKS_PER_GROUP(sb); + if (blockgrpp) + *blockgrpp = blocknr / EXT3_BLOCKS_PER_GROUP(sb); +} + /** * ext3_get_group_desc() -- load group descriptor from disk * @sb: super block @@ -1885,3 +1901,253 @@ unsigned long ext3_bg_num_gdb(struct super_block *sb, int group) return ext3_bg_num_gdb_meta(sb,group); } + +/** + * ext3_trim_all_free -- function to trim all free space in alloc. group + * @sb: super block for file system + * @group: allocation group to trim + * @start: first group block to examine + * @max: last group block to examine + * @gdp: allocation group description structure + * @minblocks: minimum extent block count + * + * ext3_trim_all_free walks through group's block bitmap searching for free + * blocks. When the free block is found, it tries to allocate this block and + * consequent free block to get the biggest free extent possible, until it + * reaches any used block. Then issue a TRIM command on this extent and free + * the extent in the block bitmap. This is done until whole group is scanned. 
+ */ +ext3_grpblk_t ext3_trim_all_free(struct super_block *sb, unsigned int group, + ext3_grpblk_t start, ext3_grpblk_t max, + ext3_grpblk_t minblocks) +{ + handle_t *handle; + ext3_grpblk_t next, free_blocks, bit, freed, count = 0; + ext3_fsblk_t discard_block; + struct ext3_sb_info *sbi; + struct buffer_head *gdp_bh, *bitmap_bh = NULL; + struct ext3_group_desc *gdp; + int err = 0, ret = 0; + + /* + * We will update one block bitmap, and one group descriptor + */ + handle = ext3_journal_start_sb(sb, 2); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + bitmap_bh = read_block_bitmap(sb, group); + if (!bitmap_bh) { + err = -EIO; + goto err_out; + } + + BUFFER_TRACE(bitmap_bh, "getting undo access"); + err = ext3_journal_get_undo_access(handle, bitmap_bh); + if (err) + goto err_out; + + gdp = ext3_get_group_desc(sb, group, &gdp_bh); + if (!gdp) { + err = -EIO; + goto err_out; + } + + BUFFER_TRACE(gdp_bh, "get_write_access"); + err = ext3_journal_get_write_access(handle, gdp_bh); + if (err) + goto err_out; + + free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); + sbi = EXT3_SB(sb); + + /* Walk through the whole group */ + while (start < max) { + start = bitmap_search_next_usable_block(start, bitmap_bh, max); + if (start < 0) + break; + next = start; + + /* + * Allocate contiguous free extents by setting bits in the + * block bitmap + */ + while (next < max + && claim_block(sb_bgl_lock(sbi, group), + next, bitmap_bh)) { + next++; + } + + /* We did not claim any blocks */ + if (next == start) + continue; + + discard_block = (ext3_fsblk_t)start + + ext3_group_first_block_no(sb, group); + + /* Update counters */ + spin_lock(sb_bgl_lock(sbi, group)); + le16_add_cpu(&gdp->bg_free_blocks_count, start - next); + spin_unlock(sb_bgl_lock(sbi, group)); + percpu_counter_sub(&sbi->s_freeblocks_counter, next - start); + + /* Do not issue a TRIM on extents smaller than minblocks */ + if ((next - start) < minblocks) + goto free_extent; + + /* Send the TRIM command down to the device */ + err = sb_issue_discard(sb, discard_block, next - start, + GFP_NOFS, 0); + count += (next - start); +free_extent: + freed = 0; + + /* + * Clear bits in the bitmap + */ + for (bit = start; bit < next; bit++) { + BUFFER_TRACE(bitmap_bh, "clear bit"); + if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, group), + bit, bitmap_bh->b_data)) { + ext3_error(sb, __func__, + "bit already cleared for block "E3FSBLK, + (unsigned long)bit); + BUFFER_TRACE(bitmap_bh, "bit already cleared"); + } else { + freed++; + } + } + + /* Update couters */ + spin_lock(sb_bgl_lock(sbi, group)); + le16_add_cpu(&gdp->bg_free_blocks_count, freed); + spin_unlock(sb_bgl_lock(sbi, group)); + percpu_counter_add(&sbi->s_freeblocks_counter, freed); + + start = next; + if (err < 0) { + if (err != -EOPNOTSUPP) + ext3_warning(sb, __func__, "Discard command " + "returned error %d\n", err); + break; + } + + if (fatal_signal_pending(current)) { + err = -ERESTARTSYS; + break; + } + + cond_resched(); + + /* No more suitable extents */ + if ((free_blocks - count) < minblocks) + break; + } + + /* We dirtied the bitmap block */ + BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); + ret = ext3_journal_dirty_metadata(handle, bitmap_bh); + if (!err) + err = ret; + + /* And the group descriptor block */ + BUFFER_TRACE(gdp_bh, "dirtied group descriptor block"); + ret = ext3_journal_dirty_metadata(handle, gdp_bh); + if (!err) + err = ret; + + ext3_debug("trimmed %d blocks in the group %d\n", + count, group); + +err_out: + if (err) + count = err; + ext3_journal_stop(handle); + 
brelse(bitmap_bh); + + return count; +} + +/** + * ext3_trim_fs() -- trim ioctl handle function + * @sb: superblock for filesystem + * @start: First Byte to trim + * @len: number of Bytes to trim from start + * @minlen: minimum extent length in Bytes + * + * ext3_trim_fs goes through all allocation groups containing Bytes from + * start to start+len. For each such a group ext3_trim_all_free function + * is invoked to trim all free space. + */ +int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range) +{ + ext3_grpblk_t last_block, first_block, free_blocks; + unsigned long first_group, last_group; + unsigned long group, ngroups; + struct ext3_group_desc *gdp; + struct ext3_super_block *es = EXT3_SB(sb)->s_es; + uint64_t start, len, minlen, trimmed; + ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count); + int ret = 0; + + start = range->start >> sb->s_blocksize_bits; + len = range->len >> sb->s_blocksize_bits; + minlen = range->minlen >> sb->s_blocksize_bits; + trimmed = 0; + + if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb))) + return -EINVAL; + if (start >= max_blks) + goto out; + if (start < le32_to_cpu(es->s_first_data_block)) { + len -= le32_to_cpu(es->s_first_data_block) - start; + start = le32_to_cpu(es->s_first_data_block); + } + if (start + len > max_blks) + len = max_blks - start; + + ngroups = EXT3_SB(sb)->s_groups_count; + smp_rmb(); + + /* Determine first and last group to examine based on start and len */ + ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) start, + &first_group, &first_block); + ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) (start + len), + &last_group, &last_block); + last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group; + last_block = EXT3_BLOCKS_PER_GROUP(sb); + + if (first_group > last_group) + return -EINVAL; + + for (group = first_group; group <= last_group; group++) { + gdp = ext3_get_group_desc(sb, group, NULL); + if (!gdp) + break; + + free_blocks = le16_to_cpu(gdp->bg_free_blocks_count); + if (free_blocks < minlen) + continue; + + if (len >= EXT3_BLOCKS_PER_GROUP(sb)) + len -= (EXT3_BLOCKS_PER_GROUP(sb) - first_block); + else + last_block = first_block + len; + + ret = ext3_trim_all_free(sb, group, first_block, + last_block, minlen); + if (ret < 0) + break; + + trimmed += ret; + first_block = 0; + } + + if (ret >= 0) + ret = 0; + +out: + range->len = trimmed * sb->s_blocksize; + + return ret; +} -- cgit v1.2.2 From 9c52749232b5cef506877ac633ea14083bd17e02 Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Mon, 22 Nov 2010 12:29:18 +0100 Subject: ext3: Add FITRIM handling The ioctl takes fstrim_range structure (defined in include/linux/fs.h) as an argument specifying a range of filesystem to trim and the minimum size of an continguous extent to trim. After the FITRIM is done, the number of bytes passed from the filesystem down the block stack to the device for potential discard is stored in fstrim_range.len. This number is a maximum discard amount from the storage device's perspective, because FITRIM called repeatedly will keep sending the same sectors for discard. fstrim_range.len will report the same potential discard bytes each time, but only sectors which had been written to between the discards would actually be discarded by the storage device. Further, the kernel block layer reserves the right to adjust the discard ranges to fit raid stripe geometry, non-trim capable devices in a LVM setup, etc. These reductions would not be reflected in fstrim_range.len. 
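For illustration (not part of the patch), a minimal userspace sketch of driving the ioctl; the mount point path is hypothetical:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>           /* FITRIM, struct fstrim_range */

    int main(void)
    {
            struct fstrim_range range = { .start = 0, .len = ~0ULL, .minlen = 0 };
            int fd = open("/mnt/ext3", O_RDONLY);   /* hypothetical mount point */

            if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
                    perror("FITRIM");
                    return 1;
            }
            /* range.len now reports the number of bytes potentially discarded */
            printf("trimmed up to %llu bytes\n", (unsigned long long)range.len);
            return 0;
    }
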
Thus fstrim_range.len can give the user better insight on how much storage space has potentially been released for wear-leveling, but it needs to be one of only one criteria the userspace tools take into account when trying to optimize calls to FITRIM. Thanks to Greg Freemyer for better commit message. Signed-off-by: Lukas Czerner Signed-off-by: Jan Kara --- fs/ext3/ioctl.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'fs') diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c index 88974814783a..fc080dd561f7 100644 --- a/fs/ext3/ioctl.c +++ b/fs/ext3/ioctl.c @@ -276,7 +276,29 @@ group_add_out: mnt_drop_write(filp->f_path.mnt); return err; } + case FITRIM: { + struct super_block *sb = inode->i_sb; + struct fstrim_range range; + int ret = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (copy_from_user(&range, (struct fstrim_range *)arg, + sizeof(range))) + return -EFAULT; + + ret = ext3_trim_fs(sb, &range); + if (ret < 0) + return ret; + + if (copy_to_user((struct fstrim_range *)arg, &range, + sizeof(range))) + return -EFAULT; + + return 0; + } default: return -ENOTTY; -- cgit v1.2.2 From 055adcbd7da75868697e767adc4f3272f6cae76c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 23 Nov 2010 18:49:54 -0800 Subject: quota: Use %pV and __attribute__((format (printf in __quota_error and fix fallout Use %pV in __quota_error so a single printk can not be interleaved with other logging messages. Add __attribute__((format (printf, 3, 4))) so format and arguments can be verified by compiler. Make sure printk formats and arguments match. Block # needed a pointer dereference. Signed-off-by: Joe Perches Signed-off-by: Jan Kara --- fs/quota/dquot.c | 18 +++++++++++------- fs/quota/quota_tree.c | 9 +++++---- 2 files changed, 16 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 0fed41e6efcd..84becd3e4772 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -133,16 +133,20 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock); EXPORT_SYMBOL(dq_data_lock); void __quota_error(struct super_block *sb, const char *func, - const char *fmt, ...) + const char *fmt, ...) 
{ - va_list args; - if (printk_ratelimit()) { + va_list args; + struct va_format vaf; + va_start(args, fmt); - printk(KERN_ERR "Quota error (device %s): %s: ", - sb->s_id, func); - vprintk(fmt, args); - printk("\n"); + + vaf.fmt = fmt; + vaf.va = &args; + + printk(KERN_ERR "Quota error (device %s): %s: %pV\n", + sb->s_id, func, &vaf); + va_end(args); } } diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c index 9e48874eabcc..e41c1becf096 100644 --- a/fs/quota/quota_tree.c +++ b/fs/quota/quota_tree.c @@ -468,8 +468,8 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, return -ENOMEM; ret = read_blk(info, *blk, buf); if (ret < 0) { - quota_error(dquot->dq_sb, "Can't read quota data " - "block %u", blk); + quota_error(dquot->dq_sb, "Can't read quota data block %u", + *blk); goto out_buf; } newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); @@ -493,8 +493,9 @@ static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, } else { ret = write_blk(info, *blk, buf); if (ret < 0) - quota_error(dquot->dq_sb, "Can't write quota " - "tree block %u", blk); + quota_error(dquot->dq_sb, + "Can't write quota tree block %u", + *blk); } } out_buf: -- cgit v1.2.2 From 41dc6385bd6cd3366c1b4bede33688521eb21db9 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 25 Nov 2010 01:53:12 +0900 Subject: ext3: Add journal error check in resize.c Check return value of ext3_journal_get_write_access() and ext3_journal_dirty_metadata(). Signed-off-by: Namhyung Kim Signed-off-by: Jan Kara --- fs/ext3/resize.c | 65 ++++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 51 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c index e746d30b1232..108b142e11ed 100644 --- a/fs/ext3/resize.c +++ b/fs/ext3/resize.c @@ -249,7 +249,11 @@ static int setup_new_group_blocks(struct super_block *sb, memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size); set_buffer_uptodate(gdb); unlock_buffer(gdb); - ext3_journal_dirty_metadata(handle, gdb); + err = ext3_journal_dirty_metadata(handle, gdb); + if (err) { + brelse(gdb); + goto exit_bh; + } ext3_set_bit(bit, bh->b_data); brelse(gdb); } @@ -269,7 +273,11 @@ static int setup_new_group_blocks(struct super_block *sb, err = PTR_ERR(gdb); goto exit_bh; } - ext3_journal_dirty_metadata(handle, gdb); + err = ext3_journal_dirty_metadata(handle, gdb); + if (err) { + brelse(gdb); + goto exit_bh; + } ext3_set_bit(bit, bh->b_data); brelse(gdb); } @@ -295,7 +303,11 @@ static int setup_new_group_blocks(struct super_block *sb, err = PTR_ERR(it); goto exit_bh; } - ext3_journal_dirty_metadata(handle, it); + err = ext3_journal_dirty_metadata(handle, it); + if (err) { + brelse(it); + goto exit_bh; + } brelse(it); ext3_set_bit(bit, bh->b_data); } @@ -306,7 +318,9 @@ static int setup_new_group_blocks(struct super_block *sb, mark_bitmap_end(input->blocks_count, EXT3_BLOCKS_PER_GROUP(sb), bh->b_data); - ext3_journal_dirty_metadata(handle, bh); + err = ext3_journal_dirty_metadata(handle, bh); + if (err) + goto exit_bh; brelse(bh); /* Mark unused entries in inode bitmap used */ @@ -319,7 +333,7 @@ static int setup_new_group_blocks(struct super_block *sb, mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb), bh->b_data); - ext3_journal_dirty_metadata(handle, bh); + err = ext3_journal_dirty_metadata(handle, bh); exit_bh: brelse(bh); @@ -503,12 +517,19 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, * reserved inode, and will become GDT blocks (primary 
and backup). */ data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)] = 0; - ext3_journal_dirty_metadata(handle, dind); + err = ext3_journal_dirty_metadata(handle, dind); + if (err) + goto exit_group_desc; brelse(dind); + dind = NULL; inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9; - ext3_mark_iloc_dirty(handle, inode, &iloc); + err = ext3_mark_iloc_dirty(handle, inode, &iloc); + if (err) + goto exit_group_desc; memset((*primary)->b_data, 0, sb->s_blocksize); - ext3_journal_dirty_metadata(handle, *primary); + err = ext3_journal_dirty_metadata(handle, *primary); + if (err) + goto exit_group_desc; o_group_desc = EXT3_SB(sb)->s_group_desc; memcpy(n_group_desc, o_group_desc, @@ -519,10 +540,14 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, kfree(o_group_desc); le16_add_cpu(&es->s_reserved_gdt_blocks, -1); - ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); + err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); + if (err) + goto exit_inode; return 0; +exit_group_desc: + kfree(n_group_desc); exit_inode: //ext3_journal_release_buffer(handle, iloc.bh); brelse(iloc.bh); @@ -706,16 +731,20 @@ static void update_backups(struct super_block *sb, } ext3_debug("update metadata backup %#04lx\n", (unsigned long)bh->b_blocknr); - if ((err = ext3_journal_get_write_access(handle, bh))) + if ((err = ext3_journal_get_write_access(handle, bh))) { + brelse(bh); break; + } lock_buffer(bh); memcpy(bh->b_data, data, size); if (rest) memset(bh->b_data + size, 0, rest); set_buffer_uptodate(bh); unlock_buffer(bh); - ext3_journal_dirty_metadata(handle, bh); + err = ext3_journal_dirty_metadata(handle, bh); brelse(bh); + if (err) + break; } if ((err2 = ext3_journal_stop(handle)) && !err) err = err2; @@ -922,7 +951,9 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) /* Update the global fs size fields */ sbi->s_groups_count++; - ext3_journal_dirty_metadata(handle, primary); + err = ext3_journal_dirty_metadata(handle, primary); + if (err) + goto exit_journal; /* Update the reserved block counts only once the new group is * active. */ @@ -934,7 +965,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) percpu_counter_add(&sbi->s_freeinodes_counter, EXT3_INODES_PER_GROUP(sb)); - ext3_journal_dirty_metadata(handle, sbi->s_sbh); + err = ext3_journal_dirty_metadata(handle, sbi->s_sbh); exit_journal: mutex_unlock(&sbi->s_resize_lock); @@ -1064,8 +1095,14 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es, goto exit_put; } es->s_blocks_count = cpu_to_le32(o_blocks_count + add); - ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); + err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); mutex_unlock(&EXT3_SB(sb)->s_resize_lock); + if (err) { + ext3_warning(sb, __func__, + "error %d on journal dirty metadata", err); + ext3_journal_stop(handle); + goto exit_put; + } ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n", o_blocks_count, o_blocks_count + add); ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks); -- cgit v1.2.2 From 156e74312f1ffc0a2639c24c771c5a0e106f0505 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 25 Nov 2010 01:53:13 +0900 Subject: ext3: Add more journal error check Check return value of ext3_journal_get_write_acccess() and ext3_journal_dirty_metadata(). 
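For illustration (not part of the patch), the pattern is to propagate these return codes instead of ignoring them; a simplified sketch, with the error label depending on the call site:

    err = ext3_journal_get_write_access(handle, bh);
    if (err)
            goto journal_error;

    err = ext3_journal_dirty_metadata(handle, bh);
    if (err)
            goto journal_error;
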
Signed-off-by: Namhyung Kim Signed-off-by: Jan Kara --- fs/ext3/inode.c | 6 ++++-- fs/ext3/namei.c | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index a9580617edd2..ae94f6d949f5 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -2145,13 +2145,15 @@ static void ext3_clear_blocks(handle_t *handle, struct inode *inode, if (try_to_extend_transaction(handle, inode)) { if (bh) { BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); - ext3_journal_dirty_metadata(handle, bh); + if (ext3_journal_dirty_metadata(handle, bh)) + return; } ext3_mark_inode_dirty(handle, inode); truncate_restart_transaction(handle, inode); if (bh) { BUFFER_TRACE(bh, "retaking write access"); - ext3_journal_get_write_access(handle, bh); + if (ext3_journal_get_write_access(handle, bh)) + return; } } diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index e69eed547242..cc682ab55b73 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -1598,7 +1598,9 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry, if (err) goto journal_error; } - ext3_journal_dirty_metadata(handle, frames[0].bh); + err = ext3_journal_dirty_metadata(handle, frames[0].bh); + if (err) + goto journal_error; } de = do_split(handle, dir, &bh, frame, &hinfo, &err); if (!de) -- cgit v1.2.2 From 40a063f6691ce937a3d00c9700b6964b5ec4e022 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Tue, 7 Dec 2010 11:51:05 -0600 Subject: ext2: speed up file creates by optimizing rec_len functions The addition of 64k block capability in the rec_len_from_disk and rec_len_to_disk functions added a bit of math overhead which slows down file create workloads needlessly when the architecture cannot even support 64k blocks, thanks to page size limits. The directory entry checking can also be optimized a bit by sprinkling in some unlikely() conditions to move the error handling out of line. bonnie++ sequential file creates on a 512MB ramdisk speeds up from about 2200/s to about 2500/s, about a 14% improvement. Signed-off-by: Eric Sandeen Signed-off-by: Jan Kara --- fs/ext2/dir.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index 2709b34206ab..47cda410b548 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -28,21 +28,30 @@ typedef struct ext2_dir_entry_2 ext2_dirent; +/* + * Tests against MAX_REC_LEN etc were put in place for 64k block + * sizes; if that is not possible on this arch, we can skip + * those tests and speed things up. 
+ */ static inline unsigned ext2_rec_len_from_disk(__le16 dlen) { unsigned len = le16_to_cpu(dlen); +#if (PAGE_CACHE_SIZE >= 65536) if (len == EXT2_MAX_REC_LEN) return 1 << 16; +#endif return len; } static inline __le16 ext2_rec_len_to_disk(unsigned len) { +#if (PAGE_CACHE_SIZE >= 65536) if (len == (1 << 16)) return cpu_to_le16(EXT2_MAX_REC_LEN); else BUG_ON(len > (1 << 16)); +#endif return cpu_to_le16(len); } @@ -129,15 +138,15 @@ static void ext2_check_page(struct page *page, int quiet) p = (ext2_dirent *)(kaddr + offs); rec_len = ext2_rec_len_from_disk(p->rec_len); - if (rec_len < EXT2_DIR_REC_LEN(1)) + if (unlikely(rec_len < EXT2_DIR_REC_LEN(1))) goto Eshort; - if (rec_len & 3) + if (unlikely(rec_len & 3)) goto Ealign; - if (rec_len < EXT2_DIR_REC_LEN(p->name_len)) + if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len))) goto Enamelen; - if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)) + if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))) goto Espan; - if (le32_to_cpu(p->inode) > max_inumber) + if (unlikely(le32_to_cpu(p->inode) > max_inumber)) goto Einumber; } if (offs != limit) -- cgit v1.2.2 From a4ae3094869f18e26ece25ad175bbe4cd740e60b Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Tue, 7 Dec 2010 11:55:27 -0600 Subject: ext3: speed up file creates by optimizing rec_len functions The addition of 64k block capability in the rec_len_from_disk and rec_len_to_disk functions added a bit of math overhead which slows down file create workloads needlessly when the architecture cannot even support 64k blocks, thanks to page size limits. Similar changes already exist in the ext4 codebase. The directory entry checking can also be optimized a bit by sprinkling in some unlikely() conditions to move the error handling out of line. bonnie++ sequential file creates on a 512MB ramdisk speeds up from about 77,000/s to about 82,000/s, about a 6% improvement. Signed-off-by: Eric Sandeen Signed-off-by: Jan Kara --- fs/ext3/dir.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index e2e72c367cf6..34f0a072b935 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c @@ -69,25 +69,26 @@ int ext3_check_dir_entry (const char * function, struct inode * dir, const char * error_msg = NULL; const int rlen = ext3_rec_len_from_disk(de->rec_len); - if (rlen < EXT3_DIR_REC_LEN(1)) + if (unlikely(rlen < EXT3_DIR_REC_LEN(1))) error_msg = "rec_len is smaller than minimal"; - else if (rlen % 4 != 0) + else if (unlikely(rlen % 4 != 0)) error_msg = "rec_len % 4 != 0"; - else if (rlen < EXT3_DIR_REC_LEN(de->name_len)) + else if (unlikely(rlen < EXT3_DIR_REC_LEN(de->name_len))) error_msg = "rec_len is too small for name_len"; - else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize) + else if (unlikely((((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize))) error_msg = "directory entry across blocks"; - else if (le32_to_cpu(de->inode) > - le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count)) + else if (unlikely(le32_to_cpu(de->inode) > + le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))) error_msg = "inode out of bounds"; - if (error_msg != NULL) + if (unlikely(error_msg != NULL)) ext3_error (dir->i_sb, function, "bad entry in directory #%lu: %s - " "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", dir->i_ino, error_msg, offset, (unsigned long) le32_to_cpu(de->inode), rlen, de->name_len); + return error_msg == NULL ? 
1 : 0; } -- cgit v1.2.2 From 0ed0cca7aa87b1f5708f597075013c20d8359667 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 9 Dec 2010 15:39:00 +0100 Subject: ext2: Remove redundant unlikely() IS_ERR() already implies unlikely(), so it can be omitted here. Signed-off-by: Tobias Klauser Signed-off-by: Jan Kara --- fs/ext2/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index f8aecd2e3297..2e1d8341d827 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -67,7 +67,7 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, str inode = NULL; if (ino) { inode = ext2_iget(dir->i_sb, ino); - if (unlikely(IS_ERR(inode))) { + if (IS_ERR(inode)) { if (PTR_ERR(inode) == -ESTALE) { ext2_error(dir->i_sb, __func__, "deleted inode referenced: %lu", -- cgit v1.2.2 From 8057b9653923bd762d89ccb730c76cba40ce96f0 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Thu, 9 Dec 2010 15:39:34 +0100 Subject: ext3: Remove redundant unlikely() IS_ERR() already implies unlikely(), so it can be omitted here. Signed-off-by: Tobias Klauser Signed-off-by: Jan Kara --- fs/ext3/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index cc682ab55b73..b27ba71810ec 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -1038,7 +1038,7 @@ static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, str return ERR_PTR(-EIO); } inode = ext3_iget(dir->i_sb, ino); - if (unlikely(IS_ERR(inode))) { + if (IS_ERR(inode)) { if (PTR_ERR(inode) == -ESTALE) { ext3_error(dir->i_sb, __func__, "deleted inode referenced: %lu", -- cgit v1.2.2 From d96336b05d718b03ff03c94c0dc0cc283a29d534 Mon Sep 17 00:00:00 2001 From: Josh Hunt Date: Mon, 27 Dec 2010 13:46:38 -0800 Subject: ext2: Resolve 'dereferencing pointer to incomplete type' when enabling EXT2_XATTR_DEBUG When I enable EXT2_XATTR_DEBUG in fs/ext2/xattr.c I get a build error stating the following: CC fs/ext2/xattr.o fs/ext2/xattr.c: In function 'ext2_xattr_cache_insert': fs/ext2/xattr.c:841: error: dereferencing pointer to incomplete type fs/ext2/xattr.c:846: error: dereferencing pointer to incomplete type make[2]: *** [fs/ext2/xattr.o] Error 1 make[1]: *** [fs/ext2] Error 2 make: *** [fs] Error 2 These lines reference ext2_xattr_cache->c_entry_count which is defined in struct mb_cache. struct mb_cache is currently only defined in fs/mbcache.c. Moving struct mb_cache definition to include/linux/mbcache.h to resolve the issue. Signed-off-by: Josh Hunt Signed-off-by: Jan Kara --- fs/mbcache.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'fs') diff --git a/fs/mbcache.c b/fs/mbcache.c index 93444747237b..a25444ab2baf 100644 --- a/fs/mbcache.c +++ b/fs/mbcache.c @@ -76,18 +76,6 @@ EXPORT_SYMBOL(mb_cache_entry_find_first); EXPORT_SYMBOL(mb_cache_entry_find_next); #endif -struct mb_cache { - struct list_head c_cache_list; - const char *c_name; - atomic_t c_entry_count; - int c_max_entries; - int c_bucket_bits; - struct kmem_cache *c_entry_cache; - struct list_head *c_block_hash; - struct list_head *c_index_hash; -}; - - /* * Global data: list of all mbcache's, lru list, and a spinlock for * accessing cache data structures on SMP machines. 
The lru list is -- cgit v1.2.2 From 6650239a4b01077e80d5a4468562756d77afaa59 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Sat, 8 Jan 2011 17:45:38 -0500 Subject: NFS: Don't use vm_map_ram() in readdir vm_map_ram() is not available on NOMMU platforms, and causes trouble on incoherrent architectures such as ARM when we access the page data through both the direct and the virtual mapping. The alternative is to use the direct mapping to access page data for the case when we are not crossing a page boundary, but to copy the data into a linear scratch buffer when we are accessing data that spans page boundaries. Signed-off-by: Trond Myklebust Tested-by: Marc Kleine-Budde Cc: stable@kernel.org [2.6.37] --- fs/nfs/dir.c | 44 +++++++++++++++++++++----------------------- fs/nfs/nfs2xdr.c | 6 ------ fs/nfs/nfs3xdr.c | 6 ------ fs/nfs/nfs4xdr.c | 6 ------ 4 files changed, 21 insertions(+), 41 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 996dd8989a91..0108cf4f3403 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include "delegation.h" @@ -459,25 +458,26 @@ out: /* Perform conversion from xdr to cache array */ static int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, - void *xdr_page, struct page *page, unsigned int buflen) + struct page **xdr_pages, struct page *page, unsigned int buflen) { struct xdr_stream stream; - struct xdr_buf buf; - __be32 *ptr = xdr_page; + struct xdr_buf buf = { + .pages = xdr_pages, + .page_len = buflen, + .buflen = buflen, + .len = buflen, + }; + struct page *scratch; struct nfs_cache_array *array; unsigned int count = 0; int status; - buf.head->iov_base = xdr_page; - buf.head->iov_len = buflen; - buf.tail->iov_len = 0; - buf.page_base = 0; - buf.page_len = 0; - buf.buflen = buf.head->iov_len; - buf.len = buf.head->iov_len; - - xdr_init_decode(&stream, &buf, ptr); + scratch = alloc_page(GFP_KERNEL); + if (scratch == NULL) + return -ENOMEM; + xdr_init_decode(&stream, &buf, NULL); + xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); do { status = xdr_decode(desc, entry, &stream); @@ -506,6 +506,8 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en } else status = PTR_ERR(array); } + + put_page(scratch); return status; } @@ -521,7 +523,6 @@ static void nfs_readdir_free_large_page(void *ptr, struct page **pages, unsigned int npages) { - vm_unmap_ram(ptr, npages); nfs_readdir_free_pagearray(pages, npages); } @@ -530,9 +531,8 @@ void nfs_readdir_free_large_page(void *ptr, struct page **pages, * to nfs_readdir_free_large_page */ static -void *nfs_readdir_large_page(struct page **pages, unsigned int npages) +int nfs_readdir_large_page(struct page **pages, unsigned int npages) { - void *ptr; unsigned int i; for (i = 0; i < npages; i++) { @@ -541,13 +541,11 @@ void *nfs_readdir_large_page(struct page **pages, unsigned int npages) goto out_freepages; pages[i] = page; } + return 0; - ptr = vm_map_ram(pages, npages, 0, PAGE_KERNEL); - if (!IS_ERR_OR_NULL(ptr)) - return ptr; out_freepages: nfs_readdir_free_pagearray(pages, i); - return NULL; + return -ENOMEM; } static @@ -577,8 +575,8 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, memset(array, 0, sizeof(struct nfs_cache_array)); array->eof_index = -1; - pages_ptr = nfs_readdir_large_page(pages, array_size); - if (!pages_ptr) + status = nfs_readdir_large_page(pages, array_size); + if (status < 0) goto out_release_array; do 
{ unsigned int pglen; @@ -587,7 +585,7 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, if (status < 0) break; pglen = status; - status = nfs_readdir_page_filler(desc, &entry, pages_ptr, page, pglen); + status = nfs_readdir_page_filler(desc, &entry, pages, page, pglen); if (status < 0) { if (status == -ENOSPC) status = 0; diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c index 5914a1911c95..b382a1b5e7e4 100644 --- a/fs/nfs/nfs2xdr.c +++ b/fs/nfs/nfs2xdr.c @@ -487,12 +487,6 @@ nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_se entry->d_type = DT_UNKNOWN; - p = xdr_inline_peek(xdr, 8); - if (p != NULL) - entry->eof = !p[0] && p[1]; - else - entry->eof = 0; - return p; out_overflow: diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index f6cc60f06dac..ba91236c6ee7 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -647,12 +647,6 @@ nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_s memset((u8*)(entry->fh), 0, sizeof(*entry->fh)); } - p = xdr_inline_peek(xdr, 8); - if (p != NULL) - entry->eof = !p[0] && p[1]; - else - entry->eof = 0; - return p; out_overflow: diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 9f1826b012e6..0662a9821df5 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -6215,12 +6215,6 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, if (verify_attr_len(xdr, p, len) < 0) goto out_overflow; - p = xdr_inline_peek(xdr, 8); - if (p != NULL) - entry->eof = !p[0] && p[1]; - else - entry->eof = 0; - return p; out_overflow: -- cgit v1.2.2 From a363f0c2030cb9781e7e458f4a9e354b6c43d7ce Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 11 Jan 2011 10:13:53 +1100 Subject: xfs: ensure sync write errors are returned xfs_file_aio_write() only returns the error from synchronous flushing of the data and inode if error == 0. At the point where error is being checked, it is guaranteed to be > 0. Therefore any errors returned by the data or fsync flush will never be returned. Fix the checks so we overwrite the current error once and only if an error really occurred. 
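For illustration (not part of the patch), the problem in miniature: 'error' is already non-zero by the time the sync-write results are checked, so the old guard never copies a flush failure back to the caller; the fix records a failure from either flush exactly once:

    /* old: error is already non-zero here, so the sync error is never returned */
    if (!error)
            error = error2;

    /* new: overwrite the return value only when a flush really failed */
    if (error)
            ret = error;
    else if (error2)
            ret = error2;
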
Signed-off-by: Dave Chinner Reviewed-by: Alex Elder Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_file.c | 49 +++++++++++++++++++++------------------------ 1 file changed, 23 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index ba8ad422a165..10b7fb4807a6 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -574,7 +574,7 @@ xfs_file_aio_write( struct inode *inode = mapping->host; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; - ssize_t ret = 0, error = 0; + ssize_t ret = 0; int ioflags = 0; xfs_fsize_t isize, new_size; int iolock; @@ -590,9 +590,9 @@ xfs_file_aio_write( if (file->f_mode & FMODE_NOCMTIME) ioflags |= IO_INVIS; - error = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ); - if (error) - return error; + ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ); + if (ret) + return ret; count = ocount; if (count == 0) @@ -616,9 +616,9 @@ relock: xfs_ilock(ip, XFS_ILOCK_EXCL|iolock); start: - error = -generic_write_checks(file, &pos, &count, + ret = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); - if (error) { + if (ret) { xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock); goto out_unlock_mutex; } @@ -660,8 +660,8 @@ start: */ if (pos > ip->i_size) { - error = xfs_zero_eof(ip, pos, ip->i_size); - if (error) { + ret = -xfs_zero_eof(ip, pos, ip->i_size); + if (ret) { xfs_iunlock(ip, XFS_ILOCK_EXCL); goto out_unlock_internal; } @@ -674,8 +674,8 @@ start: * by root. This keeps people from modifying setuid and * setgid binaries. */ - error = -file_remove_suid(file); - if (unlikely(error)) + ret = file_remove_suid(file); + if (unlikely(ret)) goto out_unlock_internal; /* We can write back this queue in page reclaim */ @@ -684,10 +684,10 @@ start: if ((ioflags & IO_ISDIRECT)) { if (mapping->nrpages) { WARN_ON(need_i_mutex == 0); - error = xfs_flushinval_pages(ip, + ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, FI_REMAPF_LOCKED); - if (error) + if (ret) goto out_unlock_internal; } @@ -720,24 +720,22 @@ start: } } else { int enospc = 0; - ssize_t ret2 = 0; write_retry: trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, ioflags); - ret2 = generic_file_buffered_write(iocb, iovp, nr_segs, + ret = generic_file_buffered_write(iocb, iovp, nr_segs, pos, &iocb->ki_pos, count, ret); /* * if we just got an ENOSPC, flush the inode now we * aren't holding any page locks and retry *once* */ - if (ret2 == -ENOSPC && !enospc) { - error = xfs_flush_pages(ip, 0, -1, 0, FI_NONE); - if (error) + if (ret == -ENOSPC && !enospc) { + ret = xfs_flush_pages(ip, 0, -1, 0, FI_NONE); + if (ret) goto out_unlock_internal; enospc = 1; goto write_retry; } - ret = ret2; } current->backing_dev_info = NULL; @@ -753,7 +751,6 @@ write_retry: xfs_iunlock(ip, XFS_ILOCK_EXCL); } - error = -ret; if (ret <= 0) goto out_unlock_internal; @@ -762,23 +759,23 @@ write_retry: /* Handle various SYNC-type writes */ if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { loff_t end = pos + ret - 1; - int error2; + int error, error2; xfs_iunlock(ip, iolock); if (need_i_mutex) mutex_unlock(&inode->i_mutex); - error2 = filemap_write_and_wait_range(mapping, pos, end); - if (!error) - error = error2; + error = filemap_write_and_wait_range(mapping, pos, end); if (need_i_mutex) mutex_lock(&inode->i_mutex); xfs_ilock(ip, iolock); error2 = -xfs_file_fsync(file, (file->f_flags & __O_SYNC) ? 
0 : 1); - if (!error) - error = error2; + if (error) + ret = error; + else if (error2) + ret = error2; } out_unlock_internal: @@ -800,7 +797,7 @@ write_retry: out_unlock_mutex: if (need_i_mutex) mutex_unlock(&inode->i_mutex); - return -error; + return ret; } STATIC int -- cgit v1.2.2 From edafb6da9aa725e4de5fe758fe81644b6167f9a2 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 11 Jan 2011 10:14:06 +1100 Subject: xfs: factor common post-write isize handling code Signed-off-by: Dave Chinner Reviewed-by: Alex Elder Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_file.c | 54 +++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index 10b7fb4807a6..b3915bf25770 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -321,6 +321,30 @@ xfs_file_splice_read( return ret; } +STATIC void +xfs_aio_write_isize_update( + struct inode *inode, + loff_t *ppos, + ssize_t bytes_written) +{ + struct xfs_inode *ip = XFS_I(inode); + xfs_fsize_t isize = i_size_read(inode); + + if (bytes_written > 0) + XFS_STATS_ADD(xs_write_bytes, bytes_written); + + if (unlikely(bytes_written < 0 && bytes_written != -EFAULT && + *ppos > isize)) + *ppos = isize; + + if (*ppos > ip->i_size) { + xfs_ilock(ip, XFS_ILOCK_EXCL); + if (*ppos > ip->i_size) + ip->i_size = *ppos; + xfs_iunlock(ip, XFS_ILOCK_EXCL); + } +} + STATIC ssize_t xfs_file_splice_write( struct pipe_inode_info *pipe, @@ -331,7 +355,7 @@ xfs_file_splice_write( { struct inode *inode = outfilp->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); - xfs_fsize_t isize, new_size; + xfs_fsize_t new_size; int ioflags = 0; ssize_t ret; @@ -355,19 +379,8 @@ xfs_file_splice_write( trace_xfs_file_splice_write(ip, count, *ppos, ioflags); ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); - if (ret > 0) - XFS_STATS_ADD(xs_write_bytes, ret); - - isize = i_size_read(inode); - if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize)) - *ppos = isize; - if (*ppos > ip->i_size) { - xfs_ilock(ip, XFS_ILOCK_EXCL); - if (*ppos > ip->i_size) - ip->i_size = *ppos; - xfs_iunlock(ip, XFS_ILOCK_EXCL); - } + xfs_aio_write_isize_update(inode, ppos, ret); if (ip->i_new_size) { xfs_ilock(ip, XFS_ILOCK_EXCL); @@ -576,7 +589,7 @@ xfs_file_aio_write( struct xfs_mount *mp = ip->i_mount; ssize_t ret = 0; int ioflags = 0; - xfs_fsize_t isize, new_size; + xfs_fsize_t new_size; int iolock; size_t ocount = 0, count; int need_i_mutex; @@ -740,22 +753,11 @@ write_retry: current->backing_dev_info = NULL; - isize = i_size_read(inode); - if (unlikely(ret < 0 && ret != -EFAULT && iocb->ki_pos > isize)) - iocb->ki_pos = isize; - - if (iocb->ki_pos > ip->i_size) { - xfs_ilock(ip, XFS_ILOCK_EXCL); - if (iocb->ki_pos > ip->i_size) - ip->i_size = iocb->ki_pos; - xfs_iunlock(ip, XFS_ILOCK_EXCL); - } + xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret); if (ret <= 0) goto out_unlock_internal; - XFS_STATS_ADD(xs_write_bytes, ret); - /* Handle various SYNC-type writes */ if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { loff_t end = pos + ret - 1; -- cgit v1.2.2 From 4c5cfd1b4157fb75d43b44a147c2feba6422fc4f Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 11 Jan 2011 10:14:16 +1100 Subject: xfs: factor post-write newsize updates Signed-off-by: Dave Chinner Reviewed-by: Alex Elder Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_file.c | 43 +++++++++++++++++++++---------------------- 1 file changed, 21 insertions(+), 22 
deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index b3915bf25770..c47d7dc0a307 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -345,6 +345,25 @@ xfs_aio_write_isize_update( } } +/* + * If this was a direct or synchronous I/O that failed (such as ENOSPC) then + * part of the I/O may have been written to disk before the error occured. In + * this case the on-disk file size may have been adjusted beyond the in-memory + * file size and now needs to be truncated back. + */ +STATIC void +xfs_aio_write_newsize_update( + struct xfs_inode *ip) +{ + if (ip->i_new_size) { + xfs_ilock(ip, XFS_ILOCK_EXCL); + ip->i_new_size = 0; + if (ip->i_d.di_size > ip->i_size) + ip->i_d.di_size = ip->i_size; + xfs_iunlock(ip, XFS_ILOCK_EXCL); + } +} + STATIC ssize_t xfs_file_splice_write( struct pipe_inode_info *pipe, @@ -381,14 +400,7 @@ xfs_file_splice_write( ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); xfs_aio_write_isize_update(inode, ppos, ret); - - if (ip->i_new_size) { - xfs_ilock(ip, XFS_ILOCK_EXCL); - ip->i_new_size = 0; - if (ip->i_d.di_size > ip->i_size) - ip->i_d.di_size = ip->i_size; - xfs_iunlock(ip, XFS_ILOCK_EXCL); - } + xfs_aio_write_newsize_update(ip); xfs_iunlock(ip, XFS_IOLOCK_EXCL); return ret; } @@ -781,20 +793,7 @@ write_retry: } out_unlock_internal: - if (ip->i_new_size) { - xfs_ilock(ip, XFS_ILOCK_EXCL); - ip->i_new_size = 0; - /* - * If this was a direct or synchronous I/O that failed (such - * as ENOSPC) then part of the I/O may have been written to - * disk before the error occured. In this case the on-disk - * file size may have been adjusted beyond the in-memory file - * size and now needs to be truncated back. - */ - if (ip->i_d.di_size > ip->i_size) - ip->i_d.di_size = ip->i_size; - xfs_iunlock(ip, XFS_ILOCK_EXCL); - } + xfs_aio_write_newsize_update(ip); xfs_iunlock(ip, iolock); out_unlock_mutex: if (need_i_mutex) -- cgit v1.2.2 From 487f84f3f80bc6f00c59725e822653d3ec174b85 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 12 Jan 2011 11:37:10 +1100 Subject: xfs: introduce xfs_rw_lock() helpers for locking the inode We need to obtain the i_mutex, i_iolock and i_ilock during the read and write paths. Add a set of wrapper functions to neatly encapsulate the lock ordering and shared/exclusive semantics to make the locking easier to follow and get right. Note that this changes some of the exclusive locking serialisation in that serialisation will occur against the i_mutex instead of the XFS_IOLOCK_EXCL. This does not change any behaviour, and it is arguably more efficient to use the mutex for such serialisation than the rw_sem. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_file.c | 131 +++++++++++++++++++++++++++----------------- 1 file changed, 80 insertions(+), 51 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index c47d7dc0a307..b5e13fbb7386 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -40,6 +40,40 @@ static const struct vm_operations_struct xfs_file_vm_ops; +/* + * Locking primitives for read and write IO paths to ensure we consistently use + * and order the inode->i_mutex, ip->i_lock and ip->i_iolock. 
+ */ +static inline void +xfs_rw_ilock( + struct xfs_inode *ip, + int type) +{ + if (type & XFS_IOLOCK_EXCL) + mutex_lock(&VFS_I(ip)->i_mutex); + xfs_ilock(ip, type); +} + +static inline void +xfs_rw_iunlock( + struct xfs_inode *ip, + int type) +{ + xfs_iunlock(ip, type); + if (type & XFS_IOLOCK_EXCL) + mutex_unlock(&VFS_I(ip)->i_mutex); +} + +static inline void +xfs_rw_ilock_demote( + struct xfs_inode *ip, + int type) +{ + xfs_ilock_demote(ip, type); + if (type & XFS_IOLOCK_EXCL) + mutex_unlock(&VFS_I(ip)->i_mutex); +} + /* * xfs_iozero * @@ -262,22 +296,21 @@ xfs_file_aio_read( if (XFS_FORCED_SHUTDOWN(mp)) return -EIO; - if (unlikely(ioflags & IO_ISDIRECT)) - mutex_lock(&inode->i_mutex); - xfs_ilock(ip, XFS_IOLOCK_SHARED); - if (unlikely(ioflags & IO_ISDIRECT)) { + xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); + if (inode->i_mapping->nrpages) { ret = -xfs_flushinval_pages(ip, (iocb->ki_pos & PAGE_CACHE_MASK), -1, FI_REMAPF_LOCKED); + if (ret) { + xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); + return ret; + } } - mutex_unlock(&inode->i_mutex); - if (ret) { - xfs_iunlock(ip, XFS_IOLOCK_SHARED); - return ret; - } - } + xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); + } else + xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags); @@ -285,7 +318,7 @@ xfs_file_aio_read( if (ret > 0) XFS_STATS_ADD(xs_read_bytes, ret); - xfs_iunlock(ip, XFS_IOLOCK_SHARED); + xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); return ret; } @@ -309,7 +342,7 @@ xfs_file_splice_read( if (XFS_FORCED_SHUTDOWN(ip->i_mount)) return -EIO; - xfs_ilock(ip, XFS_IOLOCK_SHARED); + xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); trace_xfs_file_splice_read(ip, count, *ppos, ioflags); @@ -317,7 +350,7 @@ xfs_file_splice_read( if (ret > 0) XFS_STATS_ADD(xs_read_bytes, ret); - xfs_iunlock(ip, XFS_IOLOCK_SHARED); + xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); return ret; } @@ -338,10 +371,10 @@ xfs_aio_write_isize_update( *ppos = isize; if (*ppos > ip->i_size) { - xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_rw_ilock(ip, XFS_ILOCK_EXCL); if (*ppos > ip->i_size) ip->i_size = *ppos; - xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); } } @@ -356,14 +389,22 @@ xfs_aio_write_newsize_update( struct xfs_inode *ip) { if (ip->i_new_size) { - xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_rw_ilock(ip, XFS_ILOCK_EXCL); ip->i_new_size = 0; if (ip->i_d.di_size > ip->i_size) ip->i_d.di_size = ip->i_size; - xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); } } +/* + * xfs_file_splice_write() does not use xfs_rw_ilock() because + * generic_file_splice_write() takes the i_mutex itself. This, in theory, + * couuld cause lock inversions between the aio_write path and the splice path + * if someone is doing concurrent splice(2) based writes and write(2) based + * writes to the same inode. The only real way to fix this is to re-implement + * the generic code here with correct locking orders. 
+ */ STATIC ssize_t xfs_file_splice_write( struct pipe_inode_info *pipe, @@ -604,7 +645,6 @@ xfs_file_aio_write( xfs_fsize_t new_size; int iolock; size_t ocount = 0, count; - int need_i_mutex; XFS_STATS_INC(xs_write_calls); @@ -631,21 +671,17 @@ xfs_file_aio_write( relock: if (ioflags & IO_ISDIRECT) { iolock = XFS_IOLOCK_SHARED; - need_i_mutex = 0; } else { iolock = XFS_IOLOCK_EXCL; - need_i_mutex = 1; - mutex_lock(&inode->i_mutex); } - xfs_ilock(ip, XFS_ILOCK_EXCL|iolock); - start: + xfs_rw_ilock(ip, XFS_ILOCK_EXCL|iolock); ret = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); if (ret) { - xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock); - goto out_unlock_mutex; + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL|iolock); + return ret; } if (ioflags & IO_ISDIRECT) { @@ -654,16 +690,20 @@ start: mp->m_rtdev_targp : mp->m_ddev_targp; if ((pos & target->bt_smask) || (count & target->bt_smask)) { - xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock); + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL|iolock); return XFS_ERROR(-EINVAL); } - if (!need_i_mutex && (mapping->nrpages || pos > ip->i_size)) { - xfs_iunlock(ip, XFS_ILOCK_EXCL|iolock); + /* + * For direct I/O, if there are cached pages or we're extending + * the file, we need IOLOCK_EXCL until we're sure the bytes at + * the new EOF have been zeroed and/or the cached pages are + * flushed out. Upgrade the I/O lock and start again. + */ + if (iolock != XFS_IOLOCK_EXCL && + (mapping->nrpages || pos > ip->i_size)) { + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL|iolock); iolock = XFS_IOLOCK_EXCL; - need_i_mutex = 1; - mutex_lock(&inode->i_mutex); - xfs_ilock(ip, XFS_ILOCK_EXCL|iolock); goto start; } } @@ -687,11 +727,11 @@ start: if (pos > ip->i_size) { ret = -xfs_zero_eof(ip, pos, ip->i_size); if (ret) { - xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); goto out_unlock_internal; } } - xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); /* * If we're writing the file then make sure to clear the @@ -708,7 +748,7 @@ start: if ((ioflags & IO_ISDIRECT)) { if (mapping->nrpages) { - WARN_ON(need_i_mutex == 0); + WARN_ON(iolock != XFS_IOLOCK_EXCL); ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, FI_REMAPF_LOCKED); @@ -716,13 +756,10 @@ start: goto out_unlock_internal; } - if (need_i_mutex) { + if (iolock == XFS_IOLOCK_EXCL) { /* demote the lock now the cached pages are gone */ - xfs_ilock_demote(ip, XFS_IOLOCK_EXCL); - mutex_unlock(&inode->i_mutex); - + xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); iolock = XFS_IOLOCK_SHARED; - need_i_mutex = 0; } trace_xfs_file_direct_write(ip, count, iocb->ki_pos, ioflags); @@ -740,7 +777,7 @@ start: count -= ret; ioflags &= ~IO_ISDIRECT; - xfs_iunlock(ip, iolock); + xfs_rw_iunlock(ip, iolock); goto relock; } } else { @@ -775,14 +812,9 @@ write_retry: loff_t end = pos + ret - 1; int error, error2; - xfs_iunlock(ip, iolock); - if (need_i_mutex) - mutex_unlock(&inode->i_mutex); - + xfs_rw_iunlock(ip, iolock); error = filemap_write_and_wait_range(mapping, pos, end); - if (need_i_mutex) - mutex_lock(&inode->i_mutex); - xfs_ilock(ip, iolock); + xfs_rw_ilock(ip, iolock); error2 = -xfs_file_fsync(file, (file->f_flags & __O_SYNC) ? 
0 : 1); @@ -794,10 +826,7 @@ write_retry: out_unlock_internal: xfs_aio_write_newsize_update(ip); - xfs_iunlock(ip, iolock); - out_unlock_mutex: - if (need_i_mutex) - mutex_unlock(&inode->i_mutex); + xfs_rw_iunlock(ip, iolock); return ret; } -- cgit v1.2.2 From f0d26e860b6c496464c5c8165d7df08dabde01fa Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 11 Jan 2011 10:15:36 +1100 Subject: xfs: split direct IO write path from xfs_file_aio_write The current xfs_file_aio_write code is a mess of locking shenanigans to handle the different locking requirements of buffered and direct IO. Start to clean this up by disentangling the direct IO path from the mess. This also removes the failed direct IO fallback path to buffered IO. XFS handles all direct IO cases without needing to fall back to buffered IO, so we can safely remove this unused path. This greatly simplifies the logic and locking needed in the write path. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_file.c | 179 ++++++++++++++++++++++++++++---------------- 1 file changed, 116 insertions(+), 63 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index b5e13fbb7386..00661fd21fc0 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -628,6 +628,116 @@ out_lock: return error; } +/* + * xfs_file_dio_aio_write - handle direct IO writes + * + * Lock the inode appropriately to prepare for and issue a direct IO write. + * By spearating it from the buffered write path we remove all the tricky to + * follow locking changes and looping. + * + * Returns with locks held indicated by @iolock and errors indicated by + * negative return values. + */ +STATIC ssize_t +xfs_file_dio_aio_write( + struct kiocb *iocb, + const struct iovec *iovp, + unsigned long nr_segs, + loff_t pos, + size_t ocount, + int *iolock) +{ + struct file *file = iocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + ssize_t ret = 0; + xfs_fsize_t new_size; + size_t count = ocount; + struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? + mp->m_rtdev_targp : mp->m_ddev_targp; + + *iolock = 0; + if ((pos & target->bt_smask) || (count & target->bt_smask)) + return -XFS_ERROR(EINVAL); + + /* + * For direct I/O, if there are cached pages or we're extending + * the file, we need IOLOCK_EXCL until we're sure the bytes at + * the new EOF have been zeroed and/or the cached pages are + * flushed out. + */ + if (mapping->nrpages || pos > ip->i_size) + *iolock = XFS_IOLOCK_EXCL; + else + *iolock = XFS_IOLOCK_SHARED; + xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); + + ret = generic_write_checks(file, &pos, &count, + S_ISBLK(inode->i_mode)); + if (ret) { + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock); + *iolock = 0; + return ret; + } + + new_size = pos + count; + if (new_size > ip->i_size) + ip->i_new_size = new_size; + + if (likely(!(file->f_mode & FMODE_NOCMTIME))) + file_update_time(file); + + /* + * If the offset is beyond the size of the file, we have a couple of + * things to do. First, if there is already space allocated we need to + * either create holes or zero the disk or ... + * + * If there is a page where the previous size lands, we need to zero it + * out up to the new size. 
+ */ + if (pos > ip->i_size) { + ret = -xfs_zero_eof(ip, pos, ip->i_size); + if (ret) { + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); + return ret; + } + } + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); + + /* + * If we're writing the file then make sure to clear the setuid and + * setgid bits if the process is not being run by root. This keeps + * people from modifying setuid and setgid binaries. + */ + ret = file_remove_suid(file); + if (unlikely(ret)) + return ret; + + if (mapping->nrpages) { + WARN_ON(*iolock != XFS_IOLOCK_EXCL); + ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, + FI_REMAPF_LOCKED); + if (ret) + return ret; + } + + if (*iolock == XFS_IOLOCK_EXCL) { + /* demote the lock now the cached pages are gone */ + xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); + *iolock = XFS_IOLOCK_SHARED; + } + + trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); + ret = generic_file_direct_write(iocb, iovp, + &nr_segs, pos, &iocb->ki_pos, count, ocount); + + /* No fallback to buffered IO on errors for XFS. */ + ASSERT(ret < 0 || ret == count); + return ret; +} + STATIC ssize_t xfs_file_aio_write( struct kiocb *iocb, @@ -670,12 +780,12 @@ xfs_file_aio_write( relock: if (ioflags & IO_ISDIRECT) { - iolock = XFS_IOLOCK_SHARED; - } else { - iolock = XFS_IOLOCK_EXCL; + ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, + ocount, &iolock); + goto done_io; } + iolock = XFS_IOLOCK_EXCL; -start: xfs_rw_ilock(ip, XFS_ILOCK_EXCL|iolock); ret = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); @@ -684,30 +794,6 @@ start: return ret; } - if (ioflags & IO_ISDIRECT) { - xfs_buftarg_t *target = - XFS_IS_REALTIME_INODE(ip) ? - mp->m_rtdev_targp : mp->m_ddev_targp; - - if ((pos & target->bt_smask) || (count & target->bt_smask)) { - xfs_rw_iunlock(ip, XFS_ILOCK_EXCL|iolock); - return XFS_ERROR(-EINVAL); - } - - /* - * For direct I/O, if there are cached pages or we're extending - * the file, we need IOLOCK_EXCL until we're sure the bytes at - * the new EOF have been zeroed and/or the cached pages are - * flushed out. Upgrade the I/O lock and start again. - */ - if (iolock != XFS_IOLOCK_EXCL && - (mapping->nrpages || pos > ip->i_size)) { - xfs_rw_iunlock(ip, XFS_ILOCK_EXCL|iolock); - iolock = XFS_IOLOCK_EXCL; - goto start; - } - } - new_size = pos + count; if (new_size > ip->i_size) ip->i_new_size = new_size; @@ -746,41 +832,7 @@ start: /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; - if ((ioflags & IO_ISDIRECT)) { - if (mapping->nrpages) { - WARN_ON(iolock != XFS_IOLOCK_EXCL); - ret = -xfs_flushinval_pages(ip, - (pos & PAGE_CACHE_MASK), - -1, FI_REMAPF_LOCKED); - if (ret) - goto out_unlock_internal; - } - - if (iolock == XFS_IOLOCK_EXCL) { - /* demote the lock now the cached pages are gone */ - xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); - iolock = XFS_IOLOCK_SHARED; - } - - trace_xfs_file_direct_write(ip, count, iocb->ki_pos, ioflags); - ret = generic_file_direct_write(iocb, iovp, - &nr_segs, pos, &iocb->ki_pos, count, ocount); - - /* - * direct-io write to a hole: fall through to buffered I/O - * for completing the rest of the request. 
- */ - if (ret >= 0 && ret != count) { - XFS_STATS_ADD(xs_write_bytes, ret); - - pos += ret; - count -= ret; - - ioflags &= ~IO_ISDIRECT; - xfs_rw_iunlock(ip, iolock); - goto relock; - } - } else { + if (!(ioflags & IO_ISDIRECT)) { int enospc = 0; write_retry: @@ -802,6 +854,7 @@ write_retry: current->backing_dev_info = NULL; +done_io: xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret); if (ret <= 0) -- cgit v1.2.2 From 637bbc75d9fda57c7bc77ce5ee37e29a77a0520d Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 11 Jan 2011 10:17:30 +1100 Subject: xfs: split buffered IO write path from xfs_file_aio_write Complete the split of the different write IO paths by splitting the buffered IO write path out of xfs_file_aio_write(). This makes the different mechanisms of the write patchs easier to follow. Signed-off-by: Dave Chinner Reviewed-by: Alex Elder Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_file.c | 146 +++++++++++++++++++++----------------------- 1 file changed, 69 insertions(+), 77 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index 00661fd21fc0..e2bcf51d292e 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -739,58 +739,31 @@ xfs_file_dio_aio_write( } STATIC ssize_t -xfs_file_aio_write( +xfs_file_buffered_aio_write( struct kiocb *iocb, const struct iovec *iovp, unsigned long nr_segs, - loff_t pos) + loff_t pos, + size_t ocount, + int *iolock) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; struct xfs_inode *ip = XFS_I(inode); - struct xfs_mount *mp = ip->i_mount; - ssize_t ret = 0; - int ioflags = 0; + ssize_t ret; + int enospc = 0; xfs_fsize_t new_size; - int iolock; - size_t ocount = 0, count; - - XFS_STATS_INC(xs_write_calls); - - BUG_ON(iocb->ki_pos != pos); - - if (unlikely(file->f_flags & O_DIRECT)) - ioflags |= IO_ISDIRECT; - if (file->f_mode & FMODE_NOCMTIME) - ioflags |= IO_INVIS; - - ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ); - if (ret) - return ret; - - count = ocount; - if (count == 0) - return 0; - - xfs_wait_for_freeze(mp, SB_FREEZE_WRITE); - - if (XFS_FORCED_SHUTDOWN(mp)) - return -EIO; + size_t count = ocount; -relock: - if (ioflags & IO_ISDIRECT) { - ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, - ocount, &iolock); - goto done_io; - } - iolock = XFS_IOLOCK_EXCL; + *iolock = XFS_IOLOCK_EXCL; + xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); - xfs_rw_ilock(ip, XFS_ILOCK_EXCL|iolock); ret = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); if (ret) { - xfs_rw_iunlock(ip, XFS_ILOCK_EXCL|iolock); + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock); + *iolock = 0; return ret; } @@ -798,67 +771,86 @@ relock: if (new_size > ip->i_size) ip->i_new_size = new_size; - if (likely(!(ioflags & IO_INVIS))) + if (likely(!(file->f_mode & FMODE_NOCMTIME))) file_update_time(file); - /* - * If the offset is beyond the size of the file, we have a couple - * of things to do. First, if there is already space allocated - * we need to either create holes or zero the disk or ... - * - * If there is a page where the previous size lands, we need - * to zero it out up to the new size. 
- */ - if (pos > ip->i_size) { ret = -xfs_zero_eof(ip, pos, ip->i_size); if (ret) { xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); - goto out_unlock_internal; + return ret; } } xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); - /* - * If we're writing the file then make sure to clear the - * setuid and setgid bits if the process is not being run - * by root. This keeps people from modifying setuid and - * setgid binaries. - */ ret = file_remove_suid(file); if (unlikely(ret)) - goto out_unlock_internal; + return ret; /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; - if (!(ioflags & IO_ISDIRECT)) { - int enospc = 0; - write_retry: - trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, ioflags); - ret = generic_file_buffered_write(iocb, iovp, nr_segs, - pos, &iocb->ki_pos, count, ret); - /* - * if we just got an ENOSPC, flush the inode now we - * aren't holding any page locks and retry *once* - */ - if (ret == -ENOSPC && !enospc) { - ret = xfs_flush_pages(ip, 0, -1, 0, FI_NONE); - if (ret) - goto out_unlock_internal; - enospc = 1; - goto write_retry; - } + trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0); + ret = generic_file_buffered_write(iocb, iovp, nr_segs, + pos, &iocb->ki_pos, count, ret); + /* + * if we just got an ENOSPC, flush the inode now we aren't holding any + * page locks and retry *once* + */ + if (ret == -ENOSPC && !enospc) { + ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE); + if (ret) + return ret; + enospc = 1; + goto write_retry; } - current->backing_dev_info = NULL; + return ret; +} + +STATIC ssize_t +xfs_file_aio_write( + struct kiocb *iocb, + const struct iovec *iovp, + unsigned long nr_segs, + loff_t pos) +{ + struct file *file = iocb->ki_filp; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; + struct xfs_inode *ip = XFS_I(inode); + ssize_t ret; + int iolock; + size_t ocount = 0; + + XFS_STATS_INC(xs_write_calls); + + BUG_ON(iocb->ki_pos != pos); + + ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ); + if (ret) + return ret; + + if (ocount == 0) + return 0; + + xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE); + + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) + return -EIO; + + if (unlikely(file->f_flags & O_DIRECT)) + ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, + ocount, &iolock); + else + ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos, + ocount, &iolock); -done_io: xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret); if (ret <= 0) - goto out_unlock_internal; + goto out_unlock; /* Handle various SYNC-type writes */ if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { @@ -877,7 +869,7 @@ done_io: ret = error2; } - out_unlock_internal: +out_unlock: xfs_aio_write_newsize_update(ip); xfs_rw_iunlock(ip, iolock); return ret; -- cgit v1.2.2 From 4d8d15812fd9bc96d0da11467d23e0373feae933 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 11 Jan 2011 10:23:42 +1100 Subject: xfs: factor common write setup code The buffered IO and direct IO write paths share a common set of checks and limiting code prior to issuing the write. Factor that into a common helper function. 
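With the checks factored out, both the direct IO and buffered IO write paths reduce to the same call sequence (a minimal sketch of the call pattern; the helper itself, xfs_file_aio_write_checks(), is added in the hunk below):

    /* common pre-write checks; returns with XFS_ILOCK_EXCL dropped */
    ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
    if (ret)
        return ret;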
Signed-off-by: Dave Chinner Reviewed-by: Alex Elder Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_file.c | 123 ++++++++++++++++++++------------------------ 1 file changed, 56 insertions(+), 67 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index e2bcf51d292e..5863dd8f448c 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -628,6 +628,58 @@ out_lock: return error; } +/* + * Common pre-write limit and setup checks. + * + * Returns with iolock held according to @iolock. + */ +STATIC ssize_t +xfs_file_aio_write_checks( + struct file *file, + loff_t *pos, + size_t *count, + int *iolock) +{ + struct inode *inode = file->f_mapping->host; + struct xfs_inode *ip = XFS_I(inode); + xfs_fsize_t new_size; + int error = 0; + + error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); + if (error) { + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock); + *iolock = 0; + return error; + } + + new_size = *pos + *count; + if (new_size > ip->i_size) + ip->i_new_size = new_size; + + if (likely(!(file->f_mode & FMODE_NOCMTIME))) + file_update_time(file); + + /* + * If the offset is beyond the size of the file, we need to zero any + * blocks that fall between the existing EOF and the start of this + * write. + */ + if (*pos > ip->i_size) + error = -xfs_zero_eof(ip, *pos, ip->i_size); + + xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); + if (error) + return error; + + /* + * If we're writing the file then make sure to clear the setuid and + * setgid bits if the process is not being run by root. This keeps + * people from modifying setuid and setgid binaries. + */ + return file_remove_suid(file); + +} + /* * xfs_file_dio_aio_write - handle direct IO writes * @@ -653,7 +705,6 @@ xfs_file_dio_aio_write( struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; ssize_t ret = 0; - xfs_fsize_t new_size; size_t count = ocount; struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? mp->m_rtdev_targp : mp->m_ddev_targp; @@ -674,45 +725,8 @@ xfs_file_dio_aio_write( *iolock = XFS_IOLOCK_SHARED; xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); - ret = generic_write_checks(file, &pos, &count, - S_ISBLK(inode->i_mode)); - if (ret) { - xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock); - *iolock = 0; - return ret; - } - - new_size = pos + count; - if (new_size > ip->i_size) - ip->i_new_size = new_size; - - if (likely(!(file->f_mode & FMODE_NOCMTIME))) - file_update_time(file); - - /* - * If the offset is beyond the size of the file, we have a couple of - * things to do. First, if there is already space allocated we need to - * either create holes or zero the disk or ... - * - * If there is a page where the previous size lands, we need to zero it - * out up to the new size. - */ - if (pos > ip->i_size) { - ret = -xfs_zero_eof(ip, pos, ip->i_size); - if (ret) { - xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); - return ret; - } - } - xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); - - /* - * If we're writing the file then make sure to clear the setuid and - * setgid bits if the process is not being run by root. This keeps - * people from modifying setuid and setgid binaries. 
- */ - ret = file_remove_suid(file); - if (unlikely(ret)) + ret = xfs_file_aio_write_checks(file, &pos, &count, iolock); + if (ret) return ret; if (mapping->nrpages) { WARN_ON(*iolock != XFS_IOLOCK_EXCL); ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, FI_REMAPF_LOCKED); if (ret) return ret; } @@ -753,38 +767,13 @@ xfs_file_buffered_aio_write( struct xfs_inode *ip = XFS_I(inode); ssize_t ret; int enospc = 0; - xfs_fsize_t new_size; size_t count = ocount; *iolock = XFS_IOLOCK_EXCL; xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); - ret = generic_write_checks(file, &pos, &count, - S_ISBLK(inode->i_mode)); - if (ret) { - xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock); - *iolock = 0; - return ret; - } - - new_size = pos + count; - if (new_size > ip->i_size) - ip->i_new_size = new_size; - - if (likely(!(file->f_mode & FMODE_NOCMTIME))) - file_update_time(file); - - if (pos > ip->i_size) { - ret = -xfs_zero_eof(ip, pos, ip->i_size); - if (ret) { - xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); - return ret; - } - } - xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); - - ret = file_remove_suid(file); - if (unlikely(ret)) + ret = xfs_file_aio_write_checks(file, &pos, &count, iolock); + if (ret) return ret; /* We can write back this queue in page reclaim */ current->backing_dev_info = mapping->backing_dev_info; -- cgit v1.2.2 From eda77982729b7170bdc9e8855f0682edf322d277 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 11 Jan 2011 10:22:40 +1100 Subject: xfs: serialise unaligned direct IOs When two concurrent unaligned, non-overlapping direct IOs are issued to the same block, the direct IO layer will race to zero the block. The result is that one of the concurrent IOs will overwrite data written by the other IO with zeros. This is demonstrated by the xfsqa test 240. To avoid this problem, serialise all unaligned direct IOs to an inode with a big hammer. We need a big hammer approach as we need to serialise AIO as well, so we can't just block writes on locks. Hence, the big hammer is calling xfs_ioend_wait() while holding out other unaligned direct IOs from starting. We don't bother trying to serialise aligned vs unaligned IOs as they are overlapping IO and the result of concurrent overlapping IOs is undefined - the result of either IO is a valid result so we let them race. Hence we only penalise unaligned IO, which already has a major overhead compared to aligned IO so this isn't a major problem. Signed-off-by: Dave Chinner Reviewed-by: Alex Elder Reviewed-by: Christoph Hellwig --- fs/xfs/linux-2.6/xfs_file.c | 38 ++++++++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index 5863dd8f448c..ef51eb43e137 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -684,9 +684,24 @@ xfs_file_aio_write_checks( * xfs_file_dio_aio_write - handle direct IO writes * * Lock the inode appropriately to prepare for and issue a direct IO write. - * By spearating it from the buffered write path we remove all the tricky to + * By separating it from the buffered write path we remove all the tricky to * follow locking changes and looping. * + * If there are cached pages or we're extending the file, we need IOLOCK_EXCL + * until we're sure the bytes at the new EOF have been zeroed and/or the cached + * pages are flushed out. + * + * In most cases the direct IO writes will be done holding IOLOCK_SHARED + * allowing them to be done in parallel with reads and other direct IO writes. + * However, if the IO is not aligned to filesystem blocks, the direct IO layer + * needs to do sub-block zeroing and that requires serialisation against other + * direct IOs to the same block. 
In this case we need to serialise the + * submission of the unaligned IOs so that we don't get racing block zeroing in + * the dio layer. To avoid the problem with aio, we also need to wait for + * outstanding IOs to complete so that unwritten extent conversion is completed + * before we try to map the overlapping block. This is currently implemented by + * hitting it with a big hammer (i.e. xfs_ioend_wait()). + * * Returns with locks held indicated by @iolock and errors indicated by * negative return values. */ @@ -706,6 +721,7 @@ xfs_file_dio_aio_write( struct xfs_mount *mp = ip->i_mount; ssize_t ret = 0; size_t count = ocount; + int unaligned_io = 0; struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? mp->m_rtdev_targp : mp->m_ddev_targp; @@ -713,13 +729,10 @@ xfs_file_dio_aio_write( if ((pos & target->bt_smask) || (count & target->bt_smask)) return -XFS_ERROR(EINVAL); - /* - * For direct I/O, if there are cached pages or we're extending - * the file, we need IOLOCK_EXCL until we're sure the bytes at - * the new EOF have been zeroed and/or the cached pages are - * flushed out. - */ - if (mapping->nrpages || pos > ip->i_size) + if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) + unaligned_io = 1; + + if (unaligned_io || mapping->nrpages || pos > ip->i_size) *iolock = XFS_IOLOCK_EXCL; else *iolock = XFS_IOLOCK_SHARED; @@ -737,8 +750,13 @@ xfs_file_dio_aio_write( return ret; } - if (*iolock == XFS_IOLOCK_EXCL) { - /* demote the lock now the cached pages are gone */ + /* + * If we are doing unaligned IO, wait for all other IO to drain, + * otherwise demote the lock if we had to flush cached pages + */ + if (unaligned_io) + xfs_ioend_wait(ip); + else if (*iolock == XFS_IOLOCK_EXCL) { xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); *iolock = XFS_IOLOCK_SHARED; } -- cgit v1.2.2 From 009ca3897ea8313b4ed4da964a2f31ecf5a0624d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 15 Nov 2010 03:04:51 +0000 Subject: fs/9p: Remove unnecessary semicolons Signed-off-by: Joe Perches Signed-off-by: Eric Van Hensbergen --- fs/9p/acl.c | 2 +- fs/9p/xattr.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/9p/acl.c b/fs/9p/acl.c index 6e58c4ca1e6e..c9da2640f6f1 100644 --- a/fs/9p/acl.c +++ b/fs/9p/acl.c @@ -28,7 +28,7 @@ static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name) { ssize_t size; void *value = NULL; - struct posix_acl *acl = NULL;; + struct posix_acl *acl = NULL; size = v9fs_fid_xattr_get(fid, name, NULL, 0); if (size > 0) { diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c index 43ec7df84336..d288773871b3 100644 --- a/fs/9p/xattr.c +++ b/fs/9p/xattr.c @@ -133,7 +133,7 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name, "p9_client_xattrcreate failed %d\n", retval); goto error; } - msize = fid->clnt->msize;; + msize = fid->clnt->msize; while (value_len) { if (value_len > (msize - P9_IOHDRSZ)) write_count = msize - P9_IOHDRSZ; -- cgit v1.2.2 From 6f81c1157468dd0a7377249c44ae83a7fe998bc9 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Fri, 10 Dec 2010 12:19:31 +0530 Subject: fs/9p: Fix the return error on default acl removal If we don't have default ACL, then trying to remove default acl on a file should return 0. 
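Expressed in code, the intended behaviour for a non-directory is (a minimal sketch of the one-line change below): report -EINVAL only when a default ACL is actually being set, and treat removal (a NULL acl) as a successful no-op:

    if (!S_ISDIR(inode->i_mode)) {
        /* setting a default ACL here is invalid; removing one is a no-op */
        retval = acl ? -EINVAL : 0;
        goto err_out;
    }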
Signed-off-by: Aneesh Kumar K.V Signed-off-by: Venkateswararao Jujjuri --- fs/9p/acl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/9p/acl.c b/fs/9p/acl.c index c9da2640f6f1..02a2cf616318 100644 --- a/fs/9p/acl.c +++ b/fs/9p/acl.c @@ -365,7 +365,7 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name, case ACL_TYPE_DEFAULT: name = POSIX_ACL_XATTR_DEFAULT; if (!S_ISDIR(inode->i_mode)) { - retval = -EINVAL; + retval = acl ? -EINVAL : 0; goto err_out; } break; -- cgit v1.2.2 From 255614c45943d43a3778a04b214692346b9d5049 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 2 Jan 2011 06:15:54 +0000 Subject: fs/9p: fix menu presentation Make the 9P_FS kconfig options subordinate to the 9P_FS kconfig symbol in the menu presentation instead of them all being at the same level. Signed-off-by: Randy Dunlap Signed-off-by: Eric Van Hensbergen --- fs/9p/Kconfig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig index 7e0511476797..814ac4e213a8 100644 --- a/fs/9p/Kconfig +++ b/fs/9p/Kconfig @@ -9,6 +9,8 @@ config 9P_FS If unsure, say N. +if 9P_FS + config 9P_FSCACHE bool "Enable 9P client caching support (EXPERIMENTAL)" depends on EXPERIMENTAL @@ -20,7 +22,6 @@ config 9P_FSCACHE config 9P_FS_POSIX_ACL bool "9P POSIX Access Control Lists" - depends on 9P_FS select FS_POSIX_ACL help POSIX Access Control Lists (ACLs) support permissions for users and @@ -30,3 +31,5 @@ config 9P_FS_POSIX_ACL Linux website . If you don't know what Access Control Lists are, say N + +endif -- cgit v1.2.2 From 53c06f4e0a4621bb40c8be6ff701e07f6226143d Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 10 Jan 2011 13:51:47 -0600 Subject: fs/9p: Move dotl inode operations into a seperate file Source Code Reorganization Signed-off-by: Aneesh Kumar K.V Signed-off-by: Venkateswararao Jujjuri Signed-off-by: Eric Van Hensbergen --- fs/9p/Makefile | 1 + fs/9p/v9fs.h | 42 ++- fs/9p/vfs_inode.c | 865 +----------------------------------------------- fs/9p/vfs_inode_dotl.c | 870 +++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 916 insertions(+), 862 deletions(-) create mode 100644 fs/9p/vfs_inode_dotl.c (limited to 'fs') diff --git a/fs/9p/Makefile b/fs/9p/Makefile index f8ba37effd1b..ab8c12780634 100644 --- a/fs/9p/Makefile +++ b/fs/9p/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_9P_FS) := 9p.o 9p-objs := \ vfs_super.o \ vfs_inode.o \ + vfs_inode_dotl.o \ vfs_addr.o \ vfs_file.o \ vfs_dir.o \ diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h index cb6396855e2d..c4b5d8864f0d 100644 --- a/fs/9p/v9fs.h +++ b/fs/9p/v9fs.h @@ -113,9 +113,27 @@ struct v9fs_session_info { struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *, char *); -void v9fs_session_close(struct v9fs_session_info *v9ses); -void v9fs_session_cancel(struct v9fs_session_info *v9ses); -void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses); +extern void v9fs_session_close(struct v9fs_session_info *v9ses); +extern void v9fs_session_cancel(struct v9fs_session_info *v9ses); +extern void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses); +extern struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, + struct nameidata *nameidata); +extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d); +extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d); +extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry); 
+extern void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, + void *p); +extern struct inode *v9fs_inode(struct v9fs_session_info *v9ses, + struct p9_fid *fid, + struct super_block *sb); + +extern const struct inode_operations v9fs_dir_inode_operations_dotl; +extern const struct inode_operations v9fs_file_inode_operations_dotl; +extern const struct inode_operations v9fs_symlink_inode_operations_dotl; +extern struct inode *v9fs_inode_dotl(struct v9fs_session_info *v9ses, + struct p9_fid *fid, + struct super_block *sb); /* other default globals */ #define V9FS_PORT 564 @@ -138,3 +156,21 @@ static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses) { return v9ses->flags & V9FS_PROTO_2000L; } + +/** + * v9fs_inode_from_fid - Helper routine to populate an inode by + * issuing a attribute request + * @v9ses: session information + * @fid: fid to issue attribute request for + * @sb: superblock on which to create inode + * + */ +static inline struct inode * +v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, + struct super_block *sb) +{ + if (v9fs_proto_dotl(v9ses)) + return v9fs_inode_dotl(v9ses, fid, sb); + else + return v9fs_inode(v9ses, fid, sb); +} diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 59782981b225..392358672483 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -49,15 +49,8 @@ static const struct inode_operations v9fs_dir_inode_operations; static const struct inode_operations v9fs_dir_inode_operations_dotu; -static const struct inode_operations v9fs_dir_inode_operations_dotl; static const struct inode_operations v9fs_file_inode_operations; -static const struct inode_operations v9fs_file_inode_operations_dotl; static const struct inode_operations v9fs_symlink_inode_operations; -static const struct inode_operations v9fs_symlink_inode_operations_dotl; - -static int -v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode, - dev_t rdev); /** * unixmode2p9mode - convert unix mode bits to plan 9 @@ -250,41 +243,6 @@ void v9fs_destroy_inode(struct inode *inode) } #endif -/** - * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a - * new file system object. This checks the S_ISGID to determine the owning - * group of the new file system object. - */ - -static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode) -{ - BUG_ON(dir_inode == NULL); - - if (dir_inode->i_mode & S_ISGID) { - /* set_gid bit is set.*/ - return dir_inode->i_gid; - } - return current_fsgid(); -} - -/** - * v9fs_dentry_from_dir_inode - helper function to get the dentry from - * dir inode. - * - */ - -static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode) -{ - struct dentry *dentry; - - spin_lock(&inode->i_lock); - /* Directory should have only one entry. 
*/ - BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry)); - dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias); - spin_unlock(&inode->i_lock); - return dentry; -} - /** * v9fs_get_inode - helper function to setup an inode * @sb: superblock @@ -454,7 +412,7 @@ void v9fs_evict_inode(struct inode *inode) #endif } -static struct inode * +struct inode * v9fs_inode(struct v9fs_session_info *v9ses, struct p9_fid *fid, struct super_block *sb) { @@ -489,60 +447,6 @@ error: return ERR_PTR(err); } -static struct inode * -v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, - struct super_block *sb) -{ - struct inode *ret = NULL; - int err; - struct p9_stat_dotl *st; - - st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); - if (IS_ERR(st)) - return ERR_CAST(st); - - ret = v9fs_get_inode(sb, st->st_mode); - if (IS_ERR(ret)) { - err = PTR_ERR(ret); - goto error; - } - - v9fs_stat2inode_dotl(st, ret); - ret->i_ino = v9fs_qid2ino(&st->qid); -#ifdef CONFIG_9P_FSCACHE - v9fs_vcookie_set_qid(ret, &st->qid); - v9fs_cache_inode_get_cookie(ret); -#endif - err = v9fs_get_acl(ret, fid); - if (err) { - iput(ret); - goto error; - } - kfree(st); - return ret; -error: - kfree(st); - return ERR_PTR(err); -} - -/** - * v9fs_inode_from_fid - Helper routine to populate an inode by - * issuing a attribute request - * @v9ses: session information - * @fid: fid to issue attribute request for - * @sb: superblock on which to create inode - * - */ -static inline struct inode * -v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, - struct super_block *sb) -{ - if (v9fs_proto_dotl(v9ses)) - return v9fs_inode_dotl(v9ses, fid, sb); - else - return v9fs_inode(v9ses, fid, sb); -} - /** * v9fs_remove - helper function to remove files and directories * @dir: directory inode that is being deleted @@ -656,144 +560,6 @@ error: return ERR_PTR(err); } -/** - * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol. - * @dir: directory inode that is being created - * @dentry: dentry that is being deleted - * @mode: create permissions - * @nd: path information - * - */ - -static int -v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode, - struct nameidata *nd) -{ - int err = 0; - char *name = NULL; - gid_t gid; - int flags; - mode_t mode; - struct v9fs_session_info *v9ses; - struct p9_fid *fid = NULL; - struct p9_fid *dfid, *ofid; - struct file *filp; - struct p9_qid qid; - struct inode *inode; - struct posix_acl *pacl = NULL, *dacl = NULL; - - v9ses = v9fs_inode2v9ses(dir); - if (nd && nd->flags & LOOKUP_OPEN) - flags = nd->intent.open.flags - 1; - else { - /* - * create call without LOOKUP_OPEN is due - * to mknod of regular files. So use mknod - * operation. 
- */ - return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0); - } - - name = (char *) dentry->d_name.name; - P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x " - "mode:0x%x\n", name, flags, omode); - - dfid = v9fs_fid_lookup(dentry->d_parent); - if (IS_ERR(dfid)) { - err = PTR_ERR(dfid); - P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); - return err; - } - - /* clone a fid to use for creation */ - ofid = p9_client_walk(dfid, 0, NULL, 1); - if (IS_ERR(ofid)) { - err = PTR_ERR(ofid); - P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); - return err; - } - - gid = v9fs_get_fsgid_for_create(dir); - - mode = omode; - /* Update mode based on ACL value */ - err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); - if (err) { - P9_DPRINTK(P9_DEBUG_VFS, - "Failed to get acl values in creat %d\n", err); - goto error; - } - err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid); - if (err < 0) { - P9_DPRINTK(P9_DEBUG_VFS, - "p9_client_open_dotl failed in creat %d\n", - err); - goto error; - } - /* instantiate inode and assign the unopened fid to the dentry */ - if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE || - (nd && nd->flags & LOOKUP_OPEN)) { - fid = p9_client_walk(dfid, 1, &name, 1); - if (IS_ERR(fid)) { - err = PTR_ERR(fid); - P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", - err); - fid = NULL; - goto error; - } - - inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", - err); - goto error; - } - d_set_d_op(dentry, &v9fs_cached_dentry_operations); - d_instantiate(dentry, inode); - err = v9fs_fid_add(dentry, fid); - if (err < 0) - goto error; - /* The fid would get clunked via a dput */ - fid = NULL; - } else { - /* - * Not in cached mode. No need to populate - * inode with stat. 
We need to get an inode - * so that we can set the acl with dentry - */ - inode = v9fs_get_inode(dir->i_sb, mode); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - goto error; - } - d_set_d_op(dentry, &v9fs_dentry_operations); - d_instantiate(dentry, inode); - } - /* Now set the ACL based on the default value */ - v9fs_set_create_acl(dentry, dacl, pacl); - - /* if we are opening a file, assign the open fid to the file */ - if (nd && nd->flags & LOOKUP_OPEN) { - filp = lookup_instantiate_filp(nd, dentry, generic_file_open); - if (IS_ERR(filp)) { - p9_client_clunk(ofid); - return PTR_ERR(filp); - } - filp->private_data = ofid; - } else - p9_client_clunk(ofid); - - return 0; - -error: - if (ofid) - p9_client_clunk(ofid); - if (fid) - p9_client_clunk(fid); - return err; -} - /** * v9fs_vfs_create - VFS hook to create files * @dir: directory inode that is being created @@ -884,107 +650,6 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) return err; } - -/** - * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory - * @dir: inode that is being unlinked - * @dentry: dentry that is being unlinked - * @mode: mode for new directory - * - */ - -static int v9fs_vfs_mkdir_dotl(struct inode *dir, - struct dentry *dentry, int omode) -{ - int err; - struct v9fs_session_info *v9ses; - struct p9_fid *fid = NULL, *dfid = NULL; - gid_t gid; - char *name; - mode_t mode; - struct inode *inode; - struct p9_qid qid; - struct dentry *dir_dentry; - struct posix_acl *dacl = NULL, *pacl = NULL; - - P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name); - err = 0; - v9ses = v9fs_inode2v9ses(dir); - - omode |= S_IFDIR; - if (dir->i_mode & S_ISGID) - omode |= S_ISGID; - - dir_dentry = v9fs_dentry_from_dir_inode(dir); - dfid = v9fs_fid_lookup(dir_dentry); - if (IS_ERR(dfid)) { - err = PTR_ERR(dfid); - P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); - dfid = NULL; - goto error; - } - - gid = v9fs_get_fsgid_for_create(dir); - mode = omode; - /* Update mode based on ACL value */ - err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); - if (err) { - P9_DPRINTK(P9_DEBUG_VFS, - "Failed to get acl values in mkdir %d\n", err); - goto error; - } - name = (char *) dentry->d_name.name; - err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid); - if (err < 0) - goto error; - - /* instantiate inode and assign the unopened fid to the dentry */ - if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { - fid = p9_client_walk(dfid, 1, &name, 1); - if (IS_ERR(fid)) { - err = PTR_ERR(fid); - P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", - err); - fid = NULL; - goto error; - } - - inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", - err); - goto error; - } - d_set_d_op(dentry, &v9fs_cached_dentry_operations); - d_instantiate(dentry, inode); - err = v9fs_fid_add(dentry, fid); - if (err < 0) - goto error; - fid = NULL; - } else { - /* - * Not in cached mode. No need to populate - * inode with stat. 
We need to get an inode - * so that we can set the acl with dentry - */ - inode = v9fs_get_inode(dir->i_sb, mode); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - goto error; - } - d_set_d_op(dentry, &v9fs_dentry_operations); - d_instantiate(dentry, inode); - } - /* Now set the ACL based on the default value */ - v9fs_set_create_acl(dentry, dacl, pacl); - -error: - if (fid) - p9_client_clunk(fid); - return err; -} - /** * v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode * @dir: inode that is being walked from @@ -993,7 +658,7 @@ error: * */ -static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, +struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nameidata) { struct super_block *sb; @@ -1063,7 +728,7 @@ error: * */ -static int v9fs_vfs_unlink(struct inode *i, struct dentry *d) +int v9fs_vfs_unlink(struct inode *i, struct dentry *d) { return v9fs_remove(i, d, 0); } @@ -1075,7 +740,7 @@ static int v9fs_vfs_unlink(struct inode *i, struct dentry *d) * */ -static int v9fs_vfs_rmdir(struct inode *i, struct dentry *d) +int v9fs_vfs_rmdir(struct inode *i, struct dentry *d) { return v9fs_remove(i, d, 1); } @@ -1089,7 +754,7 @@ static int v9fs_vfs_rmdir(struct inode *i, struct dentry *d) * */ -static int +int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { @@ -1196,42 +861,6 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, return 0; } -static int -v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry, - struct kstat *stat) -{ - int err; - struct v9fs_session_info *v9ses; - struct p9_fid *fid; - struct p9_stat_dotl *st; - - P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry); - err = -EPERM; - v9ses = v9fs_inode2v9ses(dentry->d_inode); - if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) - return simple_getattr(mnt, dentry, stat); - - fid = v9fs_fid_lookup(dentry); - if (IS_ERR(fid)) - return PTR_ERR(fid); - - /* Ask for all the fields in stat structure. 
Server will return - * whatever it supports - */ - - st = p9_client_getattr_dotl(fid, P9_STATS_ALL); - if (IS_ERR(st)) - return PTR_ERR(st); - - v9fs_stat2inode_dotl(st, dentry->d_inode); - generic_fillattr(dentry->d_inode, stat); - /* Change block size to what the server returned */ - stat->blksize = st->st_blksize; - - kfree(st); - return 0; -} - /** * v9fs_vfs_setattr - set file metadata * @dentry: file whose metadata to set @@ -1290,64 +919,6 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) return 0; } -/** - * v9fs_vfs_setattr_dotl - set file metadata - * @dentry: file whose metadata to set - * @iattr: metadata assignment structure - * - */ - -int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) -{ - int retval; - struct v9fs_session_info *v9ses; - struct p9_fid *fid; - struct p9_iattr_dotl p9attr; - - P9_DPRINTK(P9_DEBUG_VFS, "\n"); - - retval = inode_change_ok(dentry->d_inode, iattr); - if (retval) - return retval; - - p9attr.valid = iattr->ia_valid; - p9attr.mode = iattr->ia_mode; - p9attr.uid = iattr->ia_uid; - p9attr.gid = iattr->ia_gid; - p9attr.size = iattr->ia_size; - p9attr.atime_sec = iattr->ia_atime.tv_sec; - p9attr.atime_nsec = iattr->ia_atime.tv_nsec; - p9attr.mtime_sec = iattr->ia_mtime.tv_sec; - p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; - - retval = -EPERM; - v9ses = v9fs_inode2v9ses(dentry->d_inode); - fid = v9fs_fid_lookup(dentry); - if (IS_ERR(fid)) - return PTR_ERR(fid); - - retval = p9_client_setattr(fid, &p9attr); - if (retval < 0) - return retval; - - if ((iattr->ia_valid & ATTR_SIZE) && - iattr->ia_size != i_size_read(dentry->d_inode)) { - retval = vmtruncate(dentry->d_inode, iattr->ia_size); - if (retval) - return retval; - } - - setattr_copy(dentry->d_inode, iattr); - mark_inode_dirty(dentry->d_inode); - if (iattr->ia_valid & ATTR_MODE) { - /* We also want to update ACL when we update mode bits */ - retval = v9fs_acl_chmod(dentry); - if (retval < 0) - return retval; - } - return 0; -} - /** * v9fs_stat2inode - populate an inode structure with mistat info * @stat: Plan 9 metadata (mistat) structure @@ -1425,77 +996,6 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9; } -/** - * v9fs_stat2inode_dotl - populate an inode structure with stat info - * @stat: stat structure - * @inode: inode to populate - * @sb: superblock of filesystem - * - */ - -void -v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) -{ - - if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) { - inode->i_atime.tv_sec = stat->st_atime_sec; - inode->i_atime.tv_nsec = stat->st_atime_nsec; - inode->i_mtime.tv_sec = stat->st_mtime_sec; - inode->i_mtime.tv_nsec = stat->st_mtime_nsec; - inode->i_ctime.tv_sec = stat->st_ctime_sec; - inode->i_ctime.tv_nsec = stat->st_ctime_nsec; - inode->i_uid = stat->st_uid; - inode->i_gid = stat->st_gid; - inode->i_nlink = stat->st_nlink; - inode->i_mode = stat->st_mode; - inode->i_rdev = new_decode_dev(stat->st_rdev); - - if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) - init_special_inode(inode, inode->i_mode, inode->i_rdev); - - i_size_write(inode, stat->st_size); - inode->i_blocks = stat->st_blocks; - } else { - if (stat->st_result_mask & P9_STATS_ATIME) { - inode->i_atime.tv_sec = stat->st_atime_sec; - inode->i_atime.tv_nsec = stat->st_atime_nsec; - } - if (stat->st_result_mask & P9_STATS_MTIME) { - inode->i_mtime.tv_sec = stat->st_mtime_sec; - inode->i_mtime.tv_nsec = stat->st_mtime_nsec; - } - if 
(stat->st_result_mask & P9_STATS_CTIME) { - inode->i_ctime.tv_sec = stat->st_ctime_sec; - inode->i_ctime.tv_nsec = stat->st_ctime_nsec; - } - if (stat->st_result_mask & P9_STATS_UID) - inode->i_uid = stat->st_uid; - if (stat->st_result_mask & P9_STATS_GID) - inode->i_gid = stat->st_gid; - if (stat->st_result_mask & P9_STATS_NLINK) - inode->i_nlink = stat->st_nlink; - if (stat->st_result_mask & P9_STATS_MODE) { - inode->i_mode = stat->st_mode; - if ((S_ISBLK(inode->i_mode)) || - (S_ISCHR(inode->i_mode))) - init_special_inode(inode, inode->i_mode, - inode->i_rdev); - } - if (stat->st_result_mask & P9_STATS_RDEV) - inode->i_rdev = new_decode_dev(stat->st_rdev); - if (stat->st_result_mask & P9_STATS_SIZE) - i_size_write(inode, stat->st_size); - if (stat->st_result_mask & P9_STATS_BLOCKS) - inode->i_blocks = stat->st_blocks; - } - if (stat->st_result_mask & P9_STATS_GEN) - inode->i_generation = stat->st_gen; - - /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION - * because the inode structure does not have fields for them. - */ -} - /** * v9fs_qid2ino - convert qid into inode number * @qid: qid to hash @@ -1602,7 +1102,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd) * */ -static void +void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) { char *s = nd_get_link(nd); @@ -1645,94 +1145,6 @@ static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry, return 0; } -/** - * v9fs_vfs_symlink_dotl - helper function to create symlinks - * @dir: directory inode containing symlink - * @dentry: dentry for symlink - * @symname: symlink data - * - * See Also: 9P2000.L RFC for more information - * - */ - -static int -v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry, - const char *symname) -{ - struct v9fs_session_info *v9ses; - struct p9_fid *dfid; - struct p9_fid *fid = NULL; - struct inode *inode; - struct p9_qid qid; - char *name; - int err; - gid_t gid; - - name = (char *) dentry->d_name.name; - P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n", - dir->i_ino, name, symname); - v9ses = v9fs_inode2v9ses(dir); - - dfid = v9fs_fid_lookup(dentry->d_parent); - if (IS_ERR(dfid)) { - err = PTR_ERR(dfid); - P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); - return err; - } - - gid = v9fs_get_fsgid_for_create(dir); - - /* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */ - err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid); - - if (err < 0) { - P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err); - goto error; - } - - if (v9ses->cache) { - /* Now walk from the parent so we can get an unopened fid. */ - fid = p9_client_walk(dfid, 1, &name, 1); - if (IS_ERR(fid)) { - err = PTR_ERR(fid); - P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", - err); - fid = NULL; - goto error; - } - - /* instantiate inode and assign the unopened fid to dentry */ - inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", - err); - goto error; - } - d_set_d_op(dentry, &v9fs_cached_dentry_operations); - d_instantiate(dentry, inode); - err = v9fs_fid_add(dentry, fid); - if (err < 0) - goto error; - fid = NULL; - } else { - /* Not in cached mode. 
No need to populate inode with stat */ - inode = v9fs_get_inode(dir->i_sb, S_IFLNK); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - goto error; - } - d_set_d_op(dentry, &v9fs_dentry_operations); - d_instantiate(dentry, inode); - } - -error: - if (fid) - p9_client_clunk(fid); - - return err; -} - /** * v9fs_vfs_symlink - helper function to create symlinks * @dir: directory inode containing symlink @@ -1791,77 +1203,6 @@ clunk_fid: return retval; } -/** - * v9fs_vfs_link_dotl - create a hardlink for dotl - * @old_dentry: dentry for file to link to - * @dir: inode destination for new link - * @dentry: dentry for link - * - */ - -static int -v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir, - struct dentry *dentry) -{ - int err; - struct p9_fid *dfid, *oldfid; - char *name; - struct v9fs_session_info *v9ses; - struct dentry *dir_dentry; - - P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n", - dir->i_ino, old_dentry->d_name.name, - dentry->d_name.name); - - v9ses = v9fs_inode2v9ses(dir); - dir_dentry = v9fs_dentry_from_dir_inode(dir); - dfid = v9fs_fid_lookup(dir_dentry); - if (IS_ERR(dfid)) - return PTR_ERR(dfid); - - oldfid = v9fs_fid_lookup(old_dentry); - if (IS_ERR(oldfid)) - return PTR_ERR(oldfid); - - name = (char *) dentry->d_name.name; - - err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name); - - if (err < 0) { - P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err); - return err; - } - - if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { - /* Get the latest stat info from server. */ - struct p9_fid *fid; - struct p9_stat_dotl *st; - - fid = v9fs_fid_lookup(old_dentry); - if (IS_ERR(fid)) - return PTR_ERR(fid); - - st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); - if (IS_ERR(st)) - return PTR_ERR(st); - - v9fs_stat2inode_dotl(st, old_dentry->d_inode); - - kfree(st); - } else { - /* Caching disabled. No need to get upto date stat info. - * This dentry will be released immediately. 
So, just hold the - * inode - */ - ihold(old_dentry->d_inode); - } - - d_set_d_op(dentry, old_dentry->d_op); - d_instantiate(dentry, old_dentry->d_inode); - - return err; -} - /** * v9fs_vfs_mknod - create a special file * @dir: inode destination for new link @@ -1907,160 +1248,6 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) return retval; } -/** - * v9fs_vfs_mknod_dotl - create a special file - * @dir: inode destination for new link - * @dentry: dentry for file - * @mode: mode for creation - * @rdev: device associated with special file - * - */ -static int -v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode, - dev_t rdev) -{ - int err; - char *name; - mode_t mode; - struct v9fs_session_info *v9ses; - struct p9_fid *fid = NULL, *dfid = NULL; - struct inode *inode; - gid_t gid; - struct p9_qid qid; - struct dentry *dir_dentry; - struct posix_acl *dacl = NULL, *pacl = NULL; - - P9_DPRINTK(P9_DEBUG_VFS, - " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino, - dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev)); - - if (!new_valid_dev(rdev)) - return -EINVAL; - - v9ses = v9fs_inode2v9ses(dir); - dir_dentry = v9fs_dentry_from_dir_inode(dir); - dfid = v9fs_fid_lookup(dir_dentry); - if (IS_ERR(dfid)) { - err = PTR_ERR(dfid); - P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); - dfid = NULL; - goto error; - } - - gid = v9fs_get_fsgid_for_create(dir); - mode = omode; - /* Update mode based on ACL value */ - err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); - if (err) { - P9_DPRINTK(P9_DEBUG_VFS, - "Failed to get acl values in mknod %d\n", err); - goto error; - } - name = (char *) dentry->d_name.name; - - err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid); - if (err < 0) - goto error; - - /* instantiate inode and assign the unopened fid to the dentry */ - if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { - fid = p9_client_walk(dfid, 1, &name, 1); - if (IS_ERR(fid)) { - err = PTR_ERR(fid); - P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", - err); - fid = NULL; - goto error; - } - - inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", - err); - goto error; - } - d_set_d_op(dentry, &v9fs_cached_dentry_operations); - d_instantiate(dentry, inode); - err = v9fs_fid_add(dentry, fid); - if (err < 0) - goto error; - fid = NULL; - } else { - /* - * Not in cached mode. No need to populate inode with stat. 
- * socket syscall returns a fd, so we need instantiate - */ - inode = v9fs_get_inode(dir->i_sb, mode); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - goto error; - } - d_set_d_op(dentry, &v9fs_dentry_operations); - d_instantiate(dentry, inode); - } - /* Now set the ACL based on the default value */ - v9fs_set_create_acl(dentry, dacl, pacl); -error: - if (fid) - p9_client_clunk(fid); - return err; -} - -static int -v9fs_vfs_readlink_dotl(struct dentry *dentry, char *buffer, int buflen) -{ - int retval; - struct p9_fid *fid; - char *target = NULL; - - P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name); - retval = -EPERM; - fid = v9fs_fid_lookup(dentry); - if (IS_ERR(fid)) - return PTR_ERR(fid); - - retval = p9_client_readlink(fid, &target); - if (retval < 0) - return retval; - - strncpy(buffer, target, buflen); - P9_DPRINTK(P9_DEBUG_VFS, "%s -> %s\n", dentry->d_name.name, buffer); - - retval = strnlen(buffer, buflen); - return retval; -} - -/** - * v9fs_vfs_follow_link_dotl - follow a symlink path - * @dentry: dentry for symlink - * @nd: nameidata - * - */ - -static void * -v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd) -{ - int len = 0; - char *link = __getname(); - - P9_DPRINTK(P9_DEBUG_VFS, "%s n", dentry->d_name.name); - - if (!link) - link = ERR_PTR(-ENOMEM); - else { - len = v9fs_vfs_readlink_dotl(dentry, link, PATH_MAX); - if (len < 0) { - __putname(link); - link = ERR_PTR(len); - } else - link[min(len, PATH_MAX-1)] = 0; - } - nd_set_link(nd, link); - - return NULL; -} - static const struct inode_operations v9fs_dir_inode_operations_dotu = { .create = v9fs_vfs_create, .lookup = v9fs_vfs_lookup, @@ -2075,25 +1262,6 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = { .setattr = v9fs_vfs_setattr, }; -static const struct inode_operations v9fs_dir_inode_operations_dotl = { - .create = v9fs_vfs_create_dotl, - .lookup = v9fs_vfs_lookup, - .link = v9fs_vfs_link_dotl, - .symlink = v9fs_vfs_symlink_dotl, - .unlink = v9fs_vfs_unlink, - .mkdir = v9fs_vfs_mkdir_dotl, - .rmdir = v9fs_vfs_rmdir, - .mknod = v9fs_vfs_mknod_dotl, - .rename = v9fs_vfs_rename, - .getattr = v9fs_vfs_getattr_dotl, - .setattr = v9fs_vfs_setattr_dotl, - .setxattr = generic_setxattr, - .getxattr = generic_getxattr, - .removexattr = generic_removexattr, - .listxattr = v9fs_listxattr, - .check_acl = v9fs_check_acl, -}; - static const struct inode_operations v9fs_dir_inode_operations = { .create = v9fs_vfs_create, .lookup = v9fs_vfs_lookup, @@ -2111,16 +1279,6 @@ static const struct inode_operations v9fs_file_inode_operations = { .setattr = v9fs_vfs_setattr, }; -static const struct inode_operations v9fs_file_inode_operations_dotl = { - .getattr = v9fs_vfs_getattr_dotl, - .setattr = v9fs_vfs_setattr_dotl, - .setxattr = generic_setxattr, - .getxattr = generic_getxattr, - .removexattr = generic_removexattr, - .listxattr = v9fs_listxattr, - .check_acl = v9fs_check_acl, -}; - static const struct inode_operations v9fs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = v9fs_vfs_follow_link, @@ -2129,14 +1287,3 @@ static const struct inode_operations v9fs_symlink_inode_operations = { .setattr = v9fs_vfs_setattr, }; -static const struct inode_operations v9fs_symlink_inode_operations_dotl = { - .readlink = v9fs_vfs_readlink_dotl, - .follow_link = v9fs_vfs_follow_link_dotl, - .put_link = v9fs_vfs_put_link, - .getattr = v9fs_vfs_getattr_dotl, - .setattr = v9fs_vfs_setattr_dotl, - .setxattr = generic_setxattr, - .getxattr = generic_getxattr, - .removexattr = 
generic_removexattr, - .listxattr = v9fs_listxattr, -}; diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c new file mode 100644 index 000000000000..daf2f0665a6f --- /dev/null +++ b/fs/9p/vfs_inode_dotl.c @@ -0,0 +1,870 @@ +/* + * linux/fs/9p/vfs_inode_dotl.c + * + * This file contains vfs inode ops for the 9P2000.L protocol. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" +#include "cache.h" +#include "xattr.h" +#include "acl.h" + +static int +v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode, + dev_t rdev); + +/** + * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a + * new file system object. This checks the S_ISGID to determine the owning + * group of the new file system object. + */ + +static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode) +{ + BUG_ON(dir_inode == NULL); + + if (dir_inode->i_mode & S_ISGID) { + /* set_gid bit is set.*/ + return dir_inode->i_gid; + } + return current_fsgid(); +} + +/** + * v9fs_dentry_from_dir_inode - helper function to get the dentry from + * dir inode. + * + */ + +static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode) +{ + struct dentry *dentry; + + spin_lock(&inode->i_lock); + /* Directory should have only one entry. */ + BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry)); + dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias); + spin_unlock(&inode->i_lock); + return dentry; +} + +struct inode * +v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, + struct super_block *sb) +{ + struct inode *ret = NULL; + int err; + struct p9_stat_dotl *st; + + st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); + if (IS_ERR(st)) + return ERR_CAST(st); + + ret = v9fs_get_inode(sb, st->st_mode); + if (IS_ERR(ret)) { + err = PTR_ERR(ret); + goto error; + } + + v9fs_stat2inode_dotl(st, ret); + ret->i_ino = v9fs_qid2ino(&st->qid); +#ifdef CONFIG_9P_FSCACHE + v9fs_vcookie_set_qid(ret, &st->qid); + v9fs_cache_inode_get_cookie(ret); +#endif + err = v9fs_get_acl(ret, fid); + if (err) { + iput(ret); + goto error; + } + kfree(st); + return ret; +error: + kfree(st); + return ERR_PTR(err); +} + +/** + * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol. 
+ * @dir: directory inode that is being created + * @dentry: dentry that is being deleted + * @mode: create permissions + * @nd: path information + * + */ + +static int +v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode, + struct nameidata *nd) +{ + int err = 0; + char *name = NULL; + gid_t gid; + int flags; + mode_t mode; + struct v9fs_session_info *v9ses; + struct p9_fid *fid = NULL; + struct p9_fid *dfid, *ofid; + struct file *filp; + struct p9_qid qid; + struct inode *inode; + struct posix_acl *pacl = NULL, *dacl = NULL; + + v9ses = v9fs_inode2v9ses(dir); + if (nd && nd->flags & LOOKUP_OPEN) + flags = nd->intent.open.flags - 1; + else { + /* + * create call without LOOKUP_OPEN is due + * to mknod of regular files. So use mknod + * operation. + */ + return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0); + } + + name = (char *) dentry->d_name.name; + P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x " + "mode:0x%x\n", name, flags, omode); + + dfid = v9fs_fid_lookup(dentry->d_parent); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + return err; + } + + /* clone a fid to use for creation */ + ofid = p9_client_walk(dfid, 0, NULL, 1); + if (IS_ERR(ofid)) { + err = PTR_ERR(ofid); + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); + return err; + } + + gid = v9fs_get_fsgid_for_create(dir); + + mode = omode; + /* Update mode based on ACL value */ + err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); + if (err) { + P9_DPRINTK(P9_DEBUG_VFS, + "Failed to get acl values in creat %d\n", err); + goto error; + } + err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid); + if (err < 0) { + P9_DPRINTK(P9_DEBUG_VFS, + "p9_client_open_dotl failed in creat %d\n", + err); + goto error; + } + /* instantiate inode and assign the unopened fid to the dentry */ + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE || + (nd && nd->flags & LOOKUP_OPEN)) { + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", + err); + fid = NULL; + goto error; + } + + inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", + err); + goto error; + } + d_set_d_op(dentry, &v9fs_cached_dentry_operations); + d_instantiate(dentry, inode); + err = v9fs_fid_add(dentry, fid); + if (err < 0) + goto error; + /* The fid would get clunked via a dput */ + fid = NULL; + } else { + /* + * Not in cached mode. No need to populate + * inode with stat. 
We need to get an inode + * so that we can set the acl with dentry + */ + inode = v9fs_get_inode(dir->i_sb, mode); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto error; + } + d_set_d_op(dentry, &v9fs_dentry_operations); + d_instantiate(dentry, inode); + } + /* Now set the ACL based on the default value */ + v9fs_set_create_acl(dentry, dacl, pacl); + + /* if we are opening a file, assign the open fid to the file */ + if (nd && nd->flags & LOOKUP_OPEN) { + filp = lookup_instantiate_filp(nd, dentry, generic_file_open); + if (IS_ERR(filp)) { + p9_client_clunk(ofid); + return PTR_ERR(filp); + } + filp->private_data = ofid; + } else + p9_client_clunk(ofid); + + return 0; + +error: + if (ofid) + p9_client_clunk(ofid); + if (fid) + p9_client_clunk(fid); + return err; +} + +/** + * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory + * @dir: inode that is being unlinked + * @dentry: dentry that is being unlinked + * @mode: mode for new directory + * + */ + +static int v9fs_vfs_mkdir_dotl(struct inode *dir, + struct dentry *dentry, int omode) +{ + int err; + struct v9fs_session_info *v9ses; + struct p9_fid *fid = NULL, *dfid = NULL; + gid_t gid; + char *name; + mode_t mode; + struct inode *inode; + struct p9_qid qid; + struct dentry *dir_dentry; + struct posix_acl *dacl = NULL, *pacl = NULL; + + P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name); + err = 0; + v9ses = v9fs_inode2v9ses(dir); + + omode |= S_IFDIR; + if (dir->i_mode & S_ISGID) + omode |= S_ISGID; + + dir_dentry = v9fs_dentry_from_dir_inode(dir); + dfid = v9fs_fid_lookup(dir_dentry); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + dfid = NULL; + goto error; + } + + gid = v9fs_get_fsgid_for_create(dir); + mode = omode; + /* Update mode based on ACL value */ + err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); + if (err) { + P9_DPRINTK(P9_DEBUG_VFS, + "Failed to get acl values in mkdir %d\n", err); + goto error; + } + name = (char *) dentry->d_name.name; + err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid); + if (err < 0) + goto error; + + /* instantiate inode and assign the unopened fid to the dentry */ + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", + err); + fid = NULL; + goto error; + } + + inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", + err); + goto error; + } + d_set_d_op(dentry, &v9fs_cached_dentry_operations); + d_instantiate(dentry, inode); + err = v9fs_fid_add(dentry, fid); + if (err < 0) + goto error; + fid = NULL; + } else { + /* + * Not in cached mode. No need to populate + * inode with stat. 
We need to get an inode + * so that we can set the acl with dentry + */ + inode = v9fs_get_inode(dir->i_sb, mode); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto error; + } + d_set_d_op(dentry, &v9fs_dentry_operations); + d_instantiate(dentry, inode); + } + /* Now set the ACL based on the default value */ + v9fs_set_create_acl(dentry, dacl, pacl); + +error: + if (fid) + p9_client_clunk(fid); + return err; +} + +static int +v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat) +{ + int err; + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + struct p9_stat_dotl *st; + + P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry); + err = -EPERM; + v9ses = v9fs_inode2v9ses(dentry->d_inode); + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) + return simple_getattr(mnt, dentry, stat); + + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + /* Ask for all the fields in stat structure. Server will return + * whatever it supports + */ + + st = p9_client_getattr_dotl(fid, P9_STATS_ALL); + if (IS_ERR(st)) + return PTR_ERR(st); + + v9fs_stat2inode_dotl(st, dentry->d_inode); + generic_fillattr(dentry->d_inode, stat); + /* Change block size to what the server returned */ + stat->blksize = st->st_blksize; + + kfree(st); + return 0; +} + +/** + * v9fs_vfs_setattr_dotl - set file metadata + * @dentry: file whose metadata to set + * @iattr: metadata assignment structure + * + */ + +int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) +{ + int retval; + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + struct p9_iattr_dotl p9attr; + + P9_DPRINTK(P9_DEBUG_VFS, "\n"); + + retval = inode_change_ok(dentry->d_inode, iattr); + if (retval) + return retval; + + p9attr.valid = iattr->ia_valid; + p9attr.mode = iattr->ia_mode; + p9attr.uid = iattr->ia_uid; + p9attr.gid = iattr->ia_gid; + p9attr.size = iattr->ia_size; + p9attr.atime_sec = iattr->ia_atime.tv_sec; + p9attr.atime_nsec = iattr->ia_atime.tv_nsec; + p9attr.mtime_sec = iattr->ia_mtime.tv_sec; + p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; + + retval = -EPERM; + v9ses = v9fs_inode2v9ses(dentry->d_inode); + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + retval = p9_client_setattr(fid, &p9attr); + if (retval < 0) + return retval; + + if ((iattr->ia_valid & ATTR_SIZE) && + iattr->ia_size != i_size_read(dentry->d_inode)) { + retval = vmtruncate(dentry->d_inode, iattr->ia_size); + if (retval) + return retval; + } + + setattr_copy(dentry->d_inode, iattr); + mark_inode_dirty(dentry->d_inode); + if (iattr->ia_valid & ATTR_MODE) { + /* We also want to update ACL when we update mode bits */ + retval = v9fs_acl_chmod(dentry); + if (retval < 0) + return retval; + } + return 0; +} + +/** + * v9fs_stat2inode_dotl - populate an inode structure with stat info + * @stat: stat structure + * @inode: inode to populate + * @sb: superblock of filesystem + * + */ + +void +v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) +{ + + if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) { + inode->i_atime.tv_sec = stat->st_atime_sec; + inode->i_atime.tv_nsec = stat->st_atime_nsec; + inode->i_mtime.tv_sec = stat->st_mtime_sec; + inode->i_mtime.tv_nsec = stat->st_mtime_nsec; + inode->i_ctime.tv_sec = stat->st_ctime_sec; + inode->i_ctime.tv_nsec = stat->st_ctime_nsec; + inode->i_uid = stat->st_uid; + inode->i_gid = stat->st_gid; + inode->i_nlink = stat->st_nlink; + inode->i_mode = stat->st_mode; + inode->i_rdev = 
new_decode_dev(stat->st_rdev); + + if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode))) + init_special_inode(inode, inode->i_mode, inode->i_rdev); + + i_size_write(inode, stat->st_size); + inode->i_blocks = stat->st_blocks; + } else { + if (stat->st_result_mask & P9_STATS_ATIME) { + inode->i_atime.tv_sec = stat->st_atime_sec; + inode->i_atime.tv_nsec = stat->st_atime_nsec; + } + if (stat->st_result_mask & P9_STATS_MTIME) { + inode->i_mtime.tv_sec = stat->st_mtime_sec; + inode->i_mtime.tv_nsec = stat->st_mtime_nsec; + } + if (stat->st_result_mask & P9_STATS_CTIME) { + inode->i_ctime.tv_sec = stat->st_ctime_sec; + inode->i_ctime.tv_nsec = stat->st_ctime_nsec; + } + if (stat->st_result_mask & P9_STATS_UID) + inode->i_uid = stat->st_uid; + if (stat->st_result_mask & P9_STATS_GID) + inode->i_gid = stat->st_gid; + if (stat->st_result_mask & P9_STATS_NLINK) + inode->i_nlink = stat->st_nlink; + if (stat->st_result_mask & P9_STATS_MODE) { + inode->i_mode = stat->st_mode; + if ((S_ISBLK(inode->i_mode)) || + (S_ISCHR(inode->i_mode))) + init_special_inode(inode, inode->i_mode, + inode->i_rdev); + } + if (stat->st_result_mask & P9_STATS_RDEV) + inode->i_rdev = new_decode_dev(stat->st_rdev); + if (stat->st_result_mask & P9_STATS_SIZE) + i_size_write(inode, stat->st_size); + if (stat->st_result_mask & P9_STATS_BLOCKS) + inode->i_blocks = stat->st_blocks; + } + if (stat->st_result_mask & P9_STATS_GEN) + inode->i_generation = stat->st_gen; + + /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION + * because the inode structure does not have fields for them. + */ +} + +static int +v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry, + const char *symname) +{ + struct v9fs_session_info *v9ses; + struct p9_fid *dfid; + struct p9_fid *fid = NULL; + struct inode *inode; + struct p9_qid qid; + char *name; + int err; + gid_t gid; + + name = (char *) dentry->d_name.name; + P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n", + dir->i_ino, name, symname); + v9ses = v9fs_inode2v9ses(dir); + + dfid = v9fs_fid_lookup(dentry->d_parent); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + return err; + } + + gid = v9fs_get_fsgid_for_create(dir); + + /* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */ + err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid); + + if (err < 0) { + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err); + goto error; + } + + if (v9ses->cache) { + /* Now walk from the parent so we can get an unopened fid. */ + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", + err); + fid = NULL; + goto error; + } + + /* instantiate inode and assign the unopened fid to dentry */ + inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", + err); + goto error; + } + d_set_d_op(dentry, &v9fs_cached_dentry_operations); + d_instantiate(dentry, inode); + err = v9fs_fid_add(dentry, fid); + if (err < 0) + goto error; + fid = NULL; + } else { + /* Not in cached mode. 
No need to populate inode with stat */ + inode = v9fs_get_inode(dir->i_sb, S_IFLNK); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto error; + } + d_set_d_op(dentry, &v9fs_dentry_operations); + d_instantiate(dentry, inode); + } + +error: + if (fid) + p9_client_clunk(fid); + + return err; +} + +/** + * v9fs_vfs_link_dotl - create a hardlink for dotl + * @old_dentry: dentry for file to link to + * @dir: inode destination for new link + * @dentry: dentry for link + * + */ + +static int +v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir, + struct dentry *dentry) +{ + int err; + struct p9_fid *dfid, *oldfid; + char *name; + struct v9fs_session_info *v9ses; + struct dentry *dir_dentry; + + P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n", + dir->i_ino, old_dentry->d_name.name, + dentry->d_name.name); + + v9ses = v9fs_inode2v9ses(dir); + dir_dentry = v9fs_dentry_from_dir_inode(dir); + dfid = v9fs_fid_lookup(dir_dentry); + if (IS_ERR(dfid)) + return PTR_ERR(dfid); + + oldfid = v9fs_fid_lookup(old_dentry); + if (IS_ERR(oldfid)) + return PTR_ERR(oldfid); + + name = (char *) dentry->d_name.name; + + err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name); + + if (err < 0) { + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err); + return err; + } + + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + /* Get the latest stat info from server. */ + struct p9_fid *fid; + struct p9_stat_dotl *st; + + fid = v9fs_fid_lookup(old_dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); + if (IS_ERR(st)) + return PTR_ERR(st); + + v9fs_stat2inode_dotl(st, old_dentry->d_inode); + + kfree(st); + } else { + /* Caching disabled. No need to get upto date stat info. + * This dentry will be released immediately. 
So, just hold the + * inode + */ + ihold(old_dentry->d_inode); + } + + d_set_d_op(dentry, old_dentry->d_op); + d_instantiate(dentry, old_dentry->d_inode); + + return err; +} + +/** + * v9fs_vfs_mknod_dotl - create a special file + * @dir: inode destination for new link + * @dentry: dentry for file + * @mode: mode for creation + * @rdev: device associated with special file + * + */ +static int +v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode, + dev_t rdev) +{ + int err; + char *name; + mode_t mode; + struct v9fs_session_info *v9ses; + struct p9_fid *fid = NULL, *dfid = NULL; + struct inode *inode; + gid_t gid; + struct p9_qid qid; + struct dentry *dir_dentry; + struct posix_acl *dacl = NULL, *pacl = NULL; + + P9_DPRINTK(P9_DEBUG_VFS, + " %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino, + dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev)); + + if (!new_valid_dev(rdev)) + return -EINVAL; + + v9ses = v9fs_inode2v9ses(dir); + dir_dentry = v9fs_dentry_from_dir_inode(dir); + dfid = v9fs_fid_lookup(dir_dentry); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + dfid = NULL; + goto error; + } + + gid = v9fs_get_fsgid_for_create(dir); + mode = omode; + /* Update mode based on ACL value */ + err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); + if (err) { + P9_DPRINTK(P9_DEBUG_VFS, + "Failed to get acl values in mknod %d\n", err); + goto error; + } + name = (char *) dentry->d_name.name; + + err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid); + if (err < 0) + goto error; + + /* instantiate inode and assign the unopened fid to the dentry */ + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", + err); + fid = NULL; + goto error; + } + + inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", + err); + goto error; + } + d_set_d_op(dentry, &v9fs_cached_dentry_operations); + d_instantiate(dentry, inode); + err = v9fs_fid_add(dentry, fid); + if (err < 0) + goto error; + fid = NULL; + } else { + /* + * Not in cached mode. No need to populate inode with stat. 
+ * socket syscall returns a fd, so we need instantiate + */ + inode = v9fs_get_inode(dir->i_sb, mode); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto error; + } + d_set_d_op(dentry, &v9fs_dentry_operations); + d_instantiate(dentry, inode); + } + /* Now set the ACL based on the default value */ + v9fs_set_create_acl(dentry, dacl, pacl); +error: + if (fid) + p9_client_clunk(fid); + return err; +} + +static int +v9fs_vfs_readlink_dotl(struct dentry *dentry, char *buffer, int buflen) +{ + int retval; + struct p9_fid *fid; + char *target = NULL; + + P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name); + retval = -EPERM; + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + retval = p9_client_readlink(fid, &target); + if (retval < 0) + return retval; + + strncpy(buffer, target, buflen); + P9_DPRINTK(P9_DEBUG_VFS, "%s -> %s\n", dentry->d_name.name, buffer); + + retval = strnlen(buffer, buflen); + return retval; +} + +/** + * v9fs_vfs_follow_link_dotl - follow a symlink path + * @dentry: dentry for symlink + * @nd: nameidata + * + */ + +static void * +v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd) +{ + int len = 0; + char *link = __getname(); + + P9_DPRINTK(P9_DEBUG_VFS, "%s n", dentry->d_name.name); + + if (!link) + link = ERR_PTR(-ENOMEM); + else { + len = v9fs_vfs_readlink_dotl(dentry, link, PATH_MAX); + if (len < 0) { + __putname(link); + link = ERR_PTR(len); + } else + link[min(len, PATH_MAX-1)] = 0; + } + nd_set_link(nd, link); + + return NULL; +} + +const struct inode_operations v9fs_dir_inode_operations_dotl = { + .create = v9fs_vfs_create_dotl, + .lookup = v9fs_vfs_lookup, + .link = v9fs_vfs_link_dotl, + .symlink = v9fs_vfs_symlink_dotl, + .unlink = v9fs_vfs_unlink, + .mkdir = v9fs_vfs_mkdir_dotl, + .rmdir = v9fs_vfs_rmdir, + .mknod = v9fs_vfs_mknod_dotl, + .rename = v9fs_vfs_rename, + .getattr = v9fs_vfs_getattr_dotl, + .setattr = v9fs_vfs_setattr_dotl, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .removexattr = generic_removexattr, + .listxattr = v9fs_listxattr, + .check_acl = v9fs_check_acl, +}; + +const struct inode_operations v9fs_file_inode_operations_dotl = { + .getattr = v9fs_vfs_getattr_dotl, + .setattr = v9fs_vfs_setattr_dotl, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .removexattr = generic_removexattr, + .listxattr = v9fs_listxattr, + .check_acl = v9fs_check_acl, +}; + +const struct inode_operations v9fs_symlink_inode_operations_dotl = { + .readlink = v9fs_vfs_readlink_dotl, + .follow_link = v9fs_vfs_follow_link_dotl, + .put_link = v9fs_vfs_put_link, + .getattr = v9fs_vfs_getattr_dotl, + .setattr = v9fs_vfs_setattr_dotl, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .removexattr = generic_removexattr, + .listxattr = v9fs_listxattr, +}; -- cgit v1.2.2 From af7542fc8ac678ce69dbd5c9643c52897b47c66f Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 10 Jan 2011 14:22:21 -0600 Subject: fs/9p: Simplify the .L create operation Signed-off-by: Aneesh Kumar K.V Signed-off-by: Venkateswararao Jujjuri Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_inode_dotl.c | 75 +++++++++++++++++++------------------------------- 1 file changed, 28 insertions(+), 47 deletions(-) (limited to 'fs') diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index daf2f0665a6f..b6f3977545f7 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -196,60 +196,41 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode, err); goto error; } - 
/* instantiate inode and assign the unopened fid to the dentry */ - if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE || - (nd && nd->flags & LOOKUP_OPEN)) { - fid = p9_client_walk(dfid, 1, &name, 1); - if (IS_ERR(fid)) { - err = PTR_ERR(fid); - P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", - err); - fid = NULL; - goto error; - } - inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", - err); - goto error; - } - d_set_d_op(dentry, &v9fs_cached_dentry_operations); - d_instantiate(dentry, inode); - err = v9fs_fid_add(dentry, fid); - if (err < 0) - goto error; - /* The fid would get clunked via a dput */ + /* instantiate inode and assign the unopened fid to the dentry */ + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + P9_DPRINTK(P9_DEBUG_VFS, "p9_clinet_walk failed %d\n", err); fid = NULL; - } else { - /* - * Not in cached mode. No need to populate - * inode with stat. We need to get an inode - * so that we can set the acl with dentry - */ - inode = v9fs_get_inode(dir->i_sb, mode); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); - goto error; - } - d_set_d_op(dentry, &v9fs_dentry_operations); - d_instantiate(dentry, inode); + goto error; + } + inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); + goto error; } + if (v9ses->cache) + dentry->d_op = &v9fs_cached_dentry_operations; + else + dentry->d_op = &v9fs_dentry_operations; + + d_instantiate(dentry, inode); + err = v9fs_fid_add(dentry, fid); + if (err < 0) + goto error; + /* Now set the ACL based on the default value */ v9fs_set_create_acl(dentry, dacl, pacl); - /* if we are opening a file, assign the open fid to the file */ - if (nd && nd->flags & LOOKUP_OPEN) { - filp = lookup_instantiate_filp(nd, dentry, generic_file_open); - if (IS_ERR(filp)) { - p9_client_clunk(ofid); - return PTR_ERR(filp); - } - filp->private_data = ofid; - } else + /* Since we are opening a file, assign the open fid to the file */ + filp = lookup_instantiate_filp(nd, dentry, generic_file_open); + if (IS_ERR(filp)) { p9_client_clunk(ofid); - + return PTR_ERR(filp); + } + filp->private_data = ofid; return 0; error: -- cgit v1.2.2 From 31b6ceac497954c160c61f07e76b891b1cf53c90 Mon Sep 17 00:00:00 2001 From: "M. Mohan Kumar" Date: Sat, 8 Jan 2011 07:28:46 +0530 Subject: fs/9p: TREADLINK bugfix Remove v9fs_vfs_readlink_dotl function and use generic_readlink. Update v9fs_vfs_follow_link_dotl function to accommodate this change Signed-off-by: M. Mohan Kumar Reported-by: Dr. 
David Alan Gilbert Signed-off-by: Venkateswararao Jujjuri Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_inode_dotl.c | 58 +++++++++++++++++++------------------------------- 1 file changed, 22 insertions(+), 36 deletions(-) (limited to 'fs') diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index b6f3977545f7..b7f8dcbabdb2 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -755,30 +755,6 @@ error: return err; } -static int -v9fs_vfs_readlink_dotl(struct dentry *dentry, char *buffer, int buflen) -{ - int retval; - struct p9_fid *fid; - char *target = NULL; - - P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name); - retval = -EPERM; - fid = v9fs_fid_lookup(dentry); - if (IS_ERR(fid)) - return PTR_ERR(fid); - - retval = p9_client_readlink(fid, &target); - if (retval < 0) - return retval; - - strncpy(buffer, target, buflen); - P9_DPRINTK(P9_DEBUG_VFS, "%s -> %s\n", dentry->d_name.name, buffer); - - retval = strnlen(buffer, buflen); - return retval; -} - /** * v9fs_vfs_follow_link_dotl - follow a symlink path * @dentry: dentry for symlink @@ -789,23 +765,33 @@ v9fs_vfs_readlink_dotl(struct dentry *dentry, char *buffer, int buflen) static void * v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd) { - int len = 0; + int retval; + struct p9_fid *fid; char *link = __getname(); + char *target; - P9_DPRINTK(P9_DEBUG_VFS, "%s n", dentry->d_name.name); + P9_DPRINTK(P9_DEBUG_VFS, "%s\n", dentry->d_name.name); - if (!link) + if (!link) { link = ERR_PTR(-ENOMEM); - else { - len = v9fs_vfs_readlink_dotl(dentry, link, PATH_MAX); - if (len < 0) { - __putname(link); - link = ERR_PTR(len); - } else - link[min(len, PATH_MAX-1)] = 0; + goto ndset; } + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) { + __putname(link); + link = ERR_PTR(PTR_ERR(fid)); + goto ndset; + } + retval = p9_client_readlink(fid, &target); + if (!retval) { + strcpy(link, target); + kfree(target); + goto ndset; + } + __putname(link); + link = ERR_PTR(retval); +ndset: nd_set_link(nd, link); - return NULL; } @@ -839,7 +825,7 @@ const struct inode_operations v9fs_file_inode_operations_dotl = { }; const struct inode_operations v9fs_symlink_inode_operations_dotl = { - .readlink = v9fs_vfs_readlink_dotl, + .readlink = generic_readlink, .follow_link = v9fs_vfs_follow_link_dotl, .put_link = v9fs_vfs_put_link, .getattr = v9fs_vfs_getattr_dotl, -- cgit v1.2.2 From c25a61f542ccb81e74d3f822992f3d74392f386d Mon Sep 17 00:00:00 2001 From: Eric Van Hensbergen Date: Tue, 11 Jan 2011 09:49:03 -0600 Subject: fs/9p: fix spelling typo introduced a typo somehow during a hand merge Reported by: Aneesh Kumar K.V Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_inode_dotl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index b7f8dcbabdb2..ee3ae9b5afdd 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -201,7 +201,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode, fid = p9_client_walk(dfid, 1, &name, 1); if (IS_ERR(fid)) { err = PTR_ERR(fid); - P9_DPRINTK(P9_DEBUG_VFS, "p9_clinet_walk failed %d\n", err); + P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); fid = NULL; goto error; } -- cgit v1.2.2 From b8b80cf37c7f0e32729262f805bc0fa81c3e9d12 Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Tue, 11 Jan 2011 08:14:47 +0000 Subject: fs/9p: Don't set dentry->d_op in create routines We do set dentry->d_op in lookup even in case of EOENT entries. 
That implies we should have dentry->d_op already set when create/mkdir/mknod/link/symlink routines are called Signed-off-by: Aneesh Kumar K.V Signed-off-by: Eric Van Hensbergen --- fs/9p/vfs_inode.c | 6 ------ fs/9p/vfs_inode_dotl.c | 13 ------------- 2 files changed, 19 deletions(-) (limited to 'fs') diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 392358672483..5076eeb95502 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -537,12 +537,6 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir, P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); goto error; } - - if (v9ses->cache) - d_set_d_op(dentry, &v9fs_cached_dentry_operations); - else - d_set_d_op(dentry, &v9fs_dentry_operations); - d_instantiate(dentry, inode); err = v9fs_fid_add(dentry, fid); if (err < 0) diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index ee3ae9b5afdd..fe3ffa9aace4 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -211,11 +211,6 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode, P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err); goto error; } - if (v9ses->cache) - dentry->d_op = &v9fs_cached_dentry_operations; - else - dentry->d_op = &v9fs_dentry_operations; - d_instantiate(dentry, inode); err = v9fs_fid_add(dentry, fid); if (err < 0) @@ -312,7 +307,6 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir, err); goto error; } - d_set_d_op(dentry, &v9fs_cached_dentry_operations); d_instantiate(dentry, inode); err = v9fs_fid_add(dentry, fid); if (err < 0) @@ -329,7 +323,6 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir, err = PTR_ERR(inode); goto error; } - d_set_d_op(dentry, &v9fs_dentry_operations); d_instantiate(dentry, inode); } /* Now set the ACL based on the default value */ @@ -560,7 +553,6 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry, err); goto error; } - d_set_d_op(dentry, &v9fs_cached_dentry_operations); d_instantiate(dentry, inode); err = v9fs_fid_add(dentry, fid); if (err < 0) @@ -573,7 +565,6 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry, err = PTR_ERR(inode); goto error; } - d_set_d_op(dentry, &v9fs_dentry_operations); d_instantiate(dentry, inode); } @@ -648,8 +639,6 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir, */ ihold(old_dentry->d_inode); } - - d_set_d_op(dentry, old_dentry->d_op); d_instantiate(dentry, old_dentry->d_inode); return err; @@ -728,7 +717,6 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode, err); goto error; } - d_set_d_op(dentry, &v9fs_cached_dentry_operations); d_instantiate(dentry, inode); err = v9fs_fid_add(dentry, fid); if (err < 0) @@ -744,7 +732,6 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode, err = PTR_ERR(inode); goto error; } - d_set_d_op(dentry, &v9fs_dentry_operations); d_instantiate(dentry, inode); } /* Now set the ACL based on the default value */ -- cgit v1.2.2 From 357f54d6b38252737116a6d631f6ac28ded018ed Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Tue, 14 Dec 2010 10:11:57 -0500 Subject: NFS fix the setting of exchange id flag Indicate support for referrals. Do not set any PNFS roles. Check the flags returned by the server for validity. Do not use exchange flags from an old client ID instance when recovering a client ID. Update the EXCHID4_FLAG_XXX set to RFC 5661. 
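The validity rules described above (and implemented by nfs4_check_cl_exchange_flags in the hunk that follows) can be exercised in isolation with a minimal userspace sketch. The abbreviated flag names and the reduced reply-valid mask below are illustrative assumptions drawn from RFC 5661, not the kernel's EXCHGID4_* macros:

/* Minimal sketch of the EXCHANGE_ID reply-flag checks; the constants are
 * assumed from RFC 5661 and abbreviated, not taken from the kernel headers. */
#include <stdio.h>

#define SUPP_MOVED_REFER  0x00000001u  /* client asks for referral support */
#define USE_NON_PNFS      0x00010000u
#define USE_PNFS_MDS      0x00020000u
#define USE_PNFS_DS       0x00040000u
#define MASK_PNFS         (USE_NON_PNFS | USE_PNFS_MDS | USE_PNFS_DS)
#define CONFIRMED_R       0x80000000u
/* Reduced, illustrative set of flags a server may legally return. */
#define REPLY_VALID_MASK  (SUPP_MOVED_REFER | MASK_PNFS | CONFIRMED_R)

static int check_exchange_flags(unsigned int flags)
{
	if (flags & ~REPLY_VALID_MASK)		/* unknown or arg-only bits */
		return -1;
	if ((flags & USE_PNFS_MDS) && (flags & USE_NON_PNFS))
		return -1;			/* mutually exclusive roles */
	if (!(flags & MASK_PNFS))		/* server must pick a pNFS role */
		return -1;
	return 0;
}

int main(void)
{
	/* accepted: referrals plus exactly one pNFS role */
	printf("%d\n", check_exchange_flags(SUPP_MOVED_REFER | USE_NON_PNFS));
	/* rejected: MDS and NON_PNFS both set */
	printf("%d\n", check_exchange_flags(USE_PNFS_MDS | USE_NON_PNFS));
	/* rejected: no pNFS role bit at all */
	printf("%d\n", check_exchange_flags(SUPP_MOVED_REFER));
	return 0;
}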
Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f2b92f6a7efb..9d992b0346e3 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -4518,6 +4518,25 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, } #ifdef CONFIG_NFS_V4_1 +/* + * Check the exchange flags returned by the server for invalid flags, having + * both PNFS and NON_PNFS flags set, and not having one of NON_PNFS, PNFS, or + * DS flags set. + */ +static int nfs4_check_cl_exchange_flags(u32 flags) +{ + if (flags & ~EXCHGID4_FLAG_MASK_R) + goto out_inval; + if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) && + (flags & EXCHGID4_FLAG_USE_NON_PNFS)) + goto out_inval; + if (!(flags & (EXCHGID4_FLAG_MASK_PNFS))) + goto out_inval; + return NFS_OK; +out_inval: + return -NFS4ERR_INVAL; +} + /* * nfs4_proc_exchange_id() * @@ -4531,7 +4550,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) nfs4_verifier verifier; struct nfs41_exchange_id_args args = { .client = clp, - .flags = clp->cl_exchange_flags, + .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER, }; struct nfs41_exchange_id_res res = { .client = clp, @@ -4548,9 +4567,6 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) dprintk("--> %s\n", __func__); BUG_ON(clp == NULL); - /* Remove server-only flags */ - args.flags &= ~EXCHGID4_FLAG_CONFIRMED_R; - p = (u32 *)verifier.data; *p++ = htonl((u32)clp->cl_boot_time.tv_sec); *p = htonl((u32)clp->cl_boot_time.tv_nsec); @@ -4576,6 +4592,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) break; } + status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags); dprintk("<-- %s status= %d\n", __func__, status); return status; } -- cgit v1.2.2 From 0a2179b169089f871e071c74316371ed43e6c8eb Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Tue, 11 Jan 2011 14:42:29 -0500 Subject: ext4: revert buggy trim overflow patch This reverts commit 4f531501e44: ext4: fix possible overflow in ext4_trim_fs() Signed-off-by: "Theodore Ts'o" --- fs/ext4/mballoc.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index cd5214f75397..cc1297e15f1b 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4804,7 +4804,6 @@ ext4_grpblk_t ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b, int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) { struct ext4_buddy e4b; - ext4_fsblk_t blocks_count = ext4_blocks_count(EXT4_SB(sb)->s_es); ext4_group_t first_group, last_group; ext4_group_t group, ngroups = ext4_get_groups_count(sb); ext4_grpblk_t cnt = 0, first_block, last_block; @@ -4816,11 +4815,6 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) minlen = range->minlen >> sb->s_blocksize_bits; trimmed = 0; - if (start >= blocks_count) - return -EINVAL; - if (start + len > blocks_count) - len = blocks_count - start; - if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb))) return -EINVAL; -- cgit v1.2.2 From 4c6493785a1ea9c3b3522f199760a90a30e1626c Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 15 Jun 2010 14:22:37 -0400 Subject: nfsd4: modify session list under cl_lock We want to traverse this from the callback code. Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4state.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index b583e4e800ab..3cf9900d5f32 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -771,7 +771,9 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n idx = hash_sessionid(&new->se_sessionid); spin_lock(&client_lock); list_add(&new->se_hash, &sessionid_hashtbl[idx]); + spin_lock(&clp->cl_lock); list_add(&new->se_perclnt, &clp->cl_sessions); + spin_unlock(&clp->cl_lock); spin_unlock(&client_lock); status = nfsd4_new_conn(rqstp, new); @@ -819,7 +821,9 @@ static void unhash_session(struct nfsd4_session *ses) { list_del(&ses->se_hash); + spin_lock(&ses->se_client->cl_lock); list_del(&ses->se_perclnt); + spin_unlock(&ses->se_client->cl_lock); } /* must be called under the client_lock */ @@ -925,8 +929,10 @@ unhash_client_locked(struct nfs4_client *clp) mark_client_expired(clp); list_del(&clp->cl_lru); + spin_lock(&clp->cl_lock); list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) list_del_init(&ses->se_hash); + spin_unlock(&clp->cl_lock); } static void -- cgit v1.2.2 From 1d1bc8f2074f0b728dfca2a3c16f2f5a3f298ffc Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 4 Oct 2010 23:12:59 -0400 Subject: nfsd4: support BIND_CONN_TO_SESSION Basic xdr and processing for BIND_CONN_TO_SESSION. This adds a connection to the list of connections associated with a session. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4proc.c | 9 ++++++-- fs/nfsd/nfs4state.c | 59 +++++++++++++++++++++++++++++++++++++++++++++++------ fs/nfsd/nfs4xdr.c | 34 ++++++++++++++++++++++++++++-- fs/nfsd/state.h | 5 +++++ fs/nfsd/xdr4.h | 2 ++ 5 files changed, 99 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index fd6694b49e1c..db52546143d1 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1004,8 +1004,8 @@ static const char *nfsd4_op_name(unsigned opnum); * Also note, enforced elsewhere: * - SEQUENCE other than as first op results in * NFS4ERR_SEQUENCE_POS. (Enforced in nfsd4_sequence().) - * - BIND_CONN_TO_SESSION must be the only op in its compound - * (Will be enforced in nfsd4_bind_conn_to_session().) + * - BIND_CONN_TO_SESSION must be the only op in its compound. + * (Enforced in nfsd4_bind_conn_to_session().) * - DESTROY_SESSION must be the final operation in a compound, if * sessionid's in SEQUENCE and DESTROY_SESSION are the same. * (Enforced in nfsd4_destroy_session().) 
@@ -1326,6 +1326,11 @@ static struct nfsd4_operation nfsd4_ops[] = { .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP, .op_name = "OP_EXCHANGE_ID", }, + [OP_BIND_CONN_TO_SESSION] = { + .op_func = (nfsd4op_func)nfsd4_bind_conn_to_session, + .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP, + .op_name = "OP_BIND_CONN_TO_SESSION", + }, [OP_CREATE_SESSION] = { .op_func = (nfsd4op_func)nfsd4_create_session, .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP, diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 3cf9900d5f32..956174f488a7 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -679,15 +679,12 @@ static int nfsd4_register_conn(struct nfsd4_conn *conn) return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); } -static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) +static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir) { struct nfsd4_conn *conn; - u32 flags = NFS4_CDFC4_FORE; int ret; - if (ses->se_flags & SESSION4_BACK_CHAN) - flags |= NFS4_CDFC4_BACK; - conn = alloc_conn(rqstp, flags); + conn = alloc_conn(rqstp, dir); if (!conn) return nfserr_jukebox; nfsd4_hash_conn(conn, ses); @@ -698,6 +695,17 @@ static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses) return nfs_ok; } +static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses) +{ + u32 dir = NFS4_CDFC4_FORE; + + if (ses->se_flags & SESSION4_BACK_CHAN) + dir |= NFS4_CDFC4_BACK; + + return nfsd4_new_conn(rqstp, ses, dir); +} + +/* must be called under client_lock */ static void nfsd4_del_conns(struct nfsd4_session *s) { struct nfs4_client *clp = s->se_client; @@ -776,7 +784,7 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n spin_unlock(&clp->cl_lock); spin_unlock(&client_lock); - status = nfsd4_new_conn(rqstp, new); + status = nfsd4_new_conn_from_crses(rqstp, new); /* whoops: benny points out, status is ignored! 
(err, or bogus) */ if (status) { free_session(&new->se_ref); @@ -1597,6 +1605,45 @@ static bool nfsd4_last_compound_op(struct svc_rqst *rqstp) return argp->opcnt == resp->opcnt; } +static __be32 nfsd4_map_bcts_dir(u32 *dir) +{ + switch (*dir) { + case NFS4_CDFC4_FORE: + case NFS4_CDFC4_BACK: + return nfs_ok; + case NFS4_CDFC4_FORE_OR_BOTH: + case NFS4_CDFC4_BACK_OR_BOTH: + *dir = NFS4_CDFC4_BOTH; + return nfs_ok; + }; + return nfserr_inval; +} + +__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, + struct nfsd4_compound_state *cstate, + struct nfsd4_bind_conn_to_session *bcts) +{ + __be32 status; + + if (!nfsd4_last_compound_op(rqstp)) + return nfserr_not_only_op; + spin_lock(&client_lock); + cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid); + /* Sorta weird: we only need the refcnt'ing because new_conn acquires + * client_lock iself: */ + if (cstate->session) { + nfsd4_get_session(cstate->session); + atomic_inc(&cstate->session->se_client->cl_refcount); + } + spin_unlock(&client_lock); + if (!cstate->session) + return nfserr_badsession; + + status = nfsd4_map_bcts_dir(&bcts->dir); + nfsd4_new_conn(rqstp, cstate->session, bcts->dir); + return nfs_ok; +} + static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid) { if (!session) diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index ca3786905dec..4ff2c9e0b276 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -421,6 +421,21 @@ nfsd4_decode_access(struct nfsd4_compoundargs *argp, struct nfsd4_access *access DECODE_TAIL; } +static __be32 nfsd4_decode_bind_conn_to_session(struct nfsd4_compoundargs *argp, struct nfsd4_bind_conn_to_session *bcts) +{ + DECODE_HEAD; + u32 dummy; + + READ_BUF(NFS4_MAX_SESSIONID_LEN + 8); + COPYMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN); + READ32(bcts->dir); + /* XXX: Perhaps Tom Tucker could help us figure out how we + * should be using ctsa_use_conn_in_rdma_mode: */ + READ32(dummy); + + DECODE_TAIL; +} + static __be32 nfsd4_decode_close(struct nfsd4_compoundargs *argp, struct nfsd4_close *close) { @@ -1359,7 +1374,7 @@ static nfsd4_dec nfsd41_dec_ops[] = { /* new operations for NFSv4.1 */ [OP_BACKCHANNEL_CTL] = (nfsd4_dec)nfsd4_decode_notsupp, - [OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_notsupp, + [OP_BIND_CONN_TO_SESSION]= (nfsd4_dec)nfsd4_decode_bind_conn_to_session, [OP_EXCHANGE_ID] = (nfsd4_dec)nfsd4_decode_exchange_id, [OP_CREATE_SESSION] = (nfsd4_dec)nfsd4_decode_create_session, [OP_DESTROY_SESSION] = (nfsd4_dec)nfsd4_decode_destroy_session, @@ -2383,6 +2398,21 @@ nfsd4_encode_access(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_ return nfserr; } +static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_bind_conn_to_session *bcts) +{ + __be32 *p; + + if (!nfserr) { + RESERVE_SPACE(NFS4_MAX_SESSIONID_LEN + 8); + WRITEMEM(bcts->sessionid.data, NFS4_MAX_SESSIONID_LEN); + WRITE32(bcts->dir); + /* XXX: ? 
*/ + WRITE32(0); + ADJUST_ARGS(); + } + return nfserr; +} + static __be32 nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close) { @@ -3174,7 +3204,7 @@ static nfsd4_enc nfsd4_enc_ops[] = { /* NFSv4.1 operations */ [OP_BACKCHANNEL_CTL] = (nfsd4_enc)nfsd4_encode_noop, - [OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_noop, + [OP_BIND_CONN_TO_SESSION] = (nfsd4_enc)nfsd4_encode_bind_conn_to_session, [OP_EXCHANGE_ID] = (nfsd4_enc)nfsd4_encode_exchange_id, [OP_CREATE_SESSION] = (nfsd4_enc)nfsd4_encode_create_session, [OP_DESTROY_SESSION] = (nfsd4_enc)nfsd4_encode_destroy_session, diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index cf6dc83fd545..442f6d8e024c 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -148,6 +148,11 @@ struct nfsd4_create_session { u32 gid; }; +struct nfsd4_bind_conn_to_session { + struct nfs4_sessionid sessionid; + u32 dir; +}; + /* The single slot clientid cache structure */ struct nfsd4_clid_slot { u32 sl_seqid; diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 799c30c3b495..3a7aa4d98c1f 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -427,6 +427,7 @@ struct nfsd4_op { /* NFSv4.1 */ struct nfsd4_exchange_id exchange_id; + struct nfsd4_bind_conn_to_session bind_conn_to_session; struct nfsd4_create_session create_session; struct nfsd4_destroy_session destroy_session; struct nfsd4_sequence sequence; @@ -523,6 +524,7 @@ extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, struct nfsd4_sequence *seq); extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *, struct nfsd4_exchange_id *); +extern __be32 nfsd4_bind_conn_to_session(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_bind_conn_to_session *); extern __be32 nfsd4_create_session(struct svc_rqst *, struct nfsd4_compound_state *, struct nfsd4_create_session *); -- cgit v1.2.2 From dcbeaa68dbbdacbbb330a86c7fc95a28473fc209 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 15 Jun 2010 17:25:45 -0400 Subject: nfsd4: allow backchannel recovery Now that we have a list of connections to choose from, we can teach the callback code to just pick a suitable connection and use that, instead of insisting on forever using the connection that the first create_session was sent with. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 36 +++++++++++++++++++++++++++++++++--- fs/nfsd/nfs4state.c | 16 ++++++++++------ 2 files changed, 43 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index dd183af24fe6..18b740bd29ac 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -473,8 +473,7 @@ static int max_cb_time(void) /* Reference counting, callback cleanup, etc., all look racy as heck. * And why is cl_cb_set an atomic? 
*/ -static int setup_callback_client(struct nfs4_client *clp, - struct nfs4_cb_conn *conn) +static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses) { struct rpc_timeout timeparms = { .to_initval = max_cb_time(), @@ -501,6 +500,10 @@ static int setup_callback_client(struct nfs4_client *clp, args.protocol = XPRT_TRANSPORT_TCP; clp->cl_cb_ident = conn->cb_ident; } else { + if (!conn->cb_xprt) + return -EINVAL; + clp->cl_cb_conn.cb_xprt = conn->cb_xprt; + clp->cl_cb_session = ses; args.bc_xprt = conn->cb_xprt; args.prognumber = clp->cl_cb_session->se_cb_prog; args.protocol = XPRT_TRANSPORT_BC_TCP; @@ -756,10 +759,27 @@ static void nfsd4_release_cb(struct nfsd4_callback *cb) cb->cb_ops->rpc_release(cb); } +/* requires cl_lock: */ +static struct nfsd4_conn * __nfsd4_find_backchannel(struct nfs4_client *clp) +{ + struct nfsd4_session *s; + struct nfsd4_conn *c; + + list_for_each_entry(s, &clp->cl_sessions, se_perclnt) { + list_for_each_entry(c, &s->se_conns, cn_persession) { + if (c->cn_flags & NFS4_CDFC4_BACK) + return c; + } + } + return NULL; +} + static void nfsd4_process_cb_update(struct nfsd4_callback *cb) { struct nfs4_cb_conn conn; struct nfs4_client *clp = cb->cb_clp; + struct nfsd4_session *ses = NULL; + struct nfsd4_conn *c; int err; /* @@ -770,6 +790,10 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) rpc_shutdown_client(clp->cl_cb_client); clp->cl_cb_client = NULL; } + if (clp->cl_cb_conn.cb_xprt) { + svc_xprt_put(clp->cl_cb_conn.cb_xprt); + clp->cl_cb_conn.cb_xprt = NULL; + } if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags)) return; spin_lock(&clp->cl_lock); @@ -780,9 +804,15 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) BUG_ON(!clp->cl_cb_flags); clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags); memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn)); + c = __nfsd4_find_backchannel(clp); + if (c) { + svc_xprt_get(c->cn_xprt); + conn.cb_xprt = c->cn_xprt; + ses = c->cn_session; + } spin_unlock(&clp->cl_lock); - err = setup_callback_client(clp, &conn); + err = setup_callback_client(clp, &conn, ses); if (err) warn_no_callback_path(clp, err); } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 956174f488a7..290370bc9ae7 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -642,6 +642,7 @@ static void nfsd4_conn_lost(struct svc_xpt_user *u) free_conn(c); } spin_unlock(&clp->cl_lock); + /* XXX: mark callback for update, probe callback */ } static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) @@ -790,16 +791,19 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n free_session(&new->se_ref); return NULL; } - if (!clp->cl_cb_session && (cses->flags & SESSION4_BACK_CHAN)) { + if (cses->flags & SESSION4_BACK_CHAN) { struct sockaddr *sa = svc_addr(rqstp); - - clp->cl_cb_session = new; - clp->cl_cb_conn.cb_xprt = rqstp->rq_xprt; - svc_xprt_get(rqstp->rq_xprt); + /* + * This is a little silly; with sessions there's no real + * use for the callback address. Use the peer address + * as a reasonable default for now, but consider fixing + * the rpc client not to require an address in the + * future: + */ rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); - nfsd4_probe_callback(clp); } + nfsd4_probe_callback(clp); return new; } -- cgit v1.2.2 From 77a3569d6c4e14e89fa628df383b6dccc0cce6be Mon Sep 17 00:00:00 2001 From: "J. 
Bruce Fields" Date: Fri, 30 Apr 2010 18:51:44 -0400 Subject: nfsd4: keep finer-grained callback status Distinguish between when the callback channel is known to be down, and when it is not yet confirmed. This will be useful in the 4.1 case. Also, we don't seem to be using the fact that this field is atomic. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 26 ++++++++++++++------------ fs/nfsd/nfs4state.c | 8 ++++---- fs/nfsd/state.h | 5 ++++- 3 files changed, 22 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 18b740bd29ac..d32f49d6ca2c 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -470,8 +470,6 @@ static int max_cb_time(void) return max(nfsd4_lease/10, (time_t)1) * HZ; } -/* Reference counting, callback cleanup, etc., all look racy as heck. - * And why is cl_cb_set an atomic? */ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses) { @@ -526,14 +524,20 @@ static void warn_no_callback_path(struct nfs4_client *clp, int reason) (int)clp->cl_name.len, clp->cl_name.data, reason); } +static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason) +{ + clp->cl_cb_state = NFSD4_CB_DOWN; + warn_no_callback_path(clp, reason); +} + static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata) { struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null); if (task->tk_status) - warn_no_callback_path(clp, task->tk_status); + nfsd4_mark_cb_down(clp, task->tk_status); else - atomic_set(&clp->cl_cb_set, 1); + clp->cl_cb_state = NFSD4_CB_UP; } static const struct rpc_call_ops nfsd4_cb_probe_ops = { @@ -579,14 +583,15 @@ static void do_probe_callback(struct nfs4_client *clp) */ void nfsd4_probe_callback(struct nfs4_client *clp) { + /* XXX: atomicity? Also, should we be using cl_cb_flags? */ + clp->cl_cb_state = NFSD4_CB_UNKNOWN; set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags); do_probe_callback(clp); } void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn) { - BUG_ON(atomic_read(&clp->cl_cb_set)); - + clp->cl_cb_state = NFSD4_CB_UNKNOWN; spin_lock(&clp->cl_lock); memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn)); spin_unlock(&clp->cl_lock); @@ -693,8 +698,7 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) break; default: /* Network partition? 
*/ - atomic_set(&clp->cl_cb_set, 0); - warn_no_callback_path(clp, task->tk_status); + nfsd4_mark_cb_down(clp, task->tk_status); if (current_rpc_client != task->tk_client) { /* queue a callback on the new connection: */ atomic_inc(&dp->dl_count); @@ -707,10 +711,8 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) task->tk_status = 0; rpc_restart_call_prepare(task); return; - } else { - atomic_set(&clp->cl_cb_set, 0); - warn_no_callback_path(clp, task->tk_status); - } + } else + nfsd4_mark_cb_down(clp, task->tk_status); } static void nfsd4_cb_recall_release(void *calldata) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 290370bc9ae7..919ad25660d6 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1071,7 +1071,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir, memcpy(clp->cl_recdir, recdir, HEXDIR_LEN); atomic_set(&clp->cl_refcount, 0); - atomic_set(&clp->cl_cb_set, 0); + clp->cl_cb_state = NFSD4_CB_UNKNOWN; INIT_LIST_HEAD(&clp->cl_idhash); INIT_LIST_HEAD(&clp->cl_strhash); INIT_LIST_HEAD(&clp->cl_openowners); @@ -2003,7 +2003,6 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, if (!same_creds(&conf->cl_cred, &unconf->cl_cred)) status = nfserr_clid_inuse; else { - atomic_set(&conf->cl_cb_set, 0); nfsd4_change_callback(conf, &unconf->cl_cb_conn); nfsd4_probe_callback(conf); expire_client(unconf); @@ -2633,7 +2632,8 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta { struct nfs4_delegation *dp; struct nfs4_stateowner *sop = stp->st_stateowner; - int cb_up = atomic_read(&sop->so_client->cl_cb_set); + /* XXX: or unknown and nfsv4.1: */ + int cb_up = (sop->so_client->cl_cb_state == NFSD4_CB_UP); struct file_lock *fl; int status, flag = 0; @@ -2823,7 +2823,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, renew_client(clp); status = nfserr_cb_path_down; if (!list_empty(&clp->cl_delegations) - && !atomic_read(&clp->cl_cb_set)) + && clp->cl_cb_state != NFSD4_CB_UP) goto out; status = nfs_ok; out: diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 442f6d8e024c..32ff615c36f4 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -242,7 +242,10 @@ struct nfs4_client { unsigned long cl_cb_flags; struct rpc_clnt *cl_cb_client; u32 cl_cb_ident; - atomic_t cl_cb_set; +#define NFSD4_CB_UP 0 +#define NFSD4_CB_UNKNOWN 1 +#define NFSD4_CB_DOWN 2 + int cl_cb_state; struct nfsd4_callback cl_cb_null; struct nfsd4_session *cl_cb_session; -- cgit v1.2.2 From 0d7bb71907546b2baf15d78edd3e508e12963dbf Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 18 Nov 2010 08:30:33 -0500 Subject: nfsd4: set sequence flag when backchannel is down Implement the SEQ4_STATUS_CB_PATH_DOWN flag. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 6 +++++- fs/nfsd/nfs4xdr.c | 8 ++------ fs/nfsd/xdr4.h | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 919ad25660d6..15bd1cc77de7 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1800,8 +1800,12 @@ nfsd4_sequence(struct svc_rqst *rqstp, out: /* Hold a session reference until done processing the compound. 
*/ if (cstate->session) { + struct nfs4_client *clp = session->se_client; + nfsd4_get_session(cstate->session); - atomic_inc(&session->se_client->cl_refcount); + atomic_inc(&clp->cl_refcount); + if (clp->cl_cb_state == NFSD4_CB_DOWN) + seq->status_flags |= SEQ4_STATUS_CB_PATH_DOWN; } kfree(conn); spin_unlock(&client_lock); diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 4ff2c9e0b276..956629b9cdc9 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -3137,13 +3137,9 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr, WRITE32(seq->seqid); WRITE32(seq->slotid); WRITE32(seq->maxslots); - /* - * FIXME: for now: - * target_maxslots = maxslots - * status_flags = 0 - */ + /* For now: target_maxslots = maxslots */ WRITE32(seq->maxslots); - WRITE32(0); + WRITE32(seq->status_flags); ADJUST_ARGS(); resp->cstate.datap = p; /* DRC cache data pointer */ diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 3a7aa4d98c1f..366401e1a536 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -378,8 +378,8 @@ struct nfsd4_sequence { u32 cachethis; /* request */ #if 0 u32 target_maxslots; /* response */ - u32 status_flags; /* response */ #endif /* not yet */ + u32 status_flags; /* response */ }; struct nfsd4_destroy_session { -- cgit v1.2.2 From eea4980660bc204bb9d11bb3bf2b1bde5fd5175f Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 18 Nov 2010 08:34:12 -0500 Subject: nfsd4: re-probe callback on connection loss This makes sure we set the sequence flag when necessary. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 15bd1cc77de7..b24f19d4187a 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -642,7 +642,7 @@ static void nfsd4_conn_lost(struct svc_xpt_user *u) free_conn(c); } spin_unlock(&clp->cl_lock); - /* XXX: mark callback for update, probe callback */ + nfsd4_probe_callback(clp); } static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) -- cgit v1.2.2 From 84f5f7ccc59e628fc8754c0a837fd7e9559711ac Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 9 Dec 2010 15:52:19 -0500 Subject: nfsd4: make sure sequence flags are set after destroy_session If this loses any backchannel, make sure we have a chance to notice that and set the sequence flags. Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4callback.c | 6 ++++++ fs/nfsd/nfs4state.c | 3 +-- fs/nfsd/state.h | 1 + 3 files changed, 8 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index d32f49d6ca2c..cb002dce5630 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -589,6 +589,12 @@ void nfsd4_probe_callback(struct nfs4_client *clp) do_probe_callback(clp); } +void nfsd4_probe_callback_sync(struct nfs4_client *clp) +{ + nfsd4_probe_callback(clp); + flush_workqueue(callback_wq); +} + void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn) { clp->cl_cb_state = NFSD4_CB_UNKNOWN; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index b24f19d4187a..00a50b8ac878 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1686,8 +1686,7 @@ nfsd4_destroy_session(struct svc_rqst *r, spin_unlock(&client_lock); nfs4_lock_state(); - /* wait for callbacks */ - nfsd4_shutdown_callback(ses->se_client); + nfsd4_probe_callback_sync(ses->se_client); nfs4_unlock_state(); nfsd4_del_conns(ses); diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 32ff615c36f4..4e5bdfd9169c 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -464,6 +464,7 @@ extern __be32 nfs4_check_open_reclaim(clientid_t *clid); extern void nfs4_free_stateowner(struct kref *kref); extern int set_callback_cred(void); extern void nfsd4_probe_callback(struct nfs4_client *clp); +extern void nfsd4_probe_callback_sync(struct nfs4_client *clp); extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *); extern void nfsd4_do_callback_rpc(struct work_struct *); extern void nfsd4_cb_recall(struct nfs4_delegation *dp); -- cgit v1.2.2 From 229b2a0839870d0d4f91ad3b24ec13c57bbd50a0 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Fri, 10 Dec 2010 17:37:44 -0500 Subject: nfsd4: add helper function to run callbacks Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index cb002dce5630..fff96dc7704e 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -560,6 +560,11 @@ int set_callback_cred(void) static struct workqueue_struct *callback_wq; +static void run_nfsd4_cb(struct nfsd4_callback *cb) +{ + queue_work(callback_wq, &cb->cb_work); +} + static void do_probe_callback(struct nfs4_client *clp) { struct nfsd4_callback *cb = &clp->cl_cb_null; @@ -574,7 +579,7 @@ static void do_probe_callback(struct nfs4_client *clp) cb->cb_ops = &nfsd4_cb_probe_ops; - queue_work(callback_wq, &cb->cb_work); + run_nfsd4_cb(cb); } /* @@ -859,5 +864,5 @@ void nfsd4_cb_recall(struct nfs4_delegation *dp) cb->cb_ops = &nfsd4_cb_recall_ops; dp->dl_retries = 1; - queue_work(callback_wq, &dp->dl_recall.cb_work); + run_nfsd4_cb(&dp->dl_recall); } -- cgit v1.2.2 From 14a24e99f4f506265b634c1cd04eca6394f49dbc Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Fri, 10 Dec 2010 19:02:49 -0500 Subject: nfsd4: give out delegations more quickly in 4.1 case Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4state.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 00a50b8ac878..408957cf6016 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2627,6 +2627,19 @@ nfs4_set_claim_prev(struct nfsd4_open *open) open->op_stateowner->so_client->cl_firststate = 1; } +/* Should we give out recallable state?: */ +static bool nfsd4_cb_channel_good(struct nfs4_client *clp) +{ + if (clp->cl_cb_state == NFSD4_CB_UP) + return true; + /* + * In the sessions case, since we don't have to establish a + * separate connection for callbacks, we assume it's OK + * until we hear otherwise: + */ + return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; +} + /* * Attempt to hand out a delegation. */ @@ -2635,11 +2648,11 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta { struct nfs4_delegation *dp; struct nfs4_stateowner *sop = stp->st_stateowner; - /* XXX: or unknown and nfsv4.1: */ - int cb_up = (sop->so_client->cl_cb_state == NFSD4_CB_UP); + int cb_up; struct file_lock *fl; int status, flag = 0; + cb_up = nfsd4_cb_channel_good(sop->so_client); flag = NFS4_OPEN_DELEGATE_NONE; open->op_recall = 0; switch (open->op_claim_type) { -- cgit v1.2.2 From 3ff3600e7eab16301e824293e8f49b9990bd4641 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 10 Jan 2011 16:37:51 -0500 Subject: nfsd4: simplify nfsd4_cb_prepare Remove handling for a nonexistant case (status && !-EAGAIN). Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index fff96dc7704e..69955e98e086 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -613,24 +613,14 @@ void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn) * If the slot is available, then mark it busy. Otherwise, set the * thread for sleeping on the callback RPC wait queue. */ -static int nfsd41_cb_setup_sequence(struct nfs4_client *clp, - struct rpc_task *task) +static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task) { - u32 *ptr = (u32 *)clp->cl_cb_session->se_sessionid.data; - int status = 0; - - dprintk("%s: %u:%u:%u:%u\n", __func__, - ptr[0], ptr[1], ptr[2], ptr[3]); - if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { rpc_sleep_on(&clp->cl_cb_waitq, task, NULL); dprintk("%s slot is busy\n", __func__); - status = -EAGAIN; - goto out; + return false; } -out: - dprintk("%s status=%d\n", __func__, status); - return status; + return true; } /* @@ -643,19 +633,11 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall); struct nfs4_client *clp = dp->dl_client; u32 minorversion = clp->cl_minorversion; - int status = 0; cb->cb_minorversion = minorversion; if (minorversion) { - status = nfsd41_cb_setup_sequence(clp, task); - if (status) { - if (status != -EAGAIN) { - /* terminate rpc task */ - task->tk_status = status; - task->tk_action = NULL; - } + if (!nfsd41_cb_get_slot(clp, task)) return; - } } rpc_call_start(task); } -- cgit v1.2.2 From 5ce8ba25d657a71d6d8cdb05a2b90c5ae7debfda Mon Sep 17 00:00:00 2001 From: "J. 
Bruce Fields" Date: Mon, 10 Jan 2011 16:44:41 -0500 Subject: nfsd4: allow restarting callbacks If we lose the backchannel and then the client repairs the problem, resend any callbacks. We use a new cb_done flag to track whether there is still work to be done for the callback or whether it can be destroyed with the rpc. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 34 ++++++++++++++++++++++++++++------ fs/nfsd/nfs4state.c | 1 + fs/nfsd/state.h | 3 +++ 3 files changed, 32 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 69955e98e086..f1d9dd45553a 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -639,6 +639,10 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) if (!nfsd41_cb_get_slot(clp, task)) return; } + cb->cb_done = false; + spin_lock(&clp->cl_lock); + list_add(&cb->cb_per_client, &clp->cl_callbacks); + spin_unlock(&clp->cl_lock); rpc_call_start(task); } @@ -681,8 +685,11 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) return; } + if (cb->cb_done) + return; switch (task->tk_status) { case 0: + cb->cb_done = true; return; case -EBADHANDLE: case -NFS4ERR_BAD_STATEID: @@ -695,7 +702,7 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) if (current_rpc_client != task->tk_client) { /* queue a callback on the new connection: */ atomic_inc(&dp->dl_count); - nfsd4_cb_recall(dp); + run_nfsd4_cb(&dp->dl_recall); return; } } @@ -704,16 +711,23 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) task->tk_status = 0; rpc_restart_call_prepare(task); return; - } else - nfsd4_mark_cb_down(clp, task->tk_status); + } + nfsd4_mark_cb_down(clp, task->tk_status); + cb->cb_done = true; } static void nfsd4_cb_recall_release(void *calldata) { struct nfsd4_callback *cb = calldata; + struct nfs4_client *clp = cb->cb_clp; struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall); - nfs4_put_delegation(dp); + if (cb->cb_done) { + spin_lock(&clp->cl_lock); + list_del(&cb->cb_per_client); + spin_unlock(&clp->cl_lock); + nfs4_put_delegation(dp); + } } static const struct rpc_call_ops nfsd4_cb_recall_ops = { @@ -808,8 +822,13 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) spin_unlock(&clp->cl_lock); err = setup_callback_client(clp, &conn, ses); - if (err) + if (err) { warn_no_callback_path(clp, err); + return; + } + /* Yay, the callback channel's back! 
Restart any callbacks: */ + list_for_each_entry(cb, &clp->cl_callbacks, cb_per_client) + run_nfsd4_cb(cb); } void nfsd4_do_callback_rpc(struct work_struct *w) @@ -834,10 +853,11 @@ void nfsd4_do_callback_rpc(struct work_struct *w) void nfsd4_cb_recall(struct nfs4_delegation *dp) { struct nfsd4_callback *cb = &dp->dl_recall; + struct nfs4_client *clp = dp->dl_client; dp->dl_retries = 1; cb->cb_op = dp; - cb->cb_clp = dp->dl_client; + cb->cb_clp = clp; cb->cb_msg.rpc_proc = &nfs4_cb_procedures[NFSPROC4_CLNT_CB_RECALL]; cb->cb_msg.rpc_argp = cb; cb->cb_msg.rpc_resp = cb; @@ -846,5 +866,7 @@ void nfsd4_cb_recall(struct nfs4_delegation *dp) cb->cb_ops = &nfsd4_cb_recall_ops; dp->dl_retries = 1; + cb->cb_done = true; + run_nfsd4_cb(&dp->dl_recall); } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 408957cf6016..6e1f9aadd439 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1077,6 +1077,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir, INIT_LIST_HEAD(&clp->cl_openowners); INIT_LIST_HEAD(&clp->cl_delegations); INIT_LIST_HEAD(&clp->cl_lru); + INIT_LIST_HEAD(&clp->cl_callbacks); spin_lock_init(&clp->cl_lock); INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc); clp->cl_time = get_seconds(); diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 4e5bdfd9169c..3074656ba7bf 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -68,10 +68,12 @@ typedef struct { struct nfsd4_callback { void *cb_op; struct nfs4_client *cb_clp; + struct list_head cb_per_client; u32 cb_minorversion; struct rpc_message cb_msg; const struct rpc_call_ops *cb_ops; struct work_struct cb_work; + bool cb_done; }; struct nfs4_delegation { @@ -248,6 +250,7 @@ struct nfs4_client { int cl_cb_state; struct nfsd4_callback cl_cb_null; struct nfsd4_session *cl_cb_session; + struct list_head cl_callbacks; /* list of in-progress callbacks */ /* for all client information that callback code might need: */ spinlock_t cl_lock; -- cgit v1.2.2 From 0f0a25bf516843adae479636dc1cf75fd0bd003c Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 11 Jan 2011 15:16:31 -0500 Subject: ext4: fix trimming starting with block 0 with small blocksize When s_first_data_block is not zero (which happens e.g. when block size is 1KB) and trim ioctl is called to start trimming from block 0, the math in ext4_get_group_no_and_offset() overflows. The overall result is that ioctl returns EINVAL which is kind of unexpected and we probably don't want userspace tools to bother with internal details of filesystem structure. So just silently increase starting offset (and shorten length) when starting block is below s_first_data_block. 
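To make the overflow concrete: ext4_get_group_no_and_offset() subtracts s_first_data_block from the requested block before dividing by the blocks-per-group count, and that subtraction is done in unsigned 64-bit arithmetic. A minimal userspace sketch of the same arithmetic (the 8192 blocks-per-group figure is the usual value for a 1KB-block ext4 filesystem and is only illustrative, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t start = 0;                 /* trim requested from block 0 */
        uint64_t first_data_block = 1;      /* 1KB block size => s_first_data_block == 1 */
        uint64_t blocks_per_group = 8192;   /* typical for 1KB blocks */

        /* mirrors the group calculation: subtract the first data block, then divide */
        uint64_t group = (start - first_data_block) / blocks_per_group;

        /* 0 - 1 wraps to 2^64 - 1, so the computed starting group is absurdly
         * large and the ioctl bails out with EINVAL before trimming anything */
        printf("computed group = %llu\n", (unsigned long long)group);
        return 0;
}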
CC: Lukas Czerner Signed-off-by: Jan Kara Signed-off-by: "Theodore Ts'o" --- fs/ext4/mballoc.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index cc1297e15f1b..851f49b2f9d2 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -4808,6 +4808,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) ext4_group_t group, ngroups = ext4_get_groups_count(sb); ext4_grpblk_t cnt = 0, first_block, last_block; uint64_t start, len, minlen, trimmed; + ext4_fsblk_t first_data_blk = + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); int ret = 0; start = range->start >> sb->s_blocksize_bits; @@ -4817,6 +4819,10 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb))) return -EINVAL; + if (start < first_data_blk) { + len -= first_data_blk - start; + start = first_data_blk; + } /* Determine first and last group to examine based on start and len */ ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, -- cgit v1.2.2 From c58efdb442bb49dea1d148f207560c41918c1bf4 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Tue, 4 Jan 2011 04:49:29 +0000 Subject: xfs: ensure log covering transactions are synchronous To ensure the log is covered and the filesystem idles correctly, we need to ensure that dummy transactions hit the disk and do not stay pinned in memory. If the superblock is pinned in memory, it can't be flushed so the log covering cannot make progress. The result is dependent on timing - more often than not we continue to issue a log covering transaction every 36s rather than idling after ~90s. Fix this by making the log covering transaction synchronous. To avoid additional log force from xfssyncd, make the log covering transaction take the place of the existing log force in the xfssyncd background sync process.
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_super.c | 2 +- fs/xfs/linux-2.6/xfs_sync.c | 11 ++++++----- fs/xfs/xfs_fsops.c | 10 +++++----- fs/xfs/xfs_fsops.h | 2 +- 4 files changed, 13 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index c51faaa5e291..af32f375ca96 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1413,7 +1413,7 @@ xfs_fs_freeze( xfs_save_resvblks(mp); xfs_quiesce_attr(mp); - return -xfs_fs_log_dummy(mp, SYNC_WAIT); + return -xfs_fs_log_dummy(mp); } STATIC int diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index a02480de9759..e22f0057d21f 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -362,7 +362,7 @@ xfs_quiesce_data( /* mark the log as covered if needed */ if (xfs_log_need_covered(mp)) - error2 = xfs_fs_log_dummy(mp, SYNC_WAIT); + error2 = xfs_fs_log_dummy(mp); /* flush data-only devices */ if (mp->m_rtdev_targp) @@ -503,13 +503,14 @@ xfs_sync_worker( int error; if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { - xfs_log_force(mp, 0); - xfs_reclaim_inodes(mp, 0); /* dgc: errors ignored here */ - error = xfs_qm_sync(mp, SYNC_TRYLOCK); if (mp->m_super->s_frozen == SB_UNFROZEN && xfs_log_need_covered(mp)) - error = xfs_fs_log_dummy(mp, 0); + error = xfs_fs_log_dummy(mp); + else + xfs_log_force(mp, 0); + xfs_reclaim_inodes(mp, 0); + error = xfs_qm_sync(mp, SYNC_TRYLOCK); } mp->m_sync_seq++; wake_up(&mp->m_wait_single_sync_task); diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index f56d30e8040c..cec89dd5d7d2 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -612,12 +612,13 @@ out: * * We cannot use an inode here for this - that will push dirty state back up * into the VFS and then periodic inode flushing will prevent log covering from - * making progress. Hence we log a field in the superblock instead. + * making progress. Hence we log a field in the superblock instead and use a + * synchronous transaction to ensure the superblock is immediately unpinned + * and can be written back. */ int xfs_fs_log_dummy( - xfs_mount_t *mp, - int flags) + xfs_mount_t *mp) { xfs_trans_t *tp; int error; @@ -632,8 +633,7 @@ xfs_fs_log_dummy( /* log the UUID because it is an unchanging field */ xfs_mod_sb(tp, XFS_SB_UUID); - if (flags & SYNC_WAIT) - xfs_trans_set_sync(tp); + xfs_trans_set_sync(tp); return xfs_trans_commit(tp, 0); } diff --git a/fs/xfs/xfs_fsops.h b/fs/xfs/xfs_fsops.h index a786c5212c1e..1b6a98b66886 100644 --- a/fs/xfs/xfs_fsops.h +++ b/fs/xfs/xfs_fsops.h @@ -25,6 +25,6 @@ extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt); extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval, xfs_fsop_resblks_t *outval); extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags); -extern int xfs_fs_log_dummy(xfs_mount_t *mp, int flags); +extern int xfs_fs_log_dummy(struct xfs_mount *mp); #endif /* __XFS_FSOPS_H__ */ -- cgit v1.2.2 From a46db60834883c1c8c665d7fcc7b4ab66f5966fc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 7 Jan 2011 13:02:04 +0000 Subject: xfs: add FITRIM support Allow manual discards from userspace using the FITRIM ioctl. This is not intended to be run during normal workloads, as the freespace btree walks can cause large performance degradation.
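For reference, the new ioctl is exercised from userspace by passing a struct fstrim_range to FITRIM on a file descriptor opened on the filesystem (typically the mount point); on return the kernel has written the number of bytes actually trimmed back into the len field, and the caller needs CAP_SYS_ADMIN to match the capable() check in xfs_ioc_trim() below. A minimal sketch (the command-line handling and error reporting are illustrative only):

#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* FITRIM, struct fstrim_range */

int main(int argc, char **argv)
{
        struct fstrim_range range;
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <mount point>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&range, 0, sizeof(range));
        range.start = 0;                /* trim from the start of the filesystem... */
        range.len = ULLONG_MAX;         /* ...to the end (fstrim's default) */
        range.minlen = 0;               /* let the fs apply its own minimum */

        if (ioctl(fd, FITRIM, &range) < 0) {
                perror("FITRIM");
                return 1;
        }
        /* range.len now holds the number of bytes actually trimmed */
        printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        return 0;
}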
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/Makefile | 1 + fs/xfs/linux-2.6/xfs_discard.c | 191 +++++++++++++++++++++++++++++++++++++++++ fs/xfs/linux-2.6/xfs_discard.h | 8 ++ fs/xfs/linux-2.6/xfs_ioctl.c | 3 + fs/xfs/linux-2.6/xfs_trace.h | 33 +++++++ fs/xfs/xfs_alloc.c | 10 +-- fs/xfs/xfs_alloc.h | 25 ++++-- 7 files changed, 259 insertions(+), 12 deletions(-) create mode 100644 fs/xfs/linux-2.6/xfs_discard.c create mode 100644 fs/xfs/linux-2.6/xfs_discard.h (limited to 'fs') diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index 0dce969d6cad..faca44997099 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -98,6 +98,7 @@ xfs-y += $(addprefix $(XFS_LINUX)/, \ kmem.o \ xfs_aops.o \ xfs_buf.o \ + xfs_discard.o \ xfs_export.o \ xfs_file.o \ xfs_fs_subr.o \ diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c new file mode 100644 index 000000000000..05201ae719e5 --- /dev/null +++ b/fs/xfs/linux-2.6/xfs_discard.c @@ -0,0 +1,191 @@ +/* + * Copyright (C) 2010 Red Hat, Inc. + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include "xfs.h" +#include "xfs_sb.h" +#include "xfs_inum.h" +#include "xfs_log.h" +#include "xfs_ag.h" +#include "xfs_mount.h" +#include "xfs_quota.h" +#include "xfs_trans.h" +#include "xfs_alloc_btree.h" +#include "xfs_bmap_btree.h" +#include "xfs_ialloc_btree.h" +#include "xfs_btree.h" +#include "xfs_inode.h" +#include "xfs_alloc.h" +#include "xfs_error.h" +#include "xfs_discard.h" +#include "xfs_trace.h" + +STATIC int +xfs_trim_extents( + struct xfs_mount *mp, + xfs_agnumber_t agno, + xfs_fsblock_t start, + xfs_fsblock_t len, + xfs_fsblock_t minlen, + __uint64_t *blocks_trimmed) +{ + struct block_device *bdev = mp->m_ddev_targp->bt_bdev; + struct xfs_btree_cur *cur; + struct xfs_buf *agbp; + struct xfs_perag *pag; + int error; + int i; + + pag = xfs_perag_get(mp, agno); + + error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); + if (error || !agbp) + goto out_put_perag; + + cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT); + + /* + * Force out the log. This means any transactions that might have freed + * space before we took the AGF buffer lock are now on disk, and the + * volatile disk cache is flushed. + */ + xfs_log_force(mp, XFS_LOG_SYNC); + + /* + * Look up the longest btree in the AGF and start with it. + */ + error = xfs_alloc_lookup_le(cur, 0, + XFS_BUF_TO_AGF(agbp)->agf_longest, &i); + if (error) + goto out_del_cursor; + + /* + * Loop until we are done with all extents that are large + * enough to be worth discarding. + */ + while (i) { + xfs_agblock_t fbno; + xfs_extlen_t flen; + + error = xfs_alloc_get_rec(cur, &fbno, &flen, &i); + if (error) + goto out_del_cursor; + XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor); + ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest); + + /* + * Too small? Give up. 
+ */ + if (flen < minlen) { + trace_xfs_discard_toosmall(mp, agno, fbno, flen); + goto out_del_cursor; + } + + /* + * If the extent is entirely outside of the range we are + * supposed to discard skip it. Do not bother to trim + * down partially overlapping ranges for now. + */ + if (XFS_AGB_TO_FSB(mp, agno, fbno) + flen < start || + XFS_AGB_TO_FSB(mp, agno, fbno) >= start + len) { + trace_xfs_discard_exclude(mp, agno, fbno, flen); + goto next_extent; + } + + /* + * If any blocks in the range are still busy, skip the + * discard and try again the next time. + */ + if (xfs_alloc_busy_search(mp, agno, fbno, flen)) { + trace_xfs_discard_busy(mp, agno, fbno, flen); + goto next_extent; + } + + trace_xfs_discard_extent(mp, agno, fbno, flen); + error = -blkdev_issue_discard(bdev, + XFS_AGB_TO_DADDR(mp, agno, fbno), + XFS_FSB_TO_BB(mp, flen), + GFP_NOFS, 0); + if (error) + goto out_del_cursor; + *blocks_trimmed += flen; + +next_extent: + error = xfs_btree_decrement(cur, 0, &i); + if (error) + goto out_del_cursor; + } + +out_del_cursor: + xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); + xfs_buf_relse(agbp); +out_put_perag: + xfs_perag_put(pag); + return error; +} + +int +xfs_ioc_trim( + struct xfs_mount *mp, + struct fstrim_range __user *urange) +{ + struct request_queue *q = mp->m_ddev_targp->bt_bdev->bd_disk->queue; + unsigned int granularity = q->limits.discard_granularity; + struct fstrim_range range; + xfs_fsblock_t start, len, minlen; + xfs_agnumber_t start_agno, end_agno, agno; + __uint64_t blocks_trimmed = 0; + int error, last_error = 0; + + if (!capable(CAP_SYS_ADMIN)) + return -XFS_ERROR(EPERM); + if (copy_from_user(&range, urange, sizeof(range))) + return -XFS_ERROR(EFAULT); + + /* + * Truncating down the len isn't actually quite correct, but using + * XFS_B_TO_FSB would mean we trivially get overflows for values + * of ULLONG_MAX or slightly lower. And ULLONG_MAX is the default + * used by the fstrim application. In the end it really doesn't + * matter as trimming blocks is an advisory interface. 
+ */ + start = XFS_B_TO_FSBT(mp, range.start); + len = XFS_B_TO_FSBT(mp, range.len); + minlen = XFS_B_TO_FSB(mp, max_t(u64, granularity, range.minlen)); + + start_agno = XFS_FSB_TO_AGNO(mp, start); + if (start_agno >= mp->m_sb.sb_agcount) + return -XFS_ERROR(EINVAL); + + end_agno = XFS_FSB_TO_AGNO(mp, start + len); + if (end_agno >= mp->m_sb.sb_agcount) + end_agno = mp->m_sb.sb_agcount - 1; + + for (agno = start_agno; agno <= end_agno; agno++) { + error = -xfs_trim_extents(mp, agno, start, len, minlen, + &blocks_trimmed); + if (error) + last_error = error; + } + + if (last_error) + return last_error; + + range.len = XFS_FSB_TO_B(mp, blocks_trimmed); + if (copy_to_user(urange, &range, sizeof(range))) + return -XFS_ERROR(EFAULT); + return 0; +} diff --git a/fs/xfs/linux-2.6/xfs_discard.h b/fs/xfs/linux-2.6/xfs_discard.h new file mode 100644 index 000000000000..e82b6dd3e127 --- /dev/null +++ b/fs/xfs/linux-2.6/xfs_discard.h @@ -0,0 +1,8 @@ +#ifndef XFS_DISCARD_H +#define XFS_DISCARD_H 1 + +struct fstrim_range; + +extern int xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *); + +#endif /* XFS_DISCARD_H */ diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index ad442d9e392e..b06ede1d0bed 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -39,6 +39,7 @@ #include "xfs_dfrag.h" #include "xfs_fsops.h" #include "xfs_vnodeops.h" +#include "xfs_discard.h" #include "xfs_quota.h" #include "xfs_inode_item.h" #include "xfs_export.h" @@ -1294,6 +1295,8 @@ xfs_file_ioctl( trace_xfs_file_ioctl(ip); switch (cmd) { + case FITRIM: + return xfs_ioc_trim(mp, arg); case XFS_IOC_ALLOCSP: case XFS_IOC_FREESP: case XFS_IOC_RESVSP: diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h index 647af2a2e7aa..2d0bcb479075 100644 --- a/fs/xfs/linux-2.6/xfs_trace.h +++ b/fs/xfs/linux-2.6/xfs_trace.h @@ -1759,6 +1759,39 @@ DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover); DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel); DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip); +DECLARE_EVENT_CLASS(xfs_discard_class, + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, + xfs_agblock_t agbno, xfs_extlen_t len), + TP_ARGS(mp, agno, agbno, len), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_agnumber_t, agno) + __field(xfs_agblock_t, agbno) + __field(xfs_extlen_t, len) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->agno = agno; + __entry->agbno = agbno; + __entry->len = len; + ), + TP_printk("dev %d:%d agno %u agbno %u len %u\n", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->agno, + __entry->agbno, + __entry->len) +) + +#define DEFINE_DISCARD_EVENT(name) \ +DEFINE_EVENT(xfs_discard_class, name, \ + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \ + xfs_agblock_t agbno, xfs_extlen_t len), \ + TP_ARGS(mp, agno, agbno, len)) +DEFINE_DISCARD_EVENT(xfs_discard_extent); +DEFINE_DISCARD_EVENT(xfs_discard_toosmall); +DEFINE_DISCARD_EVENT(xfs_discard_exclude); +DEFINE_DISCARD_EVENT(xfs_discard_busy); + #endif /* _TRACE_XFS_H */ #undef TRACE_INCLUDE_PATH diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index fa8723f5870a..f3227984a9bf 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c @@ -41,10 +41,6 @@ #define XFSA_FIXUP_BNO_OK 1 #define XFSA_FIXUP_CNT_OK 2 -static int -xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno, - xfs_agblock_t bno, xfs_extlen_t len); - /* * Prototypes for per-ag allocation routines */ @@ -94,7 +90,7 @@ xfs_alloc_lookup_ge( * Lookup the first 
record less than or equal to [bno, len] * in the btree given by cur. */ -STATIC int /* error */ +int /* error */ xfs_alloc_lookup_le( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t bno, /* starting block of extent */ @@ -127,7 +123,7 @@ xfs_alloc_update( /* * Get the data from the pointed-to record. */ -STATIC int /* error */ +int /* error */ xfs_alloc_get_rec( struct xfs_btree_cur *cur, /* btree cursor */ xfs_agblock_t *bno, /* output: starting block of extent */ @@ -2615,7 +2611,7 @@ restart: * will require a synchronous transaction, but it can still be * used to distinguish between a partial or exact match. */ -static int +int xfs_alloc_busy_search( struct xfs_mount *mp, xfs_agnumber_t agno, diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h index 895009a97271..0ab56b32c7eb 100644 --- a/fs/xfs/xfs_alloc.h +++ b/fs/xfs/xfs_alloc.h @@ -19,6 +19,7 @@ #define __XFS_ALLOC_H__ struct xfs_buf; +struct xfs_btree_cur; struct xfs_mount; struct xfs_perag; struct xfs_trans; @@ -118,16 +119,16 @@ xfs_alloc_longest_free_extent(struct xfs_mount *mp, struct xfs_perag *pag); #ifdef __KERNEL__ - void -xfs_alloc_busy_insert(xfs_trans_t *tp, - xfs_agnumber_t agno, - xfs_agblock_t bno, - xfs_extlen_t len); +xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno, + xfs_agblock_t bno, xfs_extlen_t len); void xfs_alloc_busy_clear(struct xfs_mount *mp, struct xfs_busy_extent *busyp); +int +xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno, + xfs_agblock_t bno, xfs_extlen_t len); #endif /* __KERNEL__ */ /* @@ -205,4 +206,18 @@ xfs_free_extent( xfs_fsblock_t bno, /* starting block number of extent */ xfs_extlen_t len); /* length of extent */ +int /* error */ +xfs_alloc_lookup_le( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t bno, /* starting block of extent */ + xfs_extlen_t len, /* length of extent */ + int *stat); /* success/failure */ + +int /* error */ +xfs_alloc_get_rec( + struct xfs_btree_cur *cur, /* btree cursor */ + xfs_agblock_t *bno, /* output: starting block of extent */ + xfs_extlen_t *len, /* output: length of extent */ + int *stat); /* output: success/failure */ + #endif /* __XFS_ALLOC_H__ */ -- cgit v1.2.2 From bfc60177f8ab509bc225becbb58f7e53a0e33e81 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 7 Jan 2011 13:02:23 +0000 Subject: xfs: fix error handling for synchronous writes If we get an IO error on a synchronous superblock write, we attach an error release function to it so that when the last reference goes away the release function is called and the buffer is invalidated and unlocked. The buffer is left locked until the release function is called so that other concurrent users of the buffer will be locked out until the buffer error is fully processed. Unfortunately, for the superblock buffer the filesystem itself holds a reference to the buffer which prevents the reference count from dropping to zero and the release function being called. As a result, once an IO error occurs on a sync write, the buffer will never be unlocked and all future attempts to lock the buffer will hang. To make matters worse, this problem is not unique to such buffers; if there is a concurrent _xfs_buf_find() running, the lookup will grab a reference to the buffer and then wait on the buffer lock, preventing the reference count from ever falling to zero and hence unlocking the buffer. As such, the whole b_relse function implementation is broken because it cannot rely on the buffer reference count falling to zero to unlock the errored buffer.
The synchronous write error path is the only path that uses this callback - it is used to ensure that the synchronous waiter gets the buffer error before the error state is cleared from the buffer by the release function. Given that the only sychronous buffer writes now go through xfs_bwrite and the error path in question can only occur for a write of a dirty, logged buffer, we can move most of the b_relse processing to happen inline in xfs_buf_iodone_callbacks, just like a normal I/O completion. In addition to that we make sure the error is not cleared in xfs_buf_iodone_callbacks, so that xfs_bwrite can reliably check it. Given that xfs_bwrite keeps the buffer locked until it has waited for it and checked the error this allows to reliably propagate the error to the caller, and make sure that the buffer is reliably unlocked. Given that xfs_buf_iodone_callbacks was the only instance of the b_relse callback we can remove it entirely. Based on earlier patches by Dave Chinner and Ajeet Yadav. Signed-off-by: Christoph Hellwig Reported-by: Ajeet Yadav Reviewed-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_buf.c | 7 +-- fs/xfs/linux-2.6/xfs_buf.h | 7 +-- fs/xfs/xfs_buf_item.c | 151 +++++++++++++++------------------------------ 3 files changed, 51 insertions(+), 114 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 92f1f2acc6ab..ac1c7e8378dd 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -896,7 +896,6 @@ xfs_buf_rele( trace_xfs_buf_rele(bp, _RET_IP_); if (!pag) { - ASSERT(!bp->b_relse); ASSERT(list_empty(&bp->b_lru)); ASSERT(RB_EMPTY_NODE(&bp->b_rbnode)); if (atomic_dec_and_test(&bp->b_hold)) @@ -908,11 +907,7 @@ xfs_buf_rele( ASSERT(atomic_read(&bp->b_hold) > 0); if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) { - if (bp->b_relse) { - atomic_inc(&bp->b_hold); - spin_unlock(&pag->pag_buf_lock); - bp->b_relse(bp); - } else if (!(bp->b_flags & XBF_STALE) && + if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { xfs_buf_lru_add(bp); spin_unlock(&pag->pag_buf_lock); diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h index a76c2428faff..cbe65950e524 100644 --- a/fs/xfs/linux-2.6/xfs_buf.h +++ b/fs/xfs/linux-2.6/xfs_buf.h @@ -152,8 +152,6 @@ typedef struct xfs_buftarg { struct xfs_buf; typedef void (*xfs_buf_iodone_t)(struct xfs_buf *); -typedef void (*xfs_buf_relse_t)(struct xfs_buf *); -typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *); #define XB_PAGES 2 @@ -183,7 +181,6 @@ typedef struct xfs_buf { void *b_addr; /* virtual address of buffer */ struct work_struct b_iodone_work; xfs_buf_iodone_t b_iodone; /* I/O completion function */ - xfs_buf_relse_t b_relse; /* releasing function */ struct completion b_iowait; /* queue for I/O waiters */ void *b_fspriv; void *b_fspriv2; @@ -323,7 +320,6 @@ void xfs_buf_stale(struct xfs_buf *bp); #define XFS_BUF_FSPRIVATE2(bp, type) ((type)(bp)->b_fspriv2) #define XFS_BUF_SET_FSPRIVATE2(bp, val) ((bp)->b_fspriv2 = (void*)(val)) #define XFS_BUF_SET_START(bp) do { } while (0) -#define XFS_BUF_SET_BRELSE_FUNC(bp, func) ((bp)->b_relse = (func)) #define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr) #define XFS_BUF_SET_PTR(bp, val, cnt) xfs_buf_associate_memory(bp, val, cnt) @@ -360,8 +356,7 @@ xfs_buf_set_ref( static inline void xfs_buf_relse(xfs_buf_t *bp) { - if (!bp->b_relse) - xfs_buf_unlock(bp); + xfs_buf_unlock(bp); xfs_buf_rele(bp); } diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 
ed2b65f3f8b9..98c6f73b6752 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -141,7 +141,6 @@ xfs_buf_item_log_check( #define xfs_buf_item_log_check(x) #endif -STATIC void xfs_buf_error_relse(xfs_buf_t *bp); STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp); /* @@ -959,128 +958,76 @@ xfs_buf_do_callbacks( */ void xfs_buf_iodone_callbacks( - xfs_buf_t *bp) + struct xfs_buf *bp) { - xfs_log_item_t *lip; - static ulong lasttime; - static xfs_buftarg_t *lasttarg; - xfs_mount_t *mp; + struct xfs_log_item *lip = bp->b_fspriv; + struct xfs_mount *mp = lip->li_mountp; + static ulong lasttime; + static xfs_buftarg_t *lasttarg; - ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL); - lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); + if (likely(!XFS_BUF_GETERROR(bp))) + goto do_callbacks; - if (XFS_BUF_GETERROR(bp) != 0) { - /* - * If we've already decided to shutdown the filesystem - * because of IO errors, there's no point in giving this - * a retry. - */ - mp = lip->li_mountp; - if (XFS_FORCED_SHUTDOWN(mp)) { - ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp); - XFS_BUF_SUPER_STALE(bp); - trace_xfs_buf_item_iodone(bp, _RET_IP_); - xfs_buf_do_callbacks(bp); - XFS_BUF_SET_FSPRIVATE(bp, NULL); - XFS_BUF_CLR_IODONE_FUNC(bp); - xfs_buf_ioend(bp, 0); - return; - } + /* + * If we've already decided to shutdown the filesystem because of + * I/O errors, there's no point in giving this a retry. + */ + if (XFS_FORCED_SHUTDOWN(mp)) { + XFS_BUF_SUPER_STALE(bp); + trace_xfs_buf_item_iodone(bp, _RET_IP_); + goto do_callbacks; + } - if ((XFS_BUF_TARGET(bp) != lasttarg) || - (time_after(jiffies, (lasttime + 5*HZ)))) { - lasttime = jiffies; - cmn_err(CE_ALERT, "Device %s, XFS metadata write error" - " block 0x%llx in %s", - XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), - (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname); - } - lasttarg = XFS_BUF_TARGET(bp); + if (XFS_BUF_TARGET(bp) != lasttarg || + time_after(jiffies, (lasttime + 5*HZ))) { + lasttime = jiffies; + cmn_err(CE_ALERT, "Device %s, XFS metadata write error" + " block 0x%llx in %s", + XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), + (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname); + } + lasttarg = XFS_BUF_TARGET(bp); - if (XFS_BUF_ISASYNC(bp)) { - /* - * If the write was asynchronous then noone will be - * looking for the error. Clear the error state - * and write the buffer out again delayed write. - * - * XXXsup This is OK, so long as we catch these - * before we start the umount; we don't want these - * DELWRI metadata bufs to be hanging around. - */ - XFS_BUF_ERROR(bp,0); /* errno of 0 unsets the flag */ - - if (!(XFS_BUF_ISSTALE(bp))) { - XFS_BUF_DELAYWRITE(bp); - XFS_BUF_DONE(bp); - XFS_BUF_SET_START(bp); - } - ASSERT(XFS_BUF_IODONE_FUNC(bp)); - trace_xfs_buf_item_iodone_async(bp, _RET_IP_); - xfs_buf_relse(bp); - } else { - /* - * If the write of the buffer was not asynchronous, - * then we want to make sure to return the error - * to the caller of bwrite(). Because of this we - * cannot clear the B_ERROR state at this point. - * Instead we install a callback function that - * will be called when the buffer is released, and - * that routine will clear the error state and - * set the buffer to be written out again after - * some delay. - */ - /* We actually overwrite the existing b-relse - function at times, but we're gonna be shutting down - anyway. */ - XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse); + /* + * If the write was asynchronous then noone will be looking for the + * error. Clear the error state and write the buffer out again. 
+ * + * During sync or umount we'll write all pending buffers again + * synchronous, which will catch these errors if they keep hanging + * around. + */ + if (XFS_BUF_ISASYNC(bp)) { + XFS_BUF_ERROR(bp, 0); /* errno of 0 unsets the flag */ + + if (!XFS_BUF_ISSTALE(bp)) { + XFS_BUF_DELAYWRITE(bp); XFS_BUF_DONE(bp); - XFS_BUF_FINISH_IOWAIT(bp); + XFS_BUF_SET_START(bp); } + ASSERT(XFS_BUF_IODONE_FUNC(bp)); + trace_xfs_buf_item_iodone_async(bp, _RET_IP_); + xfs_buf_relse(bp); return; } - xfs_buf_do_callbacks(bp); - XFS_BUF_SET_FSPRIVATE(bp, NULL); - XFS_BUF_CLR_IODONE_FUNC(bp); - xfs_buf_ioend(bp, 0); -} - -/* - * This is a callback routine attached to a buffer which gets an error - * when being written out synchronously. - */ -STATIC void -xfs_buf_error_relse( - xfs_buf_t *bp) -{ - xfs_log_item_t *lip; - xfs_mount_t *mp; - - lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); - mp = (xfs_mount_t *)lip->li_mountp; - ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp); - + /* + * If the write of the buffer was synchronous, we want to make + * sure to return the error to the caller of xfs_bwrite(). + */ XFS_BUF_STALE(bp); XFS_BUF_DONE(bp); XFS_BUF_UNDELAYWRITE(bp); - XFS_BUF_ERROR(bp,0); trace_xfs_buf_error_relse(bp, _RET_IP_); + xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); - if (! XFS_FORCED_SHUTDOWN(mp)) - xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); - /* - * We have to unpin the pinned buffers so do the - * callbacks. - */ +do_callbacks: xfs_buf_do_callbacks(bp); XFS_BUF_SET_FSPRIVATE(bp, NULL); XFS_BUF_CLR_IODONE_FUNC(bp); - XFS_BUF_SET_BRELSE_FUNC(bp,NULL); - xfs_buf_relse(bp); + xfs_buf_ioend(bp, 0); } - /* * This is the iodone() function for buffers which have been * logged. It is called when they are eventually flushed out. -- cgit v1.2.2 From 1884bd8354c9aec4ca501dc4773c13ad2a09af7b Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sat, 25 Dec 2010 20:14:53 +0000 Subject: xfs: fix an assignment within an ASSERT() In fs/xfs/xfs_trans.c::xfs_trans_unreserve_and_mod_sb() at the out: label we have this: ASSERT(error = 0); I believe a comparison was intended, not an assignment. If I'm right, the patch below fixes that up. Signed-off-by: Jesper Juhl Signed-off-by: Alex Elder --- fs/xfs/xfs_trans.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index f80a067a4658..33dbc4e0ad62 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -1137,7 +1137,7 @@ out_undo_fdblocks: if (blkdelta) xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd); out: - ASSERT(error = 0); + ASSERT(error == 0); return; } -- cgit v1.2.2 From 65a84a0f7567ea244e5246e642920260cfc2744a Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Fri, 7 Jan 2011 03:30:41 +0000 Subject: xfs: Add log level to assertion printk I received a ppc64 bug report involving xfs but the assertion was filtered out by the console log level. Use KERN_CRIT to ensure it makes it out. 
Signed-off-by: Anton Blanchard Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/support/debug.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c index 975aa10e1a47..86162e5f9a21 100644 --- a/fs/xfs/support/debug.c +++ b/fs/xfs/support/debug.c @@ -104,7 +104,8 @@ xfs_fs_vcmn_err( void assfail(char *expr, char *file, int line) { - printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line); + printk(KERN_CRIT "Assertion failed: %s, file: %s, line: %d\n", expr, + file, line); BUG(); } -- cgit v1.2.2 From 73efe4a4ddf8eb2b1cc7039e8a66a23a424961af Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 12 Jan 2011 00:35:42 +0000 Subject: xfs: prevent NMI timeouts in cmn_err We currently have a global error message buffer in cmn_err that is protected by a spin lock that disables interrupts. Recently there have been reports of NMI timeouts occurring when the console is being flooded by SCSI error reports due to cmn_err() getting stuck trying to print to the console while holding this lock (i.e. with interrupts disabled). The NMI watchdog is seeing this CPU as non-responding and so is triggering a panic. While the trigger for the reported case is SCSI errors, pretty much anything that spams the kernel log could cause this to occur. Realistically the only reason that we have the intermediate message buffer is to prepend the correct kernel log level prefix to the log message. The only reason we have the lock is to protect the global message buffer and the only reason the message buffer is global is to keep it off the stack. Hence if we can avoid needing a global message buffer we avoid needing the lock, and we can do this with a small amount of cleanup and some preprocessor tricks: 1. clean up xfs_cmn_err() panic mask functionality to avoid needing debug code in xfs_cmn_err() 2. remove the couple of "!" message prefixes that still exist that the existing cmn_err() code steps over. 3. redefine CE_* levels directly to KERN_* 4. redefine cmn_err() and friends to use printk() directly via variable argument length macros. By doing this, we can completely remove the cmn_err() code and the lock that is causing the problems, and rely solely on printk() serialisation to ensure that we don't get garbled messages. A series of followup patches is really needed to clean up all the cmn_err() calls and related messages properly, but that results in a series that is not easily back portable to enterprise kernels. Hence this initial fix is only to address the direct problem in the lowest impact way possible.
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_sysctl.c | 23 ++++++++- fs/xfs/support/debug.c | 109 +++++++++++++++++++----------------------- fs/xfs/support/debug.h | 25 ++++++---- fs/xfs/xfs_error.c | 31 ------------ fs/xfs/xfs_error.h | 18 +++---- fs/xfs/xfs_log.c | 2 +- fs/xfs/xfs_log_recover.c | 2 +- 7 files changed, 96 insertions(+), 114 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c index 7bb5092d6ae4..ee3cee097e7e 100644 --- a/fs/xfs/linux-2.6/xfs_sysctl.c +++ b/fs/xfs/linux-2.6/xfs_sysctl.c @@ -18,6 +18,7 @@ #include "xfs.h" #include #include +#include "xfs_error.h" static struct ctl_table_header *xfs_table_header; @@ -51,6 +52,26 @@ xfs_stats_clear_proc_handler( return ret; } + +STATIC int +xfs_panic_mask_proc_handler( + ctl_table *ctl, + int write, + void __user *buffer, + size_t *lenp, + loff_t *ppos) +{ + int ret, *valp = ctl->data; + + ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos); + if (!ret && write) { + xfs_panic_mask = *valp; +#ifdef DEBUG + xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES); +#endif + } + return ret; +} #endif /* CONFIG_PROC_FS */ static ctl_table xfs_table[] = { @@ -77,7 +98,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.panic_mask.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = xfs_panic_mask_proc_handler, .extra1 = &xfs_params.panic_mask.min, .extra2 = &xfs_params.panic_mask.max }, diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c index 86162e5f9a21..e6cf955ec0fc 100644 --- a/fs/xfs/support/debug.c +++ b/fs/xfs/support/debug.c @@ -25,80 +25,71 @@ #include "xfs_mount.h" #include "xfs_error.h" -static char message[1024]; /* keep it off the stack */ -static DEFINE_SPINLOCK(xfs_err_lock); - -/* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */ -#define XFS_MAX_ERR_LEVEL 7 -#define XFS_ERR_MASK ((1 << 3) - 1) -static const char * const err_level[XFS_MAX_ERR_LEVEL+1] = - {KERN_EMERG, KERN_ALERT, KERN_CRIT, - KERN_ERR, KERN_WARNING, KERN_NOTICE, - KERN_INFO, KERN_DEBUG}; - void -cmn_err(register int level, char *fmt, ...) +cmn_err( + const char *lvl, + const char *fmt, + ...) { - char *fp = fmt; - int len; - ulong flags; - va_list ap; - - level &= XFS_ERR_MASK; - if (level > XFS_MAX_ERR_LEVEL) - level = XFS_MAX_ERR_LEVEL; - spin_lock_irqsave(&xfs_err_lock,flags); - va_start(ap, fmt); - if (*fmt == '!') fp++; - len = vsnprintf(message, sizeof(message), fp, ap); - if (len >= sizeof(message)) - len = sizeof(message) - 1; - if (message[len-1] == '\n') - message[len-1] = 0; - printk("%s%s\n", err_level[level], message); - va_end(ap); - spin_unlock_irqrestore(&xfs_err_lock,flags); - BUG_ON(level == CE_PANIC); + struct va_format vaf; + va_list args; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + + printk("%s%pV", lvl, &vaf); + va_end(args); + + BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0); } void -xfs_fs_vcmn_err( - int level, +xfs_fs_cmn_err( + const char *lvl, struct xfs_mount *mp, - char *fmt, - va_list ap) + const char *fmt, + ...) 
{ - unsigned long flags; - int len = 0; + struct va_format vaf; + va_list args; - level &= XFS_ERR_MASK; - if (level > XFS_MAX_ERR_LEVEL) - level = XFS_MAX_ERR_LEVEL; + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; - spin_lock_irqsave(&xfs_err_lock,flags); + printk("%sFilesystem %s: %pV", lvl, mp->m_fsname, &vaf); + va_end(args); - if (mp) { - len = sprintf(message, "Filesystem \"%s\": ", mp->m_fsname); + BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0); +} + +/* All callers to xfs_cmn_err use CE_ALERT, so don't bother testing lvl */ +void +xfs_cmn_err( + int panic_tag, + const char *lvl, + struct xfs_mount *mp, + const char *fmt, + ...) +{ + struct va_format vaf; + va_list args; + int panic = 0; - /* - * Skip the printk if we can't print anything useful - * due to an over-long device name. - */ - if (len >= sizeof(message)) - goto out; + if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) { + printk(KERN_ALERT "XFS: Transforming an alert into a BUG."); + panic = 1; } - len = vsnprintf(message + len, sizeof(message) - len, fmt, ap); - if (len >= sizeof(message)) - len = sizeof(message) - 1; - if (message[len-1] == '\n') - message[len-1] = 0; + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; - printk("%s%s\n", err_level[level], message); - out: - spin_unlock_irqrestore(&xfs_err_lock,flags); + printk(KERN_ALERT "Filesystem %s: %pV", mp->m_fsname, &vaf); + va_end(args); - BUG_ON(level == CE_PANIC); + BUG_ON(panic); } void diff --git a/fs/xfs/support/debug.h b/fs/xfs/support/debug.h index d2d20462fd4f..05699f67d475 100644 --- a/fs/xfs/support/debug.h +++ b/fs/xfs/support/debug.h @@ -20,15 +20,22 @@ #include -#define CE_DEBUG 7 /* debug */ -#define CE_CONT 6 /* continuation */ -#define CE_NOTE 5 /* notice */ -#define CE_WARN 4 /* warning */ -#define CE_ALERT 1 /* alert */ -#define CE_PANIC 0 /* panic */ - -extern void cmn_err(int, char *, ...) - __attribute__ ((format (printf, 2, 3))); +struct xfs_mount; + +#define CE_DEBUG KERN_DEBUG +#define CE_CONT KERN_INFO +#define CE_NOTE KERN_NOTICE +#define CE_WARN KERN_WARNING +#define CE_ALERT KERN_ALERT +#define CE_PANIC KERN_EMERG + +void cmn_err(const char *lvl, const char *fmt, ...) + __attribute__ ((format (printf, 2, 3))); +void xfs_fs_cmn_err( const char *lvl, struct xfs_mount *mp, + const char *fmt, ...) __attribute__ ((format (printf, 3, 4))); +void xfs_cmn_err( int panic_tag, const char *lvl, struct xfs_mount *mp, + const char *fmt, ...) __attribute__ ((format (printf, 4, 5))); + extern void assfail(char *expr, char *f, int l); #define ASSERT_ALWAYS(expr) \ diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index c78cc6a3d87c..4c7db74a05f7 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c @@ -152,37 +152,6 @@ xfs_errortag_clearall(xfs_mount_t *mp, int loud) } #endif /* DEBUG */ - -void -xfs_fs_cmn_err(int level, xfs_mount_t *mp, char *fmt, ...) -{ - va_list ap; - - va_start(ap, fmt); - xfs_fs_vcmn_err(level, mp, fmt, ap); - va_end(ap); -} - -void -xfs_cmn_err(int panic_tag, int level, xfs_mount_t *mp, char *fmt, ...) 
-{ - va_list ap; - -#ifdef DEBUG - xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES); -#endif - - if (xfs_panic_mask && (xfs_panic_mask & panic_tag) - && (level & CE_ALERT)) { - level &= ~CE_ALERT; - level |= CE_PANIC; - cmn_err(CE_ALERT, "XFS: Transforming an alert into a BUG."); - } - va_start(ap, fmt); - xfs_fs_vcmn_err(level, mp, fmt, ap); - va_end(ap); -} - void xfs_error_report( const char *tag, diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h index f338847f80b8..10dce5475f02 100644 --- a/fs/xfs/xfs_error.h +++ b/fs/xfs/xfs_error.h @@ -136,8 +136,8 @@ extern int xfs_error_test(int, int *, char *, int, char *, unsigned long); xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \ (rf)))) -extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp); -extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud); +extern int xfs_errortag_add(int error_tag, struct xfs_mount *mp); +extern int xfs_errortag_clearall(struct xfs_mount *mp, int loud); #else #define XFS_TEST_ERROR(expr, mp, tag, rf) (expr) #define xfs_errortag_add(tag, mp) (ENOSYS) @@ -162,21 +162,15 @@ extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud); struct xfs_mount; -extern void xfs_fs_vcmn_err(int level, struct xfs_mount *mp, - char *fmt, va_list ap) - __attribute__ ((format (printf, 3, 0))); -extern void xfs_cmn_err(int panic_tag, int level, struct xfs_mount *mp, - char *fmt, ...) - __attribute__ ((format (printf, 4, 5))); -extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...) - __attribute__ ((format (printf, 3, 4))); - extern void xfs_hex_dump(void *p, int length); #define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \ xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args) #define xfs_fs_mount_cmn_err(f, fmt, args...) \ - ((f & XFS_MFSI_QUIET)? (void)0 : cmn_err(CE_WARN, "XFS: " fmt, ## args)) + do { \ + if (!(f & XFS_MFSI_QUIET)) \ + cmn_err(CE_WARN, "XFS: " fmt, ## args); \ + } while (0) #endif /* __XFS_ERROR_H__ */ diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 0bf24b11d0c4..ae6fef1ff563 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -377,7 +377,7 @@ xfs_log_mount( cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname); else { cmn_err(CE_NOTE, - "!Mounting filesystem \"%s\" in no-recovery mode. Filesystem will be inconsistent.", + "Mounting filesystem \"%s\" in no-recovery mode. Filesystem will be inconsistent.", mp->m_fsname); ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); } diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 204d8e5fa7fa..aa0ebb776903 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -3800,7 +3800,7 @@ xlog_recover_finish( log->l_flags &= ~XLOG_RECOVERY_NEEDED; } else { cmn_err(CE_DEBUG, - "!Ending clean XFS mount for filesystem: %s\n", + "Ending clean XFS mount for filesystem: %s\n", log->l_mp->m_fsname); } return 0; -- cgit v1.2.2 From 2818ef50c4dc103ce52e12d14ce2dfbde5268120 Mon Sep 17 00:00:00 2001 From: Anton Altaparmakov Date: Wed, 12 Jan 2011 10:34:35 +0000 Subject: NTFS: writev() fix and maintenance/contact details update Fix writev() to not keep writing the first segment over and over again instead of moving onto subsequent segments and update the NTFS entry in MAINTAINERS to reflect that Tuxera Inc. now supports the NTFS driver. 
Signed-off-by: Anton Altaparmakov Signed-off-by: Linus Torvalds --- fs/ntfs/Makefile | 2 +- fs/ntfs/file.c | 35 +++++++++++++++++------------------ fs/ntfs/super.c | 6 +++--- 3 files changed, 21 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile index 58b6be992544..4ff028fcfd6e 100644 --- a/fs/ntfs/Makefile +++ b/fs/ntfs/Makefile @@ -6,7 +6,7 @@ ntfs-objs := aops.o attrib.o collate.o compress.o debug.o dir.o file.o \ index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \ unistr.o upcase.o -EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.29\" +EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.30\" ifeq ($(CONFIG_NTFS_DEBUG),y) EXTRA_CFLAGS += -DDEBUG diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 113ebd9f25a4..f4b1057abdd2 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -1,7 +1,7 @@ /* * file.c - NTFS kernel file operations. Part of the Linux-NTFS project. * - * Copyright (c) 2001-2007 Anton Altaparmakov + * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc. * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published @@ -1380,15 +1380,14 @@ static inline void ntfs_set_next_iovec(const struct iovec **iovp, * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s * single-segment behaviour. * - * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both - * when atomic and when not atomic. This is ok because - * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic() - * and it is ok to call this when non-atomic. - * Infact, the only difference between __copy_from_user_inatomic() and + * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when + * atomic and when not atomic. This is ok because it calls + * __copy_from_user_inatomic() and it is ok to call this when non-atomic. In + * fact, the only difference between __copy_from_user_inatomic() and * __copy_from_user() is that the latter calls might_sleep() and the former - * should not zero the tail of the buffer on error. And on many - * architectures __copy_from_user_inatomic() is just defined to - * __copy_from_user() so it makes no difference at all on those architectures. + * should not zero the tail of the buffer on error. And on many architectures + * __copy_from_user_inatomic() is just defined to __copy_from_user() so it + * makes no difference at all on those architectures. */ static inline size_t ntfs_copy_from_user_iovec(struct page **pages, unsigned nr_pages, unsigned ofs, const struct iovec **iov, @@ -1409,28 +1408,28 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages, if (unlikely(copied != len)) { /* Do it the slow way. */ addr = kmap(*pages); - copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs, - *iov, *iov_ofs, len); - /* - * Zero the rest of the target like __copy_from_user(). - */ - memset(addr + ofs + copied, 0, len - copied); - kunmap(*pages); + copied = __ntfs_copy_from_user_iovec_inatomic(addr + + ofs, *iov, *iov_ofs, len); if (unlikely(copied != len)) goto err_out; + kunmap(*pages); } total += len; + ntfs_set_next_iovec(iov, iov_ofs, len); bytes -= len; if (!bytes) break; - ntfs_set_next_iovec(iov, iov_ofs, len); ofs = 0; } while (++pages < last_page); out: return total; err_out: - total += copied; + BUG_ON(copied > len); /* Zero the rest of the target like __copy_from_user(). 
*/ + memset(addr + ofs + copied, 0, len - copied); + kunmap(*pages); + total += copied; + ntfs_set_next_iovec(iov, iov_ofs, copied); while (++pages < last_page) { bytes -= len; if (!bytes) diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index a30ecacc01f2..29099a07b9fe 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -1,7 +1,7 @@ /* * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project. * - * Copyright (c) 2001-2007 Anton Altaparmakov + * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc. * Copyright (c) 2001,2002 Richard Russon * * This program/include file is free software; you can redistribute it and/or @@ -3193,8 +3193,8 @@ static void __exit exit_ntfs_fs(void) ntfs_sysctl(0); } -MODULE_AUTHOR("Anton Altaparmakov "); -MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2007 Anton Altaparmakov"); +MODULE_AUTHOR("Anton Altaparmakov "); +MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc."); MODULE_VERSION(NTFS_VERSION); MODULE_LICENSE("GPL"); #ifdef DEBUG -- cgit v1.2.2 From f00c9e44ad1a9660fe8cd3ca15b6cd9497172eab Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 15 Sep 2010 17:38:58 +0200 Subject: quota: Fix deadlock during path resolution As Al Viro pointed out path resolution during Q_QUOTAON calls to quotactl is prone to deadlocks. We hold s_umount semaphore for reading during the path resolution and resolution itself may need to acquire the semaphore for writing when e. g. autofs mountpoint is passed. Solve the problem by performing the resolution before we get hold of the superblock (and thus s_umount semaphore). The whole thing is complicated by the fact that some filesystems (OCFS2) ignore the path argument. So to distinguish between filesystem which want the path and which do not we introduce new .quota_on_meta callback which does not get the path. OCFS2 then uses this callback instead of old .quota_on. CC: Al Viro CC: Christoph Hellwig CC: Ted Ts'o CC: Joel Becker Signed-off-by: Jan Kara --- fs/ext3/super.c | 25 +++++++------------------ fs/ext4/super.c | 25 +++++++------------------ fs/ocfs2/super.c | 5 ++--- fs/quota/dquot.c | 18 ++---------------- fs/quota/quota.c | 41 +++++++++++++++++++++++++++-------------- fs/reiserfs/super.c | 17 ++++++----------- 6 files changed, 51 insertions(+), 80 deletions(-) (limited to 'fs') diff --git a/fs/ext3/super.c b/fs/ext3/super.c index b7d0554631e4..0e0d391626be 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c @@ -755,7 +755,7 @@ static int ext3_release_dquot(struct dquot *dquot); static int ext3_mark_dquot_dirty(struct dquot *dquot); static int ext3_write_info(struct super_block *sb, int type); static int ext3_quota_on(struct super_block *sb, int type, int format_id, - char *path); + struct path *path); static int ext3_quota_on_mount(struct super_block *sb, int type); static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off); @@ -2885,27 +2885,20 @@ static int ext3_quota_on_mount(struct super_block *sb, int type) * Standard function to be called on quota_on */ static int ext3_quota_on(struct super_block *sb, int type, int format_id, - char *name) + struct path *path) { int err; - struct path path; if (!test_opt(sb, QUOTA)) return -EINVAL; - err = kern_path(name, LOOKUP_FOLLOW, &path); - if (err) - return err; - /* Quotafile not on the same filesystem? */ - if (path.mnt->mnt_sb != sb) { - path_put(&path); + if (path->mnt->mnt_sb != sb) return -EXDEV; - } /* Journaling quota? 
*/ if (EXT3_SB(sb)->s_qf_names[type]) { /* Quotafile not of fs root? */ - if (path.dentry->d_parent != sb->s_root) + if (path->dentry->d_parent != sb->s_root) ext3_msg(sb, KERN_WARNING, "warning: Quota file not on filesystem root. " "Journaled quota will not work."); @@ -2915,7 +2908,7 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id, * When we journal data on quota file, we have to flush journal to see * all updates to the file when we bypass pagecache... */ - if (ext3_should_journal_data(path.dentry->d_inode)) { + if (ext3_should_journal_data(path->dentry->d_inode)) { /* * We don't need to lock updates but journal_flush() could * otherwise be livelocked... @@ -2923,15 +2916,11 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id, journal_lock_updates(EXT3_SB(sb)->s_journal); err = journal_flush(EXT3_SB(sb)->s_journal); journal_unlock_updates(EXT3_SB(sb)->s_journal); - if (err) { - path_put(&path); + if (err) return err; - } } - err = dquot_quota_on_path(sb, type, format_id, &path); - path_put(&path); - return err; + return dquot_quota_on(sb, type, format_id, path); } /* Read data from quotafile - avoid pagecache and such because we cannot afford diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 29c80f6d8b27..0f10ccd6bfc0 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1162,7 +1162,7 @@ static int ext4_release_dquot(struct dquot *dquot); static int ext4_mark_dquot_dirty(struct dquot *dquot); static int ext4_write_info(struct super_block *sb, int type); static int ext4_quota_on(struct super_block *sb, int type, int format_id, - char *path); + struct path *path); static int ext4_quota_off(struct super_block *sb, int type); static int ext4_quota_on_mount(struct super_block *sb, int type); static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data, @@ -4566,27 +4566,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type) * Standard function to be called on quota_on */ static int ext4_quota_on(struct super_block *sb, int type, int format_id, - char *name) + struct path *path) { int err; - struct path path; if (!test_opt(sb, QUOTA)) return -EINVAL; - err = kern_path(name, LOOKUP_FOLLOW, &path); - if (err) - return err; - /* Quotafile not on the same filesystem? */ - if (path.mnt->mnt_sb != sb) { - path_put(&path); + if (path->mnt->mnt_sb != sb) return -EXDEV; - } /* Journaling quota? */ if (EXT4_SB(sb)->s_qf_names[type]) { /* Quotafile not in fs root? */ - if (path.dentry->d_parent != sb->s_root) + if (path->dentry->d_parent != sb->s_root) ext4_msg(sb, KERN_WARNING, "Quota file not on filesystem root. " "Journaled quota will not work"); @@ -4597,7 +4590,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, * all updates to the file when we bypass pagecache... */ if (EXT4_SB(sb)->s_journal && - ext4_should_journal_data(path.dentry->d_inode)) { + ext4_should_journal_data(path->dentry->d_inode)) { /* * We don't need to lock updates but journal_flush() could * otherwise be livelocked... 
@@ -4605,15 +4598,11 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal); err = jbd2_journal_flush(EXT4_SB(sb)->s_journal); jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); - if (err) { - path_put(&path); + if (err) return err; - } } - err = dquot_quota_on_path(sb, type, format_id, &path); - path_put(&path); - return err; + return dquot_quota_on(sb, type, format_id, path); } static int ext4_quota_off(struct super_block *sb, int type) diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 17ff46fa8a10..31c3ffd2f8d0 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -993,8 +993,7 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb) } /* Handle quota on quotactl */ -static int ocfs2_quota_on(struct super_block *sb, int type, int format_id, - char *path) +static int ocfs2_quota_on(struct super_block *sb, int type, int format_id) { unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA, OCFS2_FEATURE_RO_COMPAT_GRPQUOTA}; @@ -1013,7 +1012,7 @@ static int ocfs2_quota_off(struct super_block *sb, int type) } static const struct quotactl_ops ocfs2_quotactl_ops = { - .quota_on = ocfs2_quota_on, + .quota_on_meta = ocfs2_quota_on, .quota_off = ocfs2_quota_off, .quota_sync = dquot_quota_sync, .get_info = dquot_get_dqinfo, diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 84becd3e4772..a2a622e079f0 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2189,8 +2189,8 @@ int dquot_resume(struct super_block *sb, int type) } EXPORT_SYMBOL(dquot_resume); -int dquot_quota_on_path(struct super_block *sb, int type, int format_id, - struct path *path) +int dquot_quota_on(struct super_block *sb, int type, int format_id, + struct path *path) { int error = security_quota_on(path->dentry); if (error) @@ -2204,20 +2204,6 @@ int dquot_quota_on_path(struct super_block *sb, int type, int format_id, DQUOT_LIMITS_ENABLED); return error; } -EXPORT_SYMBOL(dquot_quota_on_path); - -int dquot_quota_on(struct super_block *sb, int type, int format_id, char *name) -{ - struct path path; - int error; - - error = kern_path(name, LOOKUP_FOLLOW, &path); - if (!error) { - error = dquot_quota_on_path(sb, type, format_id, &path); - path_put(&path); - } - return error; -} EXPORT_SYMBOL(dquot_quota_on); /* diff --git a/fs/quota/quota.c b/fs/quota/quota.c index b299961e1edb..b34bdb25490c 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -64,18 +64,15 @@ static int quota_sync_all(int type) } static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id, - void __user *addr) + struct path *path) { - char *pathname; - int ret = -ENOSYS; - - pathname = getname(addr); - if (IS_ERR(pathname)) - return PTR_ERR(pathname); - if (sb->s_qcop->quota_on) - ret = sb->s_qcop->quota_on(sb, type, id, pathname); - putname(pathname); - return ret; + if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_on_meta) + return -ENOSYS; + if (sb->s_qcop->quota_on_meta) + return sb->s_qcop->quota_on_meta(sb, type, id); + if (IS_ERR(path)) + return PTR_ERR(path); + return sb->s_qcop->quota_on(sb, type, id, path); } static int quota_getfmt(struct super_block *sb, int type, void __user *addr) @@ -241,7 +238,7 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id, /* Copy parameters and call proper function */ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, - void __user *addr) + void __user *addr, struct path *path) { int ret; @@ -256,7 +253,7 @@ static int do_quotactl(struct super_block *sb, int 
type, int cmd, qid_t id, switch (cmd) { case Q_QUOTAON: - return quota_quotaon(sb, type, cmd, id, addr); + return quota_quotaon(sb, type, cmd, id, path); case Q_QUOTAOFF: if (!sb->s_qcop->quota_off) return -ENOSYS; @@ -335,6 +332,7 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special, { uint cmds, type; struct super_block *sb = NULL; + struct path path, *pathp = NULL; int ret; cmds = cmd >> SUBCMDSHIFT; @@ -351,12 +349,27 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special, return -ENODEV; } + /* + * Path for quotaon has to be resolved before grabbing superblock + * because that gets s_umount sem which is also possibly needed by path + * resolution (think about autofs) and thus deadlocks could arise. + */ + if (cmds == Q_QUOTAON) { + ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW, &path); + if (ret) + pathp = ERR_PTR(ret); + else + pathp = &path; + } + sb = quotactl_block(special); if (IS_ERR(sb)) return PTR_ERR(sb); - ret = do_quotactl(sb, type, cmds, id, addr); + ret = do_quotactl(sb, type, cmds, id, addr, pathp); drop_super(sb); + if (pathp && !IS_ERR(pathp)) + path_put(pathp); return ret; } diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 2575682a9ead..0aab04f46827 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -632,7 +632,7 @@ static int reiserfs_acquire_dquot(struct dquot *); static int reiserfs_release_dquot(struct dquot *); static int reiserfs_mark_dquot_dirty(struct dquot *); static int reiserfs_write_info(struct super_block *, int); -static int reiserfs_quota_on(struct super_block *, int, int, char *); +static int reiserfs_quota_on(struct super_block *, int, int, struct path *); static const struct dquot_operations reiserfs_quota_operations = { .write_dquot = reiserfs_write_dquot, @@ -2048,25 +2048,21 @@ static int reiserfs_quota_on_mount(struct super_block *sb, int type) * Standard function to be called on quota_on */ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, - char *name) + struct path *path) { int err; - struct path path; struct inode *inode; struct reiserfs_transaction_handle th; if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA))) return -EINVAL; - err = kern_path(name, LOOKUP_FOLLOW, &path); - if (err) - return err; /* Quotafile not on the same filesystem? */ - if (path.mnt->mnt_sb != sb) { + if (path->mnt->mnt_sb != sb) { err = -EXDEV; goto out; } - inode = path.dentry->d_inode; + inode = path->dentry->d_inode; /* We must not pack tails for quota files on reiserfs for quota IO to work */ if (!(REISERFS_I(inode)->i_flags & i_nopack_mask)) { err = reiserfs_unpack(inode, NULL); @@ -2082,7 +2078,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, /* Journaling quota? */ if (REISERFS_SB(sb)->s_qf_names[type]) { /* Quotafile not of fs root? */ - if (path.dentry->d_parent != sb->s_root) + if (path->dentry->d_parent != sb->s_root) reiserfs_warning(sb, "super-6521", "Quota file not on filesystem root. 
" "Journalled quota will not work."); @@ -2101,9 +2097,8 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, if (err) goto out; } - err = dquot_quota_on_path(sb, type, format_id, &path); + err = dquot_quota_on(sb, type, format_id, path); out: - path_put(&path); return err; } -- cgit v1.2.2 From 6c0f3af72cb1622a66962a1180c36ef8c41be8e2 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 16 Nov 2010 11:14:34 -0800 Subject: ceph: add dir_layout to inode Add a ceph_dir_layout to the inode, and calculate dentry hash values based on the parent directory's specified dir_hash function. This is needed because the old default Linux dcache hash function is extremely week and leads to a poor distribution of files among dir fragments. Signed-off-by: Sage Weil --- fs/ceph/dir.c | 20 ++++++++++++++++++++ fs/ceph/export.c | 2 +- fs/ceph/inode.c | 2 ++ fs/ceph/super.h | 2 ++ 4 files changed, 25 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index d902948a90d8..562f9884a4d9 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -1216,6 +1216,26 @@ void ceph_dentry_lru_del(struct dentry *dn) } } +/* + * Return name hash for a given dentry. This is dependent on + * the parent directory's hash function. + */ +unsigned ceph_dentry_hash(struct dentry *dn) +{ + struct inode *dir = dn->d_parent->d_inode; + struct ceph_inode_info *dci = ceph_inode(dir); + + switch (dci->i_dir_layout.dl_dir_hash) { + case 0: /* for backward compat */ + case CEPH_STR_HASH_LINUX: + return dn->d_name.hash; + + default: + return ceph_str_hash(dci->i_dir_layout.dl_dir_hash, + dn->d_name.name, dn->d_name.len); + } +} + const struct file_operations ceph_dir_fops = { .read = ceph_read_dir, .readdir = ceph_readdir, diff --git a/fs/ceph/export.c b/fs/ceph/export.c index 2297d9426992..e41056174bf8 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -59,7 +59,7 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len, dout("encode_fh %p connectable\n", dentry); cfh->ino = ceph_ino(dentry->d_inode); cfh->parent_ino = ceph_ino(parent->d_inode); - cfh->parent_name_hash = parent->d_name.hash; + cfh->parent_name_hash = ceph_dentry_hash(parent); *max_len = connected_handle_length; type = 2; } else if (*max_len >= handle_length) { diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index bf1286588f26..045283ce4413 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -297,6 +297,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb) ci->i_release_count = 0; ci->i_symlink = NULL; + memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout)); + ci->i_fragtree = RB_ROOT; mutex_init(&ci->i_fragtree_mutex); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 7f01728a4657..6e0826695112 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -239,6 +239,7 @@ struct ceph_inode_info { unsigned i_ceph_flags; unsigned long i_release_count; + struct ceph_dir_layout i_dir_layout; struct ceph_file_layout i_layout; char *i_symlink; @@ -768,6 +769,7 @@ extern void ceph_dentry_lru_add(struct dentry *dn); extern void ceph_dentry_lru_touch(struct dentry *dn); extern void ceph_dentry_lru_del(struct dentry *dn); extern void ceph_invalidate_dentry_lease(struct dentry *dentry); +extern unsigned ceph_dentry_hash(struct dentry *dn); /* * our d_ops vary depending on whether the inode is live, -- cgit v1.2.2 From 14303d20f3ae3e6ab626c77a4aac202b3bafd377 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 14 Dec 2010 17:37:52 -0800 Subject: ceph: implement DIRLAYOUTHASH feature to get dir layout 
from MDS This implements the DIRLAYOUTHASH protocol feature, which passes the dir layout over the wire from the MDS. This gives the client knowledge of the correct hash function to use for mapping dentries among dir fragments. Note that if this feature is _not_ present on the client but is on the MDS, the client may misdirect requests. This will result in a forward and degrade performance. It may also result in inaccurate NFS filehandle generation, which will prevent fh resolution when the inode is not present in the client cache and the parent directories have been fragmented. Signed-off-by: Sage Weil --- fs/ceph/inode.c | 2 ++ fs/ceph/mds_client.c | 42 +++++++++++++++++++++++++++--------------- fs/ceph/mds_client.h | 1 + fs/ceph/super.c | 3 ++- 4 files changed, 32 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 045283ce4413..e791fa34b23d 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -682,6 +682,8 @@ static int fill_inode(struct inode *inode, inode->i_op = &ceph_dir_iops; inode->i_fop = &ceph_dir_fops; + ci->i_dir_layout = iinfo->dir_layout; + ci->i_files = le64_to_cpu(info->files); ci->i_subdirs = le64_to_cpu(info->subdirs); ci->i_rbytes = le64_to_cpu(info->rbytes); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 38800eaa81d0..9be29b06a2d9 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -60,7 +60,8 @@ static const struct ceph_connection_operations mds_con_ops; * parse individual inode info */ static int parse_reply_info_in(void **p, void *end, - struct ceph_mds_reply_info_in *info) + struct ceph_mds_reply_info_in *info, + int features) { int err = -EIO; @@ -74,6 +75,12 @@ static int parse_reply_info_in(void **p, void *end, info->symlink = *p; *p += info->symlink_len; + if (features & CEPH_FEATURE_DIRLAYOUTHASH) + ceph_decode_copy_safe(p, end, &info->dir_layout, + sizeof(info->dir_layout), bad); + else + memset(&info->dir_layout, 0, sizeof(info->dir_layout)); + ceph_decode_32_safe(p, end, info->xattr_len, bad); ceph_decode_need(p, end, info->xattr_len, bad); info->xattr_data = *p; @@ -88,12 +95,13 @@ bad: * target inode. 
*/ static int parse_reply_info_trace(void **p, void *end, - struct ceph_mds_reply_info_parsed *info) + struct ceph_mds_reply_info_parsed *info, + int features) { int err; if (info->head->is_dentry) { - err = parse_reply_info_in(p, end, &info->diri); + err = parse_reply_info_in(p, end, &info->diri, features); if (err < 0) goto out_bad; @@ -114,7 +122,7 @@ static int parse_reply_info_trace(void **p, void *end, } if (info->head->is_target) { - err = parse_reply_info_in(p, end, &info->targeti); + err = parse_reply_info_in(p, end, &info->targeti, features); if (err < 0) goto out_bad; } @@ -134,7 +142,8 @@ out_bad: * parse readdir results */ static int parse_reply_info_dir(void **p, void *end, - struct ceph_mds_reply_info_parsed *info) + struct ceph_mds_reply_info_parsed *info, + int features) { u32 num, i = 0; int err; @@ -182,7 +191,7 @@ static int parse_reply_info_dir(void **p, void *end, *p += sizeof(struct ceph_mds_reply_lease); /* inode */ - err = parse_reply_info_in(p, end, &info->dir_in[i]); + err = parse_reply_info_in(p, end, &info->dir_in[i], features); if (err < 0) goto out_bad; i++; @@ -205,7 +214,8 @@ out_bad: * parse fcntl F_GETLK results */ static int parse_reply_info_filelock(void **p, void *end, - struct ceph_mds_reply_info_parsed *info) + struct ceph_mds_reply_info_parsed *info, + int features) { if (*p + sizeof(*info->filelock_reply) > end) goto bad; @@ -225,19 +235,21 @@ bad: * parse extra results */ static int parse_reply_info_extra(void **p, void *end, - struct ceph_mds_reply_info_parsed *info) + struct ceph_mds_reply_info_parsed *info, + int features) { if (info->head->op == CEPH_MDS_OP_GETFILELOCK) - return parse_reply_info_filelock(p, end, info); + return parse_reply_info_filelock(p, end, info, features); else - return parse_reply_info_dir(p, end, info); + return parse_reply_info_dir(p, end, info, features); } /* * parse entire mds reply */ static int parse_reply_info(struct ceph_msg *msg, - struct ceph_mds_reply_info_parsed *info) + struct ceph_mds_reply_info_parsed *info, + int features) { void *p, *end; u32 len; @@ -250,7 +262,7 @@ static int parse_reply_info(struct ceph_msg *msg, /* trace */ ceph_decode_32_safe(&p, end, len, bad); if (len > 0) { - err = parse_reply_info_trace(&p, p+len, info); + err = parse_reply_info_trace(&p, p+len, info, features); if (err < 0) goto out_bad; } @@ -258,7 +270,7 @@ static int parse_reply_info(struct ceph_msg *msg, /* extra */ ceph_decode_32_safe(&p, end, len, bad); if (len > 0) { - err = parse_reply_info_extra(&p, p+len, info); + err = parse_reply_info_extra(&p, p+len, info, features); if (err < 0) goto out_bad; } @@ -654,7 +666,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc, } else { /* dir + name */ inode = dir; - hash = req->r_dentry->d_name.hash; + hash = ceph_dentry_hash(req->r_dentry); is_hash = true; } } @@ -2101,7 +2113,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) dout("handle_reply tid %lld result %d\n", tid, result); rinfo = &req->r_reply_info; - err = parse_reply_info(msg, rinfo); + err = parse_reply_info(msg, rinfo, session->s_con.peer_features); mutex_unlock(&mdsc->mutex); mutex_lock(&session->s_mutex); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index aabe563b54db..f8f27f6eaa90 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -35,6 +35,7 @@ struct ceph_cap; */ struct ceph_mds_reply_info_in { struct ceph_mds_reply_inode *in; + struct ceph_dir_layout dir_layout; u32 symlink_len; char *symlink; u32 xattr_len; diff --git a/fs/ceph/super.c 
b/fs/ceph/super.c index 08b460ae0539..1417f3f3e246 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -428,7 +428,8 @@ struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, goto fail; } fsc->client->extra_mon_dispatch = extra_mon_dispatch; - fsc->client->supported_features |= CEPH_FEATURE_FLOCK; + fsc->client->supported_features |= CEPH_FEATURE_FLOCK | + CEPH_FEATURE_DIRLAYOUTHASH; fsc->client->monc.want_mdsmap = 1; fsc->mount_options = fsopt; -- cgit v1.2.2 From 4af25fdda6943f311a63034f80933e4d6d6e3a19 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 2 Nov 2010 13:41:47 -0700 Subject: ceph: drop redundant r_mds field The r_mds field is redundant, since we can find the same information at r_session->s_mds, and when r_session is NULL then r_mds is meaningless. Signed-off-by: Sage Weil --- fs/ceph/debugfs.c | 9 ++++++--- fs/ceph/mds_client.c | 8 +++++--- fs/ceph/mds_client.h | 1 - 3 files changed, 11 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 7ae1b3d55b58..08f65faac112 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -60,10 +60,13 @@ static int mdsc_show(struct seq_file *s, void *p) for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) { req = rb_entry(rp, struct ceph_mds_request, r_node); - if (req->r_request) - seq_printf(s, "%lld\tmds%d\t", req->r_tid, req->r_mds); - else + if (req->r_request && req->r_session) + seq_printf(s, "%lld\tmds%d\t", req->r_tid, + req->r_session->s_mds); + else if (!req->r_request) seq_printf(s, "%lld\t(no request)\t", req->r_tid); + else + seq_printf(s, "%lld\t(no session)\t", req->r_tid); seq_printf(s, "%s", ceph_mds_op_name(req->r_op)); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 9be29b06a2d9..e22e8b41d572 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1705,7 +1705,6 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, struct ceph_msg *msg; int flags = 0; - req->r_mds = mds; req->r_attempts++; if (req->r_inode) { struct ceph_cap *cap = @@ -2068,8 +2067,11 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) goto out; } else { struct ceph_inode_info *ci = ceph_inode(req->r_inode); - struct ceph_cap *cap = - ceph_get_cap_for_mds(ci, req->r_mds);; + struct ceph_cap *cap = NULL; + + if (req->r_session) + cap = ceph_get_cap_for_mds(ci, + req->r_session->s_mds); dout("already using auth"); if ((!cap || cap != ci->i_auth_cap) || diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index f8f27f6eaa90..4e3a9cc0bba6 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -166,7 +166,6 @@ struct ceph_mds_request { struct ceph_mds_client *r_mdsc; int r_op; /* mds op code */ - int r_mds; /* operation on what? */ struct inode *r_inode; /* arg1 */ -- cgit v1.2.2 From dc69e2e9fcd7c613eb744ea3b9c4ee9ca554e822 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 2 Nov 2010 13:49:00 -0700 Subject: ceph: associate requests with opening sessions Associate request with sessions that aren't yep open. This makes the debugfs mdsc request list more informative. 
Signed-off-by: Sage Weil --- fs/ceph/mds_client.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index e22e8b41d572..509339ceef72 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1791,6 +1791,8 @@ static int __do_request(struct ceph_mds_client *mdsc, goto finish; } + put_request_session(req); + mds = __choose_mds(mdsc, req); if (mds < 0 || ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { @@ -1808,6 +1810,8 @@ static int __do_request(struct ceph_mds_client *mdsc, goto finish; } } + req->r_session = get_session(session); + dout("do_request mds%d session %p state %s\n", mds, session, session_state_name(session->s_state)); if (session->s_state != CEPH_MDS_SESSION_OPEN && @@ -1820,7 +1824,6 @@ static int __do_request(struct ceph_mds_client *mdsc, } /* send request */ - req->r_session = get_session(session); req->r_resend_mds = -1; /* forget any previous mds hint */ if (req->r_request_started == 0) /* note request start time */ @@ -1874,7 +1877,6 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds) if (req->r_session && req->r_session->s_mds == mds) { dout(" kicking tid %llu\n", req->r_tid); - put_request_session(req); __do_request(mdsc, req); } } -- cgit v1.2.2 From 582c86e69045f37da8be445c265f72a7a73b18c6 Mon Sep 17 00:00:00 2001 From: Tracey Dent Date: Tue, 14 Dec 2010 19:32:37 -0500 Subject: ceph: Makefile: Remove unnessary code Remove the if and else conditional because the code is in mainline and there is no need in it being there. Also, Changed Makefile to use -y instead of -objs because -objs is deprecated and not mentioned in Documentation/kbuild/makefiles.txt. Signed-off-by: Tracey Dent Signed-off-by: Sage Weil --- fs/ceph/Makefile | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile index 9e6c4f2e8ff1..bd352125e829 100644 --- a/fs/ceph/Makefile +++ b/fs/ceph/Makefile @@ -2,31 +2,10 @@ # Makefile for CEPH filesystem. # -ifneq ($(KERNELRELEASE),) - obj-$(CONFIG_CEPH_FS) += ceph.o -ceph-objs := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \ +ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \ export.o caps.o snap.o xattr.o \ mds_client.o mdsmap.o strings.o ceph_frag.o \ debugfs.o -else -#Otherwise we were called directly from the command -# line; invoke the kernel build system. - -KERNELDIR ?= /lib/modules/$(shell uname -r)/build -PWD := $(shell pwd) - -default: all - -all: - $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules - -modules_install: - $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules_install - -clean: - $(MAKE) -C $(KERNELDIR) M=$(PWD) clean - -endif -- cgit v1.2.2 From 01e6acc4ea4c284c44bfb3d46c76f4ae580c6435 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 3 Jan 2011 14:49:45 +0100 Subject: ceph: fsc->*_wq's aren't used in memory reclaim path fsc->*_wq's aren't depended upon during memory reclaim. Convert to alloc_workqueue() w/o WQ_MEM_RECLAIM. 
Signed-off-by: Tejun Heo Cc: Sage Weil Cc: ceph-devel@vger.kernel.org Signed-off-by: Sage Weil --- fs/ceph/super.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 1417f3f3e246..bf6f0f34082a 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -444,13 +444,17 @@ struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, goto fail_client; err = -ENOMEM; - fsc->wb_wq = create_workqueue("ceph-writeback"); + /* + * The number of concurrent works can be high but they don't need + * to be processed in parallel, limit concurrency. + */ + fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1); if (fsc->wb_wq == NULL) goto fail_bdi; - fsc->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid"); + fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1); if (fsc->pg_inv_wq == NULL) goto fail_wb_wq; - fsc->trunc_wq = create_singlethread_workqueue("ceph-trunc"); + fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1); if (fsc->trunc_wq == NULL) goto fail_pg_inv_wq; -- cgit v1.2.2 From c8aebb0c9f8c7471643d5f8ba68328de8013005f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 10:22:30 -0500 Subject: per-superblock default ->d_op Signed-off-by: Al Viro --- fs/dcache.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index 5699d4c027cb..5ec58267b5bb 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1320,6 +1320,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) __dget_dlock(parent); dentry->d_parent = parent; dentry->d_sb = parent->d_sb; + d_set_d_op(dentry, dentry->d_sb->s_d_op); list_add(&dentry->d_u.d_child, &parent->d_subdirs); spin_unlock(&parent->d_lock); } @@ -1335,6 +1336,7 @@ struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name) struct dentry *dentry = d_alloc(NULL, name); if (dentry) { dentry->d_sb = sb; + d_set_d_op(dentry, dentry->d_sb->s_d_op); dentry->d_parent = dentry; dentry->d_flags |= DCACHE_DISCONNECTED; } @@ -1507,6 +1509,7 @@ struct dentry * d_alloc_root(struct inode * root_inode) res = d_alloc(NULL, &name); if (res) { res->d_sb = root_inode->i_sb; + d_set_d_op(res, res->d_sb->s_d_op); res->d_parent = res; d_instantiate(res, root_inode); } @@ -1567,6 +1570,7 @@ struct dentry *d_obtain_alias(struct inode *inode) /* attach a disconnected dentry */ spin_lock(&tmp->d_lock); tmp->d_sb = inode->i_sb; + d_set_d_op(tmp, tmp->d_sb->s_d_op); tmp->d_inode = inode; tmp->d_flags |= DCACHE_DISCONNECTED; list_add(&tmp->d_alias, &inode->i_dentry); -- cgit v1.2.2 From 6cc9c1d2c1414ef67f465462aa96a5d1fed12f5d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 10:29:26 -0500 Subject: fix isofs d_op handling switch to ->s_d_op; d_obtain_alias() will DTRT now Signed-off-by: Al Viro --- fs/isofs/inode.c | 13 +++++++------ fs/isofs/namei.c | 2 -- 2 files changed, 7 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index 844a7903c72f..a0f3833c0dbf 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -939,17 +939,18 @@ root_found: goto out_iput; } - /* get the root dentry */ - s->s_root = d_alloc_root(inode); - if (!(s->s_root)) - goto out_no_root; - table = 0; if (joliet_level) table += 2; if (opt.check == 'r') table++; - d_set_d_op(s->s_root, &isofs_dentry_ops[table]); + + s->s_d_op = &isofs_dentry_ops[table]; + + /* get the root dentry */ + s->s_root = d_alloc_root(inode); + if (!(s->s_root)) + goto out_no_root; kfree(opt.iocharset); 
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index 679a849c3b27..4fb3e8074fd4 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c @@ -172,8 +172,6 @@ struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nam struct inode *inode; struct page *page; - d_set_d_op(dentry, dir->i_sb->s_root->d_op); - page = alloc_page(GFP_USER); if (!page) return ERR_PTR(-ENOMEM); -- cgit v1.2.2 From 3d23985d6cfa7908e46fd0c62a2ee84faffe4d8b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 10:44:00 -0500 Subject: switch fat to ->s_d_op, close exportfs races there don't bother with lock_super() in fat_fill_super() callers, while we are at it - there won't be any concurrency anyway. Signed-off-by: Al Viro --- fs/fat/fat.h | 3 ++- fs/fat/inode.c | 13 +++++-------- fs/fat/namei_msdos.c | 27 +++++++++------------------ fs/fat/namei_vfat.c | 27 +++++++++------------------ 4 files changed, 25 insertions(+), 45 deletions(-) (limited to 'fs') diff --git a/fs/fat/fat.h b/fs/fat/fat.h index d75a77f85c28..f50408901f7e 100644 --- a/fs/fat/fat.h +++ b/fs/fat/fat.h @@ -319,7 +319,8 @@ extern struct inode *fat_build_inode(struct super_block *sb, struct msdos_dir_entry *de, loff_t i_pos); extern int fat_sync_inode(struct inode *inode); extern int fat_fill_super(struct super_block *sb, void *data, int silent, - const struct inode_operations *fs_dir_inode_ops, int isvfat); + const struct inode_operations *fs_dir_inode_ops, + int isvfat, void (*setup)(struct super_block *)); extern int fat_flush_inodes(struct super_block *sb, struct inode *i1, struct inode *i2); diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 206351af7c58..86753fe10bd1 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -703,7 +703,6 @@ static struct dentry *fat_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { struct inode *inode = NULL; - struct dentry *result; u32 *fh = fid->raw; if (fh_len < 5 || fh_type != 3) @@ -748,10 +747,7 @@ static struct dentry *fat_fh_to_dentry(struct super_block *sb, * the fat_iget lookup again. If that fails, then we are totally out * of luck. But all that is for another day */ - result = d_obtain_alias(inode); - if (!IS_ERR(result)) - d_set_d_op(result, sb->s_root->d_op); - return result; + return d_obtain_alias(inode); } static int @@ -799,8 +795,6 @@ static struct dentry *fat_get_parent(struct dentry *child) brelse(bh); parent = d_obtain_alias(inode); - if (!IS_ERR(parent)) - d_set_d_op(parent, sb->s_root->d_op); out: unlock_super(sb); @@ -1244,7 +1238,8 @@ static int fat_read_root(struct inode *inode) * Read the super block of an MS-DOS FS. 
*/ int fat_fill_super(struct super_block *sb, void *data, int silent, - const struct inode_operations *fs_dir_inode_ops, int isvfat) + const struct inode_operations *fs_dir_inode_ops, int isvfat, + void (*setup)(struct super_block *)) { struct inode *root_inode = NULL, *fat_inode = NULL; struct buffer_head *bh; @@ -1280,6 +1275,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, if (error) goto out_fail; + setup(sb); /* flavour-specific stuff that needs options */ + error = -EIO; sb_min_blocksize(sb, 512); bh = sb_bread(sb, 0); diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c index 35ffe43afa4b..711499040eb6 100644 --- a/fs/fat/namei_msdos.c +++ b/fs/fat/namei_msdos.c @@ -227,11 +227,7 @@ static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry, } out: unlock_super(sb); - d_set_d_op(dentry, &msdos_dentry_operations); - dentry = d_splice_alias(inode, dentry); - if (dentry) - d_set_d_op(dentry, &msdos_dentry_operations); - return dentry; + return d_splice_alias(inode, dentry); error: unlock_super(sb); @@ -661,21 +657,16 @@ static const struct inode_operations msdos_dir_inode_operations = { .getattr = fat_getattr, }; -static int msdos_fill_super(struct super_block *sb, void *data, int silent) +static void setup(struct super_block *sb) { - int res; - - lock_super(sb); - res = fat_fill_super(sb, data, silent, &msdos_dir_inode_operations, 0); - if (res) { - unlock_super(sb); - return res; - } - + sb->s_d_op = &msdos_dentry_operations; sb->s_flags |= MS_NOATIME; - d_set_d_op(sb->s_root, &msdos_dentry_operations); - unlock_super(sb); - return 0; +} + +static int msdos_fill_super(struct super_block *sb, void *data, int silent) +{ + return fat_fill_super(sb, data, silent, &msdos_dir_inode_operations, + 0, setup); } static struct dentry *msdos_mount(struct file_system_type *fs_type, diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index e3ffc5e12332..f88f752babd9 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -772,13 +772,10 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry, out: unlock_super(sb); - d_set_d_op(dentry, sb->s_root->d_op); dentry->d_time = dentry->d_parent->d_inode->i_version; dentry = d_splice_alias(inode, dentry); - if (dentry) { - d_set_d_op(dentry, sb->s_root->d_op); + if (dentry) dentry->d_time = dentry->d_parent->d_inode->i_version; - } return dentry; error: @@ -1066,24 +1063,18 @@ static const struct inode_operations vfat_dir_inode_operations = { .getattr = fat_getattr, }; -static int vfat_fill_super(struct super_block *sb, void *data, int silent) +static void setup(struct super_block *sb) { - int res; - - lock_super(sb); - res = fat_fill_super(sb, data, silent, &vfat_dir_inode_operations, 1); - if (res) { - unlock_super(sb); - return res; - } - if (MSDOS_SB(sb)->options.name_check != 's') - d_set_d_op(sb->s_root, &vfat_ci_dentry_ops); + sb->s_d_op = &vfat_ci_dentry_ops; else - d_set_d_op(sb->s_root, &vfat_dentry_ops); + sb->s_d_op = &vfat_dentry_ops; +} - unlock_super(sb); - return 0; +static int vfat_fill_super(struct super_block *sb, void *data, int silent) +{ + return fat_fill_super(sb, data, silent, &vfat_dir_inode_operations, + 1, setup); } static struct dentry *vfat_mount(struct file_system_type *fs_type, -- cgit v1.2.2 From 94b77bd86f8ad458fa7870def78ec3a8a7caa986 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 10:59:31 -0500 Subject: switch jfs to ->s_d_op, close exportfs races Signed-off-by: Al Viro --- fs/jfs/namei.c | 10 +--------- fs/jfs/super.c | 6 +++--- 2 
files changed, 4 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 4414e3a42264..81ead850ddb6 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -1465,9 +1465,6 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc jfs_info("jfs_lookup: name = %s", name); - if (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2) - d_set_d_op(dentry, &jfs_ci_dentry_operations); - if ((name[0] == '.') && (len == 1)) inum = dip->i_ino; else if (strcmp(name, "..") == 0) @@ -1492,12 +1489,7 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc return ERR_CAST(ip); } - dentry = d_splice_alias(ip, dentry); - - if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2)) - d_set_d_op(dentry, &jfs_ci_dentry_operations); - - return dentry; + return d_splice_alias(ip, dentry); } static struct inode *jfs_nfs_get_inode(struct super_block *sb, diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 3150d766e0d4..eeca48a031ab 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -515,6 +515,9 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) sb->s_magic = JFS_SUPER_MAGIC; + if (sbi->mntflag & JFS_OS2) + sb->s_d_op = &jfs_ci_dentry_operations; + inode = jfs_iget(sb, ROOT_I); if (IS_ERR(inode)) { ret = PTR_ERR(inode); @@ -524,9 +527,6 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) if (!sb->s_root) goto out_no_root; - if (sbi->mntflag & JFS_OS2) - d_set_d_op(sb->s_root, &jfs_ci_dentry_operations); - /* logical blocks are represented by 40 bits in pxd_t, etc. */ sb->s_maxbytes = ((u64) sb->s_blocksize) << 40; #if BITS_PER_LONG == 32 -- cgit v1.2.2 From c35eebe9939f55b9d51631d03301a7af19090dcc Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 11:15:22 -0500 Subject: switch fuse Signed-off-by: Al Viro --- fs/fuse/dir.c | 1 - fs/fuse/inode.c | 10 ++++------ 2 files changed, 4 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 042af7346ec1..bfed8447ed80 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -350,7 +350,6 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, } entry = newent ? 
newent : entry; - d_set_d_op(entry, &fuse_dentry_operations); if (outarg_valid) fuse_change_entry_timeout(entry, &outarg); else diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index f62b32cffea9..9e3f68cc1bd1 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -617,10 +617,8 @@ static struct dentry *fuse_get_dentry(struct super_block *sb, goto out_iput; entry = d_obtain_alias(inode); - if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) { - d_set_d_op(entry, &fuse_dentry_operations); + if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) fuse_invalidate_entry_cache(entry); - } return entry; @@ -719,10 +717,8 @@ static struct dentry *fuse_get_parent(struct dentry *child) } parent = d_obtain_alias(inode); - if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) { - d_set_d_op(parent, &fuse_dentry_operations); + if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) fuse_invalidate_entry_cache(parent); - } return parent; } @@ -989,6 +985,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) iput(root); goto err_put_conn; } + /* only now - we want root dentry with NULL ->d_op */ + sb->s_d_op = &fuse_dentry_operations; init_req = fuse_request_alloc(); if (!init_req) -- cgit v1.2.2 From 30304aba6a053f114092cea6643a96ac2902bc5a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 11:16:30 -0500 Subject: switch sysv Signed-off-by: Al Viro --- fs/sysv/namei.c | 1 - fs/sysv/super.c | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index b5e68da2db32..b427b1208c26 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -48,7 +48,6 @@ static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, st struct inode * inode = NULL; ino_t ino; - d_set_d_op(dentry, dir->i_sb->s_root->d_op); if (dentry->d_name.len > SYSV_NAMELEN) return ERR_PTR(-ENAMETOOLONG); ino = sysv_inode_by_name(dentry); diff --git a/fs/sysv/super.c b/fs/sysv/super.c index 76712aefc4ab..f60c196913ea 100644 --- a/fs/sysv/super.c +++ b/fs/sysv/super.c @@ -332,6 +332,10 @@ static int complete_read_super(struct super_block *sb, int silent, int size) sb->s_magic = SYSV_MAGIC_BASE + sbi->s_type; /* set up enough so that it can read an inode */ sb->s_op = &sysv_sops; + if (sbi->s_forced_ro) + sb->s_flags |= MS_RDONLY; + if (sbi->s_truncate) + sb->s_d_op = &sysv_dentry_operations; root_inode = sysv_iget(sb, SYSV_ROOT_INO); if (IS_ERR(root_inode)) { printk("SysV FS: get root inode failed\n"); @@ -343,10 +347,6 @@ static int complete_read_super(struct super_block *sb, int silent, int size) printk("SysV FS: get root dentry failed\n"); return 0; } - if (sbi->s_forced_ro) - sb->s_flags |= MS_RDONLY; - if (sbi->s_truncate) - d_set_d_op(sb->s_root, &sysv_dentry_operations); return 1; } -- cgit v1.2.2 From c6cb412366e8f338baae7300b9f1961f3e559a24 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 11:17:44 -0500 Subject: minixfs: kill dead code ->d_op of root stays NULL these days on minixfs Signed-off-by: Al Viro --- fs/minix/namei.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/minix/namei.c b/fs/minix/namei.c index 1b9e07728a9f..ce7337ddfdbf 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -23,8 +23,6 @@ static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, st struct inode * inode = NULL; ino_t ino; - d_set_d_op(dentry, dir->i_sb->s_root->d_op); - if (dentry->d_name.len > minix_sb(dir->i_sb)->s_namelen) return ERR_PTR(-ENAMETOOLONG); -- 
cgit v1.2.2 From 518c79d28e22f657fec399ef5bf0d50b13f7e9b0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 11:19:14 -0500 Subject: switch hfs Signed-off-by: Al Viro --- fs/hfs/dir.c | 2 -- fs/hfs/super.c | 3 +-- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c index ea4aefe7c652..afa66aaa2237 100644 --- a/fs/hfs/dir.c +++ b/fs/hfs/dir.c @@ -25,8 +25,6 @@ static struct dentry *hfs_lookup(struct inode *dir, struct dentry *dentry, struct inode *inode = NULL; int res; - d_set_d_op(dentry, &hfs_dentry_operations); - hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd); hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name); res = hfs_brec_read(&fd, &rec, sizeof(rec)); diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 0bef62aa4f42..1b55f704fb22 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -429,13 +429,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) if (!root_inode) goto bail_no_root; + sb->s_d_op = &hfs_dentry_operations; res = -ENOMEM; sb->s_root = d_alloc_root(root_inode); if (!sb->s_root) goto bail_iput; - d_set_d_op(sb->s_root, &hfs_dentry_operations); - /* everything's okay */ return 0; -- cgit v1.2.2 From eddf790bd41aa153922df223b4692cc606cadb7b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 11:19:23 -0500 Subject: switch hfsplus Signed-off-by: Al Viro --- fs/hfsplus/dir.c | 1 - fs/hfsplus/super.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index f896dc843026..4df5059c25da 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -37,7 +37,6 @@ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry, sb = dir->i_sb; - d_set_d_op(dentry, &hfsplus_dentry_operations); dentry->d_fsdata = NULL; hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name); diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 6ee6ad20acf2..9a3b4795f43c 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -444,13 +444,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) err = PTR_ERR(root); goto cleanup; } + sb->s_d_op = &hfsplus_dentry_operations; sb->s_root = d_alloc_root(root); if (!sb->s_root) { iput(root); err = -ENOMEM; goto cleanup; } - d_set_d_op(sb->s_root, &hfsplus_dentry_operations); str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1; str.name = HFSP_HIDDENDIR_NAME; -- cgit v1.2.2 From 96e1391414f80a8a3bae74ddf58d3c2870e304f0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 11:20:57 -0500 Subject: switch adfs Signed-off-by: Al Viro --- fs/adfs/dir.c | 1 - fs/adfs/super.c | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c index bf7693c384f9..3b4a764ed780 100644 --- a/fs/adfs/dir.c +++ b/fs/adfs/dir.c @@ -276,7 +276,6 @@ adfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) struct object_info obj; int error; - d_set_d_op(dentry, &adfs_dentry_operations); lock_kernel(); error = adfs_dir_lookup_byname(dir, &dentry->d_name, &obj); if (error == 0) { diff --git a/fs/adfs/super.c b/fs/adfs/super.c index a4041b52fbca..2d7954049fbe 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -473,6 +473,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) asb->s_namelen = ADFS_F_NAME_LEN; } + sb->s_d_op = &adfs_dentry_operations; root = adfs_iget(sb, &root_obj); 
sb->s_root = d_alloc_root(root); if (!sb->s_root) { @@ -483,8 +484,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent) kfree(asb->s_map); adfs_error(sb, "get root inode failed\n"); goto error; - } else - d_set_d_op(sb->s_root, &adfs_dentry_operations); + } unlock_kernel(); return 0; -- cgit v1.2.2 From 8b244ff2fa58f81f84aa03c82c2c23307a778ce7 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 11:29:39 -0500 Subject: switch nfs to ->s_d_op Signed-off-by: Al Viro --- fs/nfs/dir.c | 4 ---- fs/nfs/getroot.c | 6 ------ fs/nfs/super.c | 1 + 3 files changed, 1 insertion(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index abe4f0c8dc5f..95b081bc9e25 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -439,7 +439,6 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) if (dentry == NULL) return; - d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops); inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr); if (IS_ERR(inode)) goto out; @@ -1193,8 +1192,6 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru if (dentry->d_name.len > NFS_SERVER(dir)->namelen) goto out; - d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops); - /* * If we're doing an exclusive create, optimize away the lookup * but don't hash the dentry. @@ -1338,7 +1335,6 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry res = ERR_PTR(-ENAMETOOLONG); goto out; } - d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops); /* Let vfs_create() deal with O_EXCL. Instantiate, but don't hash * the dentry. */ diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c index 5596c6a2881e..b5ffe8fa291f 100644 --- a/fs/nfs/getroot.c +++ b/fs/nfs/getroot.c @@ -119,9 +119,6 @@ struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh) } security_d_instantiate(ret, inode); - - if (ret->d_op == NULL) - d_set_d_op(ret, server->nfs_client->rpc_ops->dentry_ops); out: nfs_free_fattr(fsinfo.fattr); return ret; @@ -227,9 +224,6 @@ struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh) security_d_instantiate(ret, inode); - if (ret->d_op == NULL) - d_set_d_op(ret, server->nfs_client->rpc_ops->dentry_ops); - out: nfs_free_fattr(fattr); dprintk("<-- nfs4_get_root()\n"); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 0f9ea73e7789..b68c8607770f 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2202,6 +2202,7 @@ static int nfs_set_super(struct super_block *s, void *data) s->s_flags = sb_mntdata->mntflags; s->s_fs_info = server; + s->s_d_op = server->nfs_client->rpc_ops->dentry_ops; ret = set_anon_super(s, server); if (ret == 0) server->s_dev = s->s_dev; -- cgit v1.2.2 From 1c929cfe6d8f2087a337a868fbf6c38d56bb4889 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 11:43:51 -0500 Subject: switch cifs Signed-off-by: Al Viro --- fs/cifs/cifsfs.c | 6 ++++++ fs/cifs/dir.c | 25 +------------------------ fs/cifs/inode.c | 8 -------- fs/cifs/link.c | 4 ---- fs/cifs/readdir.c | 5 ----- 5 files changed, 7 insertions(+), 41 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 5e7075d5f139..d9f652a522a6 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -174,6 +174,12 @@ cifs_read_super(struct super_block *sb, void *data, goto out_no_root; } + /* do that *after* d_alloc_root() - we want NULL ->d_op for root here */ + if (cifs_sb_master_tcon(cifs_sb)->nocase) + sb->s_d_op = &cifs_ci_dentry_ops; + else + sb->s_d_op = &cifs_dentry_ops; + #ifdef CONFIG_CIFS_EXPERIMENTAL if 
(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { cFYI(1, "export ops supported"); diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 2e773825835e..1e95dd635632 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -130,17 +130,6 @@ cifs_bp_rename_retry: return full_path; } -static void setup_cifs_dentry(struct cifsTconInfo *tcon, - struct dentry *direntry, - struct inode *newinode) -{ - if (tcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); - d_instantiate(direntry, newinode); -} - /* Inode operations in similar order to how they appear in Linux file fs.h */ int @@ -327,7 +316,7 @@ cifs_create_get_file_info: cifs_create_set_dentry: if (rc == 0) - setup_cifs_dentry(tcon, direntry, newinode); + d_instantiate(direntry, newinode); else cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); @@ -418,10 +407,6 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); - if (pTcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); if (rc == 0) d_instantiate(direntry, newinode); @@ -601,10 +586,6 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, parent_dir_inode->i_sb, xid, NULL); if ((rc == 0) && (newInode != NULL)) { - if (pTcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); d_add(direntry, newInode); if (posix_open) { filp = lookup_instantiate_filp(nd, direntry, @@ -631,10 +612,6 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, } else if (rc == -ENOENT) { rc = 0; direntry->d_time = jiffies; - if (pTcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); d_add(direntry, NULL); /* if it was once a directory (but how can we tell?) 
we could do shrink_dcache_parent(direntry); */ diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 0c7e36910e31..b06b60620240 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1324,10 +1324,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) /*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need to set uid/gid */ inc_nlink(inode); - if (pTcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb); cifs_fill_uniqueid(inode->i_sb, &fattr); @@ -1368,10 +1364,6 @@ mkdir_get_info: rc = cifs_get_inode_info(&newinode, full_path, NULL, inode->i_sb, xid, NULL); - if (pTcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); d_instantiate(direntry, newinode); /* setting nlink not necessary except in cases where we * failed to get it from the server or was set bogus */ diff --git a/fs/cifs/link.c b/fs/cifs/link.c index fe2f6a93c49e..306769de2fb5 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -524,10 +524,6 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) cFYI(1, "Create symlink ok, getinodeinfo fail rc = %d", rc); } else { - if (pTcon->nocase) - d_set_d_op(direntry, &cifs_ci_dentry_ops); - else - d_set_d_op(direntry, &cifs_dentry_ops); d_instantiate(direntry, newinode); } } diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 76b1b37c9e6b..7f25cc3d2256 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -102,11 +102,6 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name, return NULL; } - if (cifs_sb_master_tcon(CIFS_SB(sb))->nocase) - d_set_d_op(dentry, &cifs_ci_dentry_ops); - else - d_set_d_op(dentry, &cifs_dentry_ops); - alias = d_materialise_unique(dentry, inode); if (alias != NULL) { dput(dentry); -- cgit v1.2.2 From 41ced6dcf3dc6b901716fda0dc8de3536da4d39b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 12:06:56 -0500 Subject: switch gfs2, close races Signed-off-by: Al Viro --- fs/gfs2/export.c | 13 ++----------- fs/gfs2/ops_fstype.c | 2 +- fs/gfs2/ops_inode.c | 2 -- 3 files changed, 3 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c index 97012ecff560..9023db8184f9 100644 --- a/fs/gfs2/export.c +++ b/fs/gfs2/export.c @@ -126,12 +126,7 @@ static int gfs2_get_name(struct dentry *parent, char *name, static struct dentry *gfs2_get_parent(struct dentry *child) { - struct dentry *dentry; - - dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1)); - if (!IS_ERR(dentry)) - d_set_d_op(dentry, &gfs2_dops); - return dentry; + return d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1)); } static struct dentry *gfs2_get_dentry(struct super_block *sb, @@ -139,7 +134,6 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, { struct gfs2_sbd *sdp = sb->s_fs_info; struct inode *inode; - struct dentry *dentry; inode = gfs2_ilookup(sb, inum->no_addr); if (inode) { @@ -156,10 +150,7 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb, return ERR_CAST(inode); out_inode: - dentry = d_obtain_alias(inode); - if (!IS_ERR(dentry)) - d_set_d_op(dentry, &gfs2_dops); - return dentry; + return d_obtain_alias(inode); } static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 2aeabd4218cc..693f4470a2df 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -440,7 +440,6 
@@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr, iput(inode); return -ENOMEM; } - d_set_d_op(dentry, &gfs2_dops); *dptr = dentry; return 0; } @@ -1106,6 +1105,7 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent sb->s_magic = GFS2_MAGIC; sb->s_op = &gfs2_super_ops; + sb->s_d_op = &gfs2_dops; sb->s_export_op = &gfs2_export_ops; sb->s_xattr = gfs2_xattr_handlers; sb->s_qcop = &gfs2_quotactl_ops; diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index 1501db4f0e6d..ae140c8abb5c 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -106,8 +106,6 @@ static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry, { struct inode *inode = NULL; - d_set_d_op(dentry, &gfs2_dops); - inode = gfs2_lookupi(dir, &dentry->d_name, 0); if (inode && IS_ERR(inode)) return ERR_CAST(inode); -- cgit v1.2.2 From ba87167c06ed0049260d9ca36405c0f8af609e07 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Dec 2010 12:10:00 -0500 Subject: switch ocfs2, close races Signed-off-by: Al Viro --- fs/ocfs2/export.c | 6 +----- fs/ocfs2/namei.c | 5 ----- fs/ocfs2/super.c | 1 + 3 files changed, 2 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c index 6adafa576065..5dbc3062b4fd 100644 --- a/fs/ocfs2/export.c +++ b/fs/ocfs2/export.c @@ -137,9 +137,7 @@ check_gen: } result = d_obtain_alias(inode); - if (!IS_ERR(result)) - d_set_d_op(result, &ocfs2_dentry_ops); - else + if (IS_ERR(result)) mlog_errno(PTR_ERR(result)); bail: @@ -175,8 +173,6 @@ static struct dentry *ocfs2_get_parent(struct dentry *child) } parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0)); - if (!IS_ERR(parent)) - d_set_d_op(parent, &ocfs2_dentry_ops); bail_unlock: ocfs2_inode_unlock(dir, 0); diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 30c523144452..849fb4a2e814 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -147,7 +147,6 @@ static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry, spin_unlock(&oi->ip_lock); bail_add: - d_set_d_op(dentry, &ocfs2_dentry_ops); ret = d_splice_alias(inode, dentry); if (inode) { @@ -415,7 +414,6 @@ static int ocfs2_mknod(struct inode *dir, mlog_errno(status); goto leave; } - d_set_d_op(dentry, &ocfs2_dentry_ops); status = ocfs2_add_entry(handle, dentry, inode, OCFS2_I(inode)->ip_blkno, parent_fe_bh, @@ -743,7 +741,6 @@ static int ocfs2_link(struct dentry *old_dentry, } ihold(inode); - d_set_d_op(dentry, &ocfs2_dentry_ops); d_instantiate(dentry, inode); out_commit: @@ -1797,7 +1794,6 @@ static int ocfs2_symlink(struct inode *dir, mlog_errno(status); goto bail; } - d_set_d_op(dentry, &ocfs2_dentry_ops); status = ocfs2_add_entry(handle, dentry, inode, le64_to_cpu(fe->i_blkno), parent_fe_bh, @@ -2462,7 +2458,6 @@ int ocfs2_mv_orphaned_inode_to_new(struct inode *dir, goto out_commit; } - d_set_d_op(dentry, &ocfs2_dentry_ops); d_instantiate(dentry, inode); status = 0; out_commit: diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 17ff46fa8a10..06d1f749ca89 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -2097,6 +2097,7 @@ static int ocfs2_initialize_super(struct super_block *sb, sb->s_fs_info = osb; sb->s_op = &ocfs2_sops; + sb->s_d_op = &ocfs2_dentry_ops; sb->s_export_op = &ocfs2_export_ops; sb->s_qcop = &ocfs2_quotactl_ops; sb->dq_op = &ocfs2_quota_operations; -- cgit v1.2.2 From af53d29ac13a97304d44343dc3b26154ca595268 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Dec 2010 10:56:06 -0500 Subject: switch btrfs, close races 
Signed-off-by: Al Viro --- fs/btrfs/export.c | 12 ++---------- fs/btrfs/inode.c | 2 -- fs/btrfs/super.c | 1 + 3 files changed, 3 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 0ccf9a8afcdf..9786963b07e5 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -65,7 +65,6 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, { struct btrfs_fs_info *fs_info = btrfs_sb(sb)->fs_info; struct btrfs_root *root; - struct dentry *dentry; struct inode *inode; struct btrfs_key key; int index; @@ -108,10 +107,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid, return ERR_PTR(-ESTALE); } - dentry = d_obtain_alias(inode); - if (!IS_ERR(dentry)) - d_set_d_op(dentry, &btrfs_dentry_operations); - return dentry; + return d_obtain_alias(inode); fail: srcu_read_unlock(&fs_info->subvol_srcu, index); return ERR_PTR(err); @@ -166,7 +162,6 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh, static struct dentry *btrfs_get_parent(struct dentry *child) { struct inode *dir = child->d_inode; - struct dentry *dentry; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_path *path; struct extent_buffer *leaf; @@ -223,10 +218,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child) key.type = BTRFS_INODE_ITEM_KEY; key.offset = 0; - dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL)); - if (!IS_ERR(dentry)) - d_set_d_op(dentry, &btrfs_dentry_operations); - return dentry; + return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL)); fail: btrfs_free_path(path); return ERR_PTR(ret); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a0ff46a47895..f870aefc59dd 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4084,8 +4084,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) int index; int ret; - d_set_d_op(dentry, &btrfs_dentry_operations); - if (dentry->d_name.len > BTRFS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 883c6fa1367e..22acdaa78ce1 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -460,6 +460,7 @@ static int btrfs_fill_super(struct super_block *sb, sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_magic = BTRFS_SUPER_MAGIC; sb->s_op = &btrfs_super_ops; + sb->s_d_op = &btrfs_dentry_operations; sb->s_export_op = &btrfs_export_ops; sb->s_xattr = btrfs_xattr_handlers; sb->s_time_gran = 1; -- cgit v1.2.2 From 43d344d7722f9b914849ba0014342111a9a0b03e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 12 Jan 2011 16:12:05 -0500 Subject: switch hpfs Signed-off-by: Al Viro --- fs/hpfs/dentry.c | 7 +------ fs/hpfs/dir.c | 1 - fs/hpfs/hpfs_fn.h | 2 +- fs/hpfs/super.c | 2 +- 4 files changed, 3 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c index 32c13a94e1e9..05d4816e4e77 100644 --- a/fs/hpfs/dentry.c +++ b/fs/hpfs/dentry.c @@ -58,12 +58,7 @@ static int hpfs_compare_dentry(const struct dentry *parent, return 0; } -static const struct dentry_operations hpfs_dentry_operations = { +const struct dentry_operations hpfs_dentry_operations = { .d_hash = hpfs_hash_dentry, .d_compare = hpfs_compare_dentry, }; - -void hpfs_set_dentry_operations(struct dentry *dentry) -{ - d_set_d_op(dentry, &hpfs_dentry_operations); -} diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c index 2338130cceba..d32f63a569f7 100644 --- a/fs/hpfs/dir.c +++ b/fs/hpfs/dir.c @@ -298,7 +298,6 @@ struct dentry *hpfs_lookup(struct inode *dir, 
struct dentry *dentry, struct name end: end_add: - hpfs_set_dentry_operations(dentry); unlock_kernel(); d_add(dentry, result); return NULL; diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index 2fee17d0d9ab..1c43dbea55e8 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -233,7 +233,7 @@ void hpfs_mark_4buffers_dirty(struct quad_buffer_head *); /* dentry.c */ -void hpfs_set_dentry_operations(struct dentry *); +extern const struct dentry_operations hpfs_dentry_operations; /* dir.c */ diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 49935ba78db8..b30426b1fc97 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -550,6 +550,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) /* Fill superblock stuff */ s->s_magic = HPFS_SUPER_MAGIC; s->s_op = &hpfs_sops; + s->s_d_op = &hpfs_dentry_operations; sbi->sb_root = superblock->root; sbi->sb_fs_size = superblock->n_sectors; @@ -651,7 +652,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) iput(root); goto bail0; } - hpfs_set_dentry_operations(s->s_root); /* * find the root directory's . pointer & finish filling in the inode -- cgit v1.2.2 From 9501e4c48e1561443e1a16c2c9917ec6c63118a4 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 12 Jan 2011 16:25:02 -0500 Subject: switch coda Coda ->d_revalidate() actually checks for root, ->d_delete() is irrelevant. So we can use the same d_op for all coda dentries Signed-off-by: Al Viro --- fs/coda/dir.c | 4 +--- fs/coda/inode.c | 1 + 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 29badd91360f..9df71f0eb218 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -61,7 +61,7 @@ static int coda_return_EIO(void) } #define CODA_EIO_ERROR ((void *) (coda_return_EIO)) -static const struct dentry_operations coda_dentry_operations = +const struct dentry_operations coda_dentry_operations = { .d_revalidate = coda_dentry_revalidate, .d_delete = coda_dentry_delete, @@ -126,8 +126,6 @@ static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, struc return ERR_PTR(error); exit: - d_set_d_op(entry, &coda_dentry_operations); - if (inode && (type & CODA_NOCACHE)) coda_flag_inode(inode, C_VATTR | C_PURGE); diff --git a/fs/coda/inode.c b/fs/coda/inode.c index 50dc7d189f56..bd7fde2721a8 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -193,6 +193,7 @@ static int coda_fill_super(struct super_block *sb, void *data, int silent) sb->s_blocksize_bits = 12; sb->s_magic = CODA_SUPER_MAGIC; sb->s_op = &coda_super_operations; + sb->s_d_op = &coda_dentry_operations; sb->s_bdi = &vc->bdi; /* get root fid from Venus: this needs the root inode */ -- cgit v1.2.2 From 31a203df9c109480fc6d48ba0a68763e89199acb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 12 Jan 2011 16:36:09 -0500 Subject: take coda-private headers out of include/linux Signed-off-by: Al Viro --- fs/coda/cache.c | 5 +-- fs/coda/cnode.c | 3 +- fs/coda/coda_cache.h | 22 +++++++++++ fs/coda/coda_fs_i.h | 58 +++++++++++++++++++++++++++++ fs/coda/coda_linux.c | 3 +- fs/coda/coda_linux.h | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++ fs/coda/dir.c | 5 +-- fs/coda/file.c | 3 +- fs/coda/inode.c | 5 +-- fs/coda/pioctl.c | 4 +- fs/coda/psdev.c | 4 +- fs/coda/symlink.c | 4 +- fs/coda/upcall.c | 5 +-- 13 files changed, 198 insertions(+), 24 deletions(-) create mode 100644 fs/coda/coda_cache.h create mode 100644 fs/coda/coda_fs_i.h create mode 100644 fs/coda/coda_linux.h (limited to 'fs') diff --git a/fs/coda/cache.c 
b/fs/coda/cache.c index 5525e1c660fd..690157876184 100644 --- a/fs/coda/cache.c +++ b/fs/coda/cache.c @@ -20,10 +20,9 @@ #include #include -#include #include -#include -#include +#include "coda_linux.h" +#include "coda_cache.h" static atomic_t permission_epoch = ATOMIC_INIT(0); diff --git a/fs/coda/cnode.c b/fs/coda/cnode.c index 602240569c89..6475877b0763 100644 --- a/fs/coda/cnode.c +++ b/fs/coda/cnode.c @@ -7,9 +7,8 @@ #include #include -#include -#include #include +#include "coda_linux.h" static inline int coda_fideq(struct CodaFid *fid1, struct CodaFid *fid2) { diff --git a/fs/coda/coda_cache.h b/fs/coda/coda_cache.h new file mode 100644 index 000000000000..c910b5eb1ceb --- /dev/null +++ b/fs/coda/coda_cache.h @@ -0,0 +1,22 @@ +/* Coda filesystem -- Linux Minicache + * + * Copyright (C) 1989 - 1997 Carnegie Mellon University + * + * Carnegie Mellon University encourages users of this software to + * contribute improvements to the Coda project. Contact Peter Braam + * + */ + +#ifndef _CFSNC_HEADER_ +#define _CFSNC_HEADER_ + +/* credential cache */ +void coda_cache_enter(struct inode *inode, int mask); +void coda_cache_clear_inode(struct inode *); +void coda_cache_clear_all(struct super_block *sb); +int coda_cache_check(struct inode *inode, int mask); + +/* for downcalls and attributes and lookups */ +void coda_flag_inode_children(struct inode *inode, int flag); + +#endif /* _CFSNC_HEADER_ */ diff --git a/fs/coda/coda_fs_i.h b/fs/coda/coda_fs_i.h new file mode 100644 index 000000000000..e35071b1de0e --- /dev/null +++ b/fs/coda/coda_fs_i.h @@ -0,0 +1,58 @@ +/* + * coda_fs_i.h + * + * Copyright (C) 1998 Carnegie Mellon University + * + */ + +#ifndef _LINUX_CODA_FS_I +#define _LINUX_CODA_FS_I + +#include +#include +#include +#include + +/* + * coda fs inode data + * c_lock protects accesses to c_flags, c_mapcount, c_cached_epoch, c_uid and + * c_cached_perm. + * vfs_inode is set only when the inode is created and never changes. + * c_fid is set when the inode is created and should be considered immutable. 
+ */ +struct coda_inode_info { + struct CodaFid c_fid; /* Coda identifier */ + u_short c_flags; /* flags (see below) */ + unsigned int c_mapcount; /* nr of times this inode is mapped */ + unsigned int c_cached_epoch; /* epoch for cached permissions */ + vuid_t c_uid; /* fsuid for cached permissions */ + unsigned int c_cached_perm; /* cached access permissions */ + spinlock_t c_lock; + struct inode vfs_inode; +}; + +/* + * coda fs file private data + */ +#define CODA_MAGIC 0xC0DAC0DA +struct coda_file_info { + int cfi_magic; /* magic number */ + struct file *cfi_container; /* container file for this cnode */ + unsigned int cfi_mapcount; /* nr of times this file is mapped */ +}; + +#define CODA_FTOC(file) ((struct coda_file_info *)((file)->private_data)) + +/* flags */ +#define C_VATTR 0x1 /* Validity of vattr in inode */ +#define C_FLUSH 0x2 /* used after a flush */ +#define C_DYING 0x4 /* from venus (which died) */ +#define C_PURGE 0x8 + +int coda_cnode_make(struct inode **, struct CodaFid *, struct super_block *); +struct inode *coda_iget(struct super_block *sb, struct CodaFid *fid, struct coda_vattr *attr); +int coda_cnode_makectl(struct inode **inode, struct super_block *sb); +struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb); +void coda_replace_fid(struct inode *, struct CodaFid *, struct CodaFid *); + +#endif diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c index bf4a3fd3c8e3..2bdbcc11b373 100644 --- a/fs/coda/coda_linux.c +++ b/fs/coda/coda_linux.c @@ -17,9 +17,8 @@ #include #include -#include #include -#include +#include "coda_linux.h" /* initialize the debugging variables */ int coda_fake_statfs; diff --git a/fs/coda/coda_linux.h b/fs/coda/coda_linux.h new file mode 100644 index 000000000000..9b0c5323890b --- /dev/null +++ b/fs/coda/coda_linux.h @@ -0,0 +1,101 @@ +/* + * Coda File System, Linux Kernel module + * + * Original version, adapted from cfs_mach.c, (C) Carnegie Mellon University + * Linux modifications (C) 1996, Peter J. Braam + * Rewritten for Linux 2.1 (C) 1997 Carnegie Mellon University + * + * Carnegie Mellon University encourages users of this software to + * contribute improvements to the Coda project. 
+ */ + +#ifndef _LINUX_CODA_FS +#define _LINUX_CODA_FS + +#include +#include +#include +#include +#include +#include +#include +#include +#include "coda_fs_i.h" + +/* operations */ +extern const struct inode_operations coda_dir_inode_operations; +extern const struct inode_operations coda_file_inode_operations; +extern const struct inode_operations coda_ioctl_inode_operations; + +extern const struct dentry_operations coda_dentry_operations; + +extern const struct address_space_operations coda_file_aops; +extern const struct address_space_operations coda_symlink_aops; + +extern const struct file_operations coda_dir_operations; +extern const struct file_operations coda_file_operations; +extern const struct file_operations coda_ioctl_operations; + +/* operations shared over more than one file */ +int coda_open(struct inode *i, struct file *f); +int coda_release(struct inode *i, struct file *f); +int coda_permission(struct inode *inode, int mask, unsigned int flags); +int coda_revalidate_inode(struct dentry *); +int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *); +int coda_setattr(struct dentry *, struct iattr *); + +/* this file: heloers */ +char *coda_f2s(struct CodaFid *f); +int coda_isroot(struct inode *i); +int coda_iscontrol(const char *name, size_t length); + +void coda_vattr_to_iattr(struct inode *, struct coda_vattr *); +void coda_iattr_to_vattr(struct iattr *, struct coda_vattr *); +unsigned short coda_flags_to_cflags(unsigned short); + +/* sysctl.h */ +void coda_sysctl_init(void); +void coda_sysctl_clean(void); + +#define CODA_ALLOC(ptr, cast, size) do { \ + if (size < PAGE_SIZE) \ + ptr = kmalloc((unsigned long) size, GFP_KERNEL); \ + else \ + ptr = (cast)vmalloc((unsigned long) size); \ + if (!ptr) \ + printk("kernel malloc returns 0 at %s:%d\n", __FILE__, __LINE__); \ + else memset( ptr, 0, size ); \ +} while (0) + + +#define CODA_FREE(ptr,size) \ + do { if (size < PAGE_SIZE) kfree((ptr)); else vfree((ptr)); } while (0) + +/* inode to cnode access functions */ + +static inline struct coda_inode_info *ITOC(struct inode *inode) +{ + return list_entry(inode, struct coda_inode_info, vfs_inode); +} + +static __inline__ struct CodaFid *coda_i2f(struct inode *inode) +{ + return &(ITOC(inode)->c_fid); +} + +static __inline__ char *coda_i2s(struct inode *inode) +{ + return coda_f2s(&(ITOC(inode)->c_fid)); +} + +/* this will not zap the inode away */ +static __inline__ void coda_flag_inode(struct inode *inode, int flag) +{ + struct coda_inode_info *cii = ITOC(inode); + + spin_lock(&cii->c_lock); + cii->c_flags |= flag; + spin_unlock(&cii->c_lock); +} + +#endif diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 9df71f0eb218..2b8dae4d121e 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -23,10 +23,9 @@ #include #include -#include #include -#include -#include +#include "coda_linux.h" +#include "coda_cache.h" #include "coda_int.h" diff --git a/fs/coda/file.c b/fs/coda/file.c index c8b50ba4366a..0433057be330 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -21,10 +21,9 @@ #include #include -#include -#include #include +#include "coda_linux.h" #include "coda_int.h" static ssize_t diff --git a/fs/coda/inode.c b/fs/coda/inode.c index bd7fde2721a8..261d86f9caec 100644 --- a/fs/coda/inode.c +++ b/fs/coda/inode.c @@ -28,10 +28,9 @@ #include #include -#include #include -#include -#include +#include "coda_linux.h" +#include "coda_cache.h" #include "coda_int.h" diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c index 741f0bd03918..6cbb3afb36dc 100644 --- a/fs/coda/pioctl.c +++ 
b/fs/coda/pioctl.c @@ -19,10 +19,10 @@ #include #include -#include -#include #include +#include "coda_linux.h" + /* pioctl ops */ static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags); static long coda_pioctl(struct file *filp, unsigned int cmd, diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c index 62647a8595e4..8f616e0e252c 100644 --- a/fs/coda/psdev.c +++ b/fs/coda/psdev.c @@ -43,10 +43,10 @@ #include #include -#include -#include #include +#include "coda_linux.h" + #include "coda_int.h" /* statistics */ diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c index af78f007a2b0..ab94ef63caef 100644 --- a/fs/coda/symlink.c +++ b/fs/coda/symlink.c @@ -16,9 +16,9 @@ #include #include -#include #include -#include + +#include "coda_linux.h" static int coda_symlink_filler(struct file *file, struct page *page) { diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c index c3563cab9758..9727e0c52579 100644 --- a/fs/coda/upcall.c +++ b/fs/coda/upcall.c @@ -33,10 +33,9 @@ #include #include -#include #include -#include -#include +#include "coda_linux.h" +#include "coda_cache.h" #include "coda_int.h" -- cgit v1.2.2 From d463a0c4b53a8fab505fd9aa3a1a04cb9f411b78 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 12 Jan 2011 16:41:05 -0500 Subject: switch configfs Signed-off-by: Al Viro --- fs/configfs/configfs_internal.h | 1 + fs/configfs/dir.c | 6 +----- fs/configfs/mount.c | 1 + 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index 026cf68553a4..82bda8fdfc1c 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -90,6 +90,7 @@ extern const struct file_operations configfs_file_operations; extern const struct file_operations bin_fops; extern const struct inode_operations configfs_dir_inode_operations; extern const struct inode_operations configfs_symlink_inode_operations; +extern const struct dentry_operations configfs_dentry_ops; extern int configfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname); diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 36637a8c1ed3..90ff3cb10de3 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -72,7 +72,7 @@ static int configfs_d_delete(const struct dentry *dentry) return 1; } -static const struct dentry_operations configfs_dentry_ops = { +const struct dentry_operations configfs_dentry_ops = { .d_iput = configfs_d_iput, /* simple_delete_dentry() isn't exported */ .d_delete = configfs_d_delete, @@ -442,7 +442,6 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den return error; } - d_set_d_op(dentry, &configfs_dentry_ops); d_rehash(dentry); return 0; @@ -489,7 +488,6 @@ static struct dentry * configfs_lookup(struct inode *dir, */ if (dentry->d_name.len > NAME_MAX) return ERR_PTR(-ENAMETOOLONG); - d_set_d_op(dentry, &configfs_dentry_ops); d_add(dentry, NULL); return NULL; } @@ -683,7 +681,6 @@ static int create_default_group(struct config_group *parent_group, ret = -ENOMEM; child = d_alloc(parent, &name); if (child) { - d_set_d_op(child, &configfs_dentry_ops); d_add(child, NULL); ret = configfs_attach_group(&parent_group->cg_item, @@ -1681,7 +1678,6 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys) err = -ENOMEM; dentry = d_alloc(configfs_sb->s_root, &name); if (dentry) { - d_set_d_op(dentry, &configfs_dentry_ops); d_add(dentry, NULL); err = configfs_attach_group(sd->s_element, &group->cg_item, diff --git a/fs/configfs/mount.c 
b/fs/configfs/mount.c index 7d3607febe1c..ecc62178beda 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c @@ -101,6 +101,7 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent) configfs_root_group.cg_item.ci_dentry = root; root->d_fsdata = &configfs_root; sb->s_root = root; + sb->s_d_op = &configfs_dentry_ops; /* the rest get that */ return 0; } -- cgit v1.2.2 From a129880dafaa3c80eb0bae714da38088ccc2ce21 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 12 Jan 2011 16:45:19 -0500 Subject: switch affs either d_op instance would work for root, actually... Signed-off-by: Al Viro --- fs/affs/affs.h | 1 + fs/affs/namei.c | 3 +-- fs/affs/super.c | 6 +++++- 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/affs/affs.h b/fs/affs/affs.h index a8cbdeb34025..0e95f73a7023 100644 --- a/fs/affs/affs.h +++ b/fs/affs/affs.h @@ -201,6 +201,7 @@ extern const struct address_space_operations affs_aops; extern const struct address_space_operations affs_aops_ofs; extern const struct dentry_operations affs_dentry_operations; +extern const struct dentry_operations affs_intl_dentry_operations; static inline void affs_set_blocksize(struct super_block *sb, int size) diff --git a/fs/affs/namei.c b/fs/affs/namei.c index 944a4042fb65..e3e9efc1fdd8 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c @@ -32,7 +32,7 @@ const struct dentry_operations affs_dentry_operations = { .d_compare = affs_compare_dentry, }; -static const struct dentry_operations affs_intl_dentry_operations = { +const struct dentry_operations affs_intl_dentry_operations = { .d_hash = affs_intl_hash_dentry, .d_compare = affs_intl_compare_dentry, }; @@ -240,7 +240,6 @@ affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) if (IS_ERR(inode)) return ERR_CAST(inode); } - d_set_d_op(dentry, AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations); d_add(dentry, inode); return NULL; } diff --git a/fs/affs/super.c b/fs/affs/super.c index d39081bbe7ce..b31507d0f9b9 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -477,12 +477,16 @@ got_root: goto out_error_noinode; } + if (AFFS_SB(sb)->s_flags & SF_INTL) + sb->s_d_op = &affs_intl_dentry_operations; + else + sb->s_d_op = &affs_dentry_operations; + sb->s_root = d_alloc_root(root_inode); if (!sb->s_root) { printk(KERN_ERR "AFFS: Get root inode failed\n"); goto out_error; } - d_set_d_op(sb->s_root, &affs_dentry_operations); pr_debug("AFFS: s_flags=%lX\n",sb->s_flags); return 0; -- cgit v1.2.2 From f772c4a6a320ec25d94ba951881474eeef1b7f48 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 12 Jan 2011 16:47:00 -0500 Subject: switch hostfs ->d_delete() doesn't matter for s_root anyway Signed-off-by: Al Viro --- fs/hostfs/hostfs_kern.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index d3244d949a4e..2638c834ed28 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -612,7 +612,6 @@ struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry, goto out_put; d_add(dentry, inode); - d_set_d_op(dentry, &hostfs_dentry_ops); return NULL; out_put: @@ -922,6 +921,7 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent) sb->s_blocksize_bits = 10; sb->s_magic = HOSTFS_SUPER_MAGIC; sb->s_op = &hostfs_sbops; + sb->s_d_op = &hostfs_dentry_ops; sb->s_maxbytes = MAX_LFS_FILESIZE; /* NULL is printed as by sprintf: avoid that. 
*/ -- cgit v1.2.2 From c74a1cbb3cac348f276fabc381758f5b0b4713b2 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 12 Jan 2011 16:59:34 -0500 Subject: pass default dentry_operations to mount_pseudo() Signed-off-by: Al Viro --- fs/anon_inodes.c | 21 +++++++++++---------- fs/block_dev.c | 2 +- fs/libfs.c | 4 +++- fs/pipe.c | 4 ++-- 4 files changed, 17 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 5fd38112a6ca..549a53cc0283 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -26,12 +26,6 @@ static struct vfsmount *anon_inode_mnt __read_mostly; static struct inode *anon_inode_inode; static const struct file_operations anon_inode_fops; -static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) -{ - return mount_pseudo(fs_type, "anon_inode:", NULL, ANON_INODE_FS_MAGIC); -} - /* * anon_inodefs_dname() is called from d_path(). */ @@ -41,14 +35,22 @@ static char *anon_inodefs_dname(struct dentry *dentry, char *buffer, int buflen) dentry->d_name.name); } +static const struct dentry_operations anon_inodefs_dentry_operations = { + .d_dname = anon_inodefs_dname, +}; + +static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data) +{ + return mount_pseudo(fs_type, "anon_inode:", NULL, + &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC); +} + static struct file_system_type anon_inode_fs_type = { .name = "anon_inodefs", .mount = anon_inodefs_mount, .kill_sb = kill_anon_super, }; -static const struct dentry_operations anon_inodefs_dentry_operations = { - .d_dname = anon_inodefs_dname, -}; /* * nop .set_page_dirty method so that people can use .page_mkwrite on @@ -113,7 +115,6 @@ struct file *anon_inode_getfile(const char *name, */ ihold(anon_inode_inode); - d_set_d_op(path.dentry, &anon_inodefs_dentry_operations); d_instantiate(path.dentry, anon_inode_inode); error = -ENFILE; diff --git a/fs/block_dev.c b/fs/block_dev.c index 771f23527010..88da70355aa3 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -473,7 +473,7 @@ static const struct super_operations bdev_sops = { static struct dentry *bd_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - return mount_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576); + return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, 0x62646576); } static struct file_system_type bd_type = { diff --git a/fs/libfs.c b/fs/libfs.c index 889311e3d06b..c88eab55aec9 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -217,7 +217,8 @@ static const struct super_operations simple_super_operations = { * will never be mountable) */ struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name, - const struct super_operations *ops, unsigned long magic) + const struct super_operations *ops, + const struct dentry_operations *dops, unsigned long magic) { struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL); struct dentry *dentry; @@ -254,6 +255,7 @@ struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name, dentry->d_parent = dentry; d_instantiate(dentry, root); s->s_root = dentry; + s->s_d_op = dops; s->s_flags |= MS_ACTIVE; return dget(s->s_root); diff --git a/fs/pipe.c b/fs/pipe.c index 68f1f8e4e23b..6b0255a74f36 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -1004,7 +1004,6 @@ struct file *create_write_pipe(int flags) goto err_inode; path.mnt = mntget(pipe_mnt); - d_set_d_op(path.dentry, &pipefs_dentry_operations); 
d_instantiate(path.dentry, inode); err = -ENFILE; @@ -1266,7 +1265,8 @@ static const struct super_operations pipefs_ops = { static struct dentry *pipefs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { - return mount_pseudo(fs_type, "pipe:", &pipefs_ops, PIPEFS_MAGIC); + return mount_pseudo(fs_type, "pipe:", &pipefs_ops, + &pipefs_dentry_operations, PIPEFS_MAGIC); } static struct file_system_type pipe_fs_type = { -- cgit v1.2.2 From 98cd3fb0a2c376f583216ec35f66175a71b2ef67 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 12 Jan 2011 17:10:55 -0500 Subject: switch 9p here we actually *want* ->d_op for root; setting it allows to get rid of kludge in v9fs_kill_super() since now we have proper ->d_release() for root and don't need to call it manually. Signed-off-by: Al Viro --- fs/9p/v9fs_vfs.h | 1 - fs/9p/vfs_dentry.c | 2 +- fs/9p/vfs_inode.c | 5 ----- fs/9p/vfs_super.c | 8 +++++--- 4 files changed, 6 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h index bab0eac873f4..b789f8e597ec 100644 --- a/fs/9p/v9fs_vfs.h +++ b/fs/9p/v9fs_vfs.h @@ -59,7 +59,6 @@ void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *); int v9fs_dir_release(struct inode *inode, struct file *filp); int v9fs_file_open(struct inode *inode, struct file *file); void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat); -void v9fs_dentry_release(struct dentry *); int v9fs_uflags2omode(int uflags, int extended); ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64); diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c index 466d2a4fc5cb..233b7d4ffe5e 100644 --- a/fs/9p/vfs_dentry.c +++ b/fs/9p/vfs_dentry.c @@ -86,7 +86,7 @@ static int v9fs_cached_dentry_delete(const struct dentry *dentry) * */ -void v9fs_dentry_release(struct dentry *dentry) +static void v9fs_dentry_release(struct dentry *dentry) { struct v9fs_dentry *dent; struct p9_fid *temp, *current_fid; diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 5076eeb95502..b76a40bdf4c2 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -699,11 +699,6 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, goto error_iput; inst_out: - if (v9ses->cache) - d_set_d_op(dentry, &v9fs_cached_dentry_operations); - else - d_set_d_op(dentry, &v9fs_dentry_operations); - d_add(dentry, inode); return NULL; diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index c55c614500ad..dbaabe3b8131 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -141,6 +141,11 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, } v9fs_fill_super(sb, v9ses, flags, data); + if (v9ses->cache) + sb->s_d_op = &v9fs_cached_dentry_operations; + else + sb->s_d_op = &v9fs_dentry_operations; + inode = v9fs_get_inode(sb, S_IFDIR | mode); if (IS_ERR(inode)) { retval = PTR_ERR(inode); @@ -217,9 +222,6 @@ static void v9fs_kill_super(struct super_block *s) P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s); - if (s->s_root) - v9fs_dentry_release(s->s_root); /* clunk root */ - kill_anon_super(s); v9fs_session_cancel(v9ses); -- cgit v1.2.2 From 0378c4051a621303ae919f1cee832206a4c1aa68 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 12 Jan 2011 17:25:03 -0500 Subject: switch ncpfs merge dentry_operations for root and non-root Signed-off-by: Al Viro --- fs/ncpfs/dir.c | 15 ++++----------- fs/ncpfs/inode.c | 2 +- 2 files changed, 5 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index 28f136d4aaec..119accd07dd5 
100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -82,7 +82,7 @@ static int ncp_compare_dentry(const struct dentry *, const struct inode *, unsigned int, const char *, const struct qstr *); static int ncp_delete_dentry(const struct dentry *); -static const struct dentry_operations ncp_dentry_operations = +const struct dentry_operations ncp_dentry_operations = { .d_revalidate = ncp_lookup_validate, .d_hash = ncp_hash_dentry, @@ -90,14 +90,6 @@ static const struct dentry_operations ncp_dentry_operations = .d_delete = ncp_delete_dentry, }; -const struct dentry_operations ncp_root_dentry_operations = -{ - .d_hash = ncp_hash_dentry, - .d_compare = ncp_compare_dentry, - .d_delete = ncp_delete_dentry, -}; - - #define ncp_namespace(i) (NCP_SERVER(i)->name_space[NCP_FINFO(i)->volNumber]) static inline int ncp_preserve_entry_case(struct inode *i, __u32 nscreator) @@ -309,6 +301,9 @@ ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd) int res, val = 0, len; __u8 __name[NCP_MAXPATHLEN + 1]; + if (dentry == dentry->d_sb->s_root) + return 1; + if (nd->flags & LOOKUP_RCU) return -ECHILD; @@ -637,7 +632,6 @@ ncp_fill_cache(struct file *filp, void *dirent, filldir_t filldir, entry->ino = iunique(dir->i_sb, 2); inode = ncp_iget(dir->i_sb, entry); if (inode) { - d_set_d_op(newdent, &ncp_dentry_operations); d_instantiate(newdent, inode); if (!hashed) d_rehash(newdent); @@ -893,7 +887,6 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc if (inode) { ncp_new_dentry(dentry); add_entry: - d_set_d_op(dentry, &ncp_dentry_operations); d_add(dentry, inode); error = 0; } diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 9b39a5dd4131..8b8bebbb9601 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -544,6 +544,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) sb->s_blocksize_bits = 10; sb->s_magic = NCP_SUPER_MAGIC; sb->s_op = &ncp_sops; + sb->s_d_op = &ncp_dentry_operations; sb->s_bdi = &server->bdi; server = NCP_SBP(sb); @@ -723,7 +724,6 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) sb->s_root = d_alloc_root(root_inode); if (!sb->s_root) goto out_no_root; - d_set_d_op(sb->s_root, &ncp_root_dentry_operations); return 0; out_no_root: -- cgit v1.2.2 From 32c419d95f3d1da891ab9bd032a214ee05b94ed4 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 12 Jan 2011 17:37:47 -0500 Subject: move internal-only parts of ncpfs headers to fs/ncpfs Signed-off-by: Al Viro --- fs/ncpfs/dir.c | 4 +- fs/ncpfs/file.c | 3 +- fs/ncpfs/inode.c | 4 +- fs/ncpfs/ioctl.c | 4 +- fs/ncpfs/mmap.c | 4 +- fs/ncpfs/ncp_fs.h | 98 ++++++++++++++++++++++++++ fs/ncpfs/ncp_fs_i.h | 29 ++++++++ fs/ncpfs/ncp_fs_sb.h | 176 ++++++++++++++++++++++++++++++++++++++++++++++ fs/ncpfs/ncplib_kernel.c | 2 +- fs/ncpfs/ncplib_kernel.h | 2 - fs/ncpfs/ncpsign_kernel.c | 1 + fs/ncpfs/ncpsign_kernel.h | 2 - fs/ncpfs/sock.c | 2 +- fs/ncpfs/symlink.c | 4 +- 14 files changed, 313 insertions(+), 22 deletions(-) create mode 100644 fs/ncpfs/ncp_fs.h create mode 100644 fs/ncpfs/ncp_fs_i.h create mode 100644 fs/ncpfs/ncp_fs_sb.h (limited to 'fs') diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index 119accd07dd5..f6946bb5cb55 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -21,9 +21,7 @@ #include #include -#include - -#include "ncplib_kernel.h" +#include "ncp_fs.h" static void ncp_read_volume_list(struct file *, void *, filldir_t, struct ncp_cache_control *); diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c index cb50aaf981df..0ed65e0c3dfe 100644 --- 
a/fs/ncpfs/file.c +++ b/fs/ncpfs/file.c @@ -18,8 +18,7 @@ #include #include -#include -#include "ncplib_kernel.h" +#include "ncp_fs.h" static int ncp_fsync(struct file *file, int datasync) { diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c index 8b8bebbb9601..00a1d1c3d3a4 100644 --- a/fs/ncpfs/inode.c +++ b/fs/ncpfs/inode.c @@ -31,11 +31,9 @@ #include #include -#include - #include -#include "ncplib_kernel.h" +#include "ncp_fs.h" #include "getopt.h" #define NCP_DEFAULT_FILE_MODE 0600 diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c index d40a547e3377..790e92a9ec63 100644 --- a/fs/ncpfs/ioctl.c +++ b/fs/ncpfs/ioctl.c @@ -20,11 +20,9 @@ #include #include -#include - #include -#include "ncplib_kernel.h" +#include "ncp_fs.h" /* maximum limit for ncp_objectname_ioctl */ #define NCP_OBJECT_NAME_MAX_LEN 4096 diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c index 56f5b3a0e1ee..a7c07b44b100 100644 --- a/fs/ncpfs/mmap.c +++ b/fs/ncpfs/mmap.c @@ -16,12 +16,12 @@ #include #include #include -#include -#include "ncplib_kernel.h" #include #include +#include "ncp_fs.h" + /* * Fill in the supplied page for mmap * XXX: how are we excluding truncate/invalidate here? Maybe need to lock diff --git a/fs/ncpfs/ncp_fs.h b/fs/ncpfs/ncp_fs.h new file mode 100644 index 000000000000..31831afe1c3b --- /dev/null +++ b/fs/ncpfs/ncp_fs.h @@ -0,0 +1,98 @@ +#include +#include "ncp_fs_i.h" +#include "ncp_fs_sb.h" + +/* define because it is easy to change PRINTK to {*}PRINTK */ +#define PRINTK(format, args...) printk(KERN_DEBUG format , ## args) + +#undef NCPFS_PARANOIA +#ifdef NCPFS_PARANOIA +#define PPRINTK(format, args...) PRINTK(format , ## args) +#else +#define PPRINTK(format, args...) +#endif + +#ifndef DEBUG_NCP +#define DEBUG_NCP 0 +#endif +#if DEBUG_NCP > 0 +#define DPRINTK(format, args...) PRINTK(format , ## args) +#else +#define DPRINTK(format, args...) +#endif +#if DEBUG_NCP > 1 +#define DDPRINTK(format, args...) PRINTK(format , ## args) +#else +#define DDPRINTK(format, args...) 
+#endif + +#define NCP_MAX_RPC_TIMEOUT (6*HZ) + + +struct ncp_entry_info { + struct nw_info_struct i; + ino_t ino; + int opened; + int access; + unsigned int volume; + __u8 file_handle[6]; +}; + +static inline struct ncp_server *NCP_SBP(const struct super_block *sb) +{ + return sb->s_fs_info; +} + +#define NCP_SERVER(inode) NCP_SBP((inode)->i_sb) +static inline struct ncp_inode_info *NCP_FINFO(const struct inode *inode) +{ + return container_of(inode, struct ncp_inode_info, vfs_inode); +} + +/* linux/fs/ncpfs/inode.c */ +int ncp_notify_change(struct dentry *, struct iattr *); +struct inode *ncp_iget(struct super_block *, struct ncp_entry_info *); +void ncp_update_inode(struct inode *, struct ncp_entry_info *); +void ncp_update_inode2(struct inode *, struct ncp_entry_info *); + +/* linux/fs/ncpfs/dir.c */ +extern const struct inode_operations ncp_dir_inode_operations; +extern const struct file_operations ncp_dir_operations; +extern const struct dentry_operations ncp_dentry_operations; +int ncp_conn_logged_in(struct super_block *); +int ncp_date_dos2unix(__le16 time, __le16 date); +void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date); + +/* linux/fs/ncpfs/ioctl.c */ +long ncp_ioctl(struct file *, unsigned int, unsigned long); +long ncp_compat_ioctl(struct file *, unsigned int, unsigned long); + +/* linux/fs/ncpfs/sock.c */ +int ncp_request2(struct ncp_server *server, int function, + void* reply, int max_reply_size); +static inline int ncp_request(struct ncp_server *server, int function) { + return ncp_request2(server, function, server->packet, server->packet_size); +} +int ncp_connect(struct ncp_server *server); +int ncp_disconnect(struct ncp_server *server); +void ncp_lock_server(struct ncp_server *server); +void ncp_unlock_server(struct ncp_server *server); + +/* linux/fs/ncpfs/symlink.c */ +#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS) +extern const struct address_space_operations ncp_symlink_aops; +int ncp_symlink(struct inode*, struct dentry*, const char*); +#endif + +/* linux/fs/ncpfs/file.c */ +extern const struct inode_operations ncp_file_inode_operations; +extern const struct file_operations ncp_file_operations; +int ncp_make_open(struct inode *, int); + +/* linux/fs/ncpfs/mmap.c */ +int ncp_mmap(struct file *, struct vm_area_struct *); + +/* linux/fs/ncpfs/ncplib_kernel.c */ +int ncp_make_closed(struct inode *); + +#include "ncplib_kernel.h" diff --git a/fs/ncpfs/ncp_fs_i.h b/fs/ncpfs/ncp_fs_i.h new file mode 100644 index 000000000000..4b0bec477846 --- /dev/null +++ b/fs/ncpfs/ncp_fs_i.h @@ -0,0 +1,29 @@ +/* + * ncp_fs_i.h + * + * Copyright (C) 1995 Volker Lendecke + * + */ + +#ifndef _LINUX_NCP_FS_I +#define _LINUX_NCP_FS_I + +/* + * This is the ncpfs part of the inode structure. This must contain + * all the information we need to work with an inode after creation. 
+ */ +struct ncp_inode_info { + __le32 dirEntNum; + __le32 DosDirNum; + __u8 volNumber; + __le32 nwattr; + struct mutex open_mutex; + atomic_t opened; + int access; + int flags; +#define NCPI_KLUDGE_SYMLINK 0x0001 + __u8 file_handle[6]; + struct inode vfs_inode; +}; + +#endif /* _LINUX_NCP_FS_I */ diff --git a/fs/ncpfs/ncp_fs_sb.h b/fs/ncpfs/ncp_fs_sb.h new file mode 100644 index 000000000000..4af803f13516 --- /dev/null +++ b/fs/ncpfs/ncp_fs_sb.h @@ -0,0 +1,176 @@ +/* + * ncp_fs_sb.h + * + * Copyright (C) 1995, 1996 by Volker Lendecke + * + */ + +#ifndef _NCP_FS_SB +#define _NCP_FS_SB + +#include +#include +#include +#include +#include +#include + +#define NCP_DEFAULT_OPTIONS 0 /* 2 for packet signatures */ + +struct sock; + +struct ncp_mount_data_kernel { + unsigned long flags; /* NCP_MOUNT_* flags */ + unsigned int int_flags; /* internal flags */ +#define NCP_IMOUNT_LOGGEDIN_POSSIBLE 0x0001 + __kernel_uid32_t mounted_uid; /* Who may umount() this filesystem? */ + struct pid *wdog_pid; /* Who cares for our watchdog packets? */ + unsigned int ncp_fd; /* The socket to the ncp port */ + unsigned int time_out; /* How long should I wait after + sending a NCP request? */ + unsigned int retry_count; /* And how often should I retry? */ + unsigned char mounted_vol[NCP_VOLNAME_LEN + 1]; + __kernel_uid32_t uid; + __kernel_gid32_t gid; + __kernel_mode_t file_mode; + __kernel_mode_t dir_mode; + int info_fd; +}; + +struct ncp_server { + + struct ncp_mount_data_kernel m; /* Nearly all of the mount data is of + interest for us later, so we store + it completely. */ + + __u8 name_space[NCP_NUMBER_OF_VOLUMES + 2]; + + struct file *ncp_filp; /* File pointer to ncp socket */ + struct socket *ncp_sock;/* ncp socket */ + struct file *info_filp; + struct socket *info_sock; + + u8 sequence; + u8 task; + u16 connection; /* Remote connection number */ + + u8 completion; /* Status message from server */ + u8 conn_status; /* Bit 4 = 1 ==> Server going down, no + requests allowed anymore. + Bit 0 = 1 ==> Server is down. */ + + int buffer_size; /* Negotiated bufsize */ + + int reply_size; /* Size of last reply */ + + int packet_size; + unsigned char *packet; /* Here we prepare requests and + receive replies */ + unsigned char *txbuf; /* Storage for current request */ + unsigned char *rxbuf; /* Storage for reply to current request */ + + int lock; /* To prevent mismatch in protocols. */ + struct mutex mutex; + + int current_size; /* for packet preparation */ + int has_subfunction; + int ncp_reply_size; + + int root_setuped; + struct mutex root_setup_lock; + + /* info for packet signing */ + int sign_wanted; /* 1=Server needs signed packets */ + int sign_active; /* 0=don't do signing, 1=do */ + char sign_root[8]; /* generated from password and encr. 
key */ + char sign_last[16]; + + /* Authentication info: NDS or BINDERY, username */ + struct { + int auth_type; + size_t object_name_len; + void* object_name; + int object_type; + } auth; + /* Password info */ + struct { + size_t len; + void* data; + } priv; + struct rw_semaphore auth_rwsem; + + /* nls info: codepage for volume and charset for I/O */ + struct nls_table *nls_vol; + struct nls_table *nls_io; + + /* maximum age in jiffies */ + atomic_t dentry_ttl; + + /* miscellaneous */ + unsigned int flags; + + spinlock_t requests_lock; /* Lock accesses to tx.requests, tx.creq and rcv.creq when STREAM mode */ + + void (*data_ready)(struct sock* sk, int len); + void (*error_report)(struct sock* sk); + void (*write_space)(struct sock* sk); /* STREAM mode only */ + struct { + struct work_struct tq; /* STREAM/DGRAM: data/error ready */ + struct ncp_request_reply* creq; /* STREAM/DGRAM: awaiting reply from this request */ + struct mutex creq_mutex; /* DGRAM only: lock accesses to rcv.creq */ + + unsigned int state; /* STREAM only: receiver state */ + struct { + __u32 magic __packed; + __u32 len __packed; + __u16 type __packed; + __u16 p1 __packed; + __u16 p2 __packed; + __u16 p3 __packed; + __u16 type2 __packed; + } buf; /* STREAM only: temporary buffer */ + unsigned char* ptr; /* STREAM only: pointer to data */ + size_t len; /* STREAM only: length of data to receive */ + } rcv; + struct { + struct list_head requests; /* STREAM only: queued requests */ + struct work_struct tq; /* STREAM only: transmitter ready */ + struct ncp_request_reply* creq; /* STREAM only: currently transmitted entry */ + } tx; + struct timer_list timeout_tm; /* DGRAM only: timeout timer */ + struct work_struct timeout_tq; /* DGRAM only: associated queue, we run timers from process context */ + int timeout_last; /* DGRAM only: current timeout length */ + int timeout_retries; /* DGRAM only: retries left */ + struct { + size_t len; + __u8 data[128]; + } unexpected_packet; + struct backing_dev_info bdi; +}; + +extern void ncp_tcp_rcv_proc(struct work_struct *work); +extern void ncp_tcp_tx_proc(struct work_struct *work); +extern void ncpdgram_rcv_proc(struct work_struct *work); +extern void ncpdgram_timeout_proc(struct work_struct *work); +extern void ncpdgram_timeout_call(unsigned long server); +extern void ncp_tcp_data_ready(struct sock* sk, int len); +extern void ncp_tcp_write_space(struct sock* sk); +extern void ncp_tcp_error_report(struct sock* sk); + +#define NCP_FLAG_UTF8 1 + +#define NCP_CLR_FLAG(server, flag) ((server)->flags &= ~(flag)) +#define NCP_SET_FLAG(server, flag) ((server)->flags |= (flag)) +#define NCP_IS_FLAG(server, flag) ((server)->flags & (flag)) + +static inline int ncp_conn_valid(struct ncp_server *server) +{ + return ((server->conn_status & 0x11) == 0); +} + +static inline void ncp_invalidate_conn(struct ncp_server *server) +{ + server->conn_status |= 0x01; +} + +#endif diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c index a95615a0b6ac..981a95617fc9 100644 --- a/fs/ncpfs/ncplib_kernel.c +++ b/fs/ncpfs/ncplib_kernel.c @@ -11,7 +11,7 @@ -#include "ncplib_kernel.h" +#include "ncp_fs.h" static inline void assert_server_locked(struct ncp_server *server) { diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h index 1220df75ff22..09881e6aa5ad 100644 --- a/fs/ncpfs/ncplib_kernel.h +++ b/fs/ncpfs/ncplib_kernel.h @@ -32,8 +32,6 @@ #include #endif /* CONFIG_NCPFS_NLS */ -#include - #define NCP_MIN_SYMLINK_SIZE 8 #define NCP_MAX_SYMLINK_SIZE 512 diff --git a/fs/ncpfs/ncpsign_kernel.c 
b/fs/ncpfs/ncpsign_kernel.c index d8b2d7e6910b..08907599dcd2 100644 --- a/fs/ncpfs/ncpsign_kernel.c +++ b/fs/ncpfs/ncpsign_kernel.c @@ -11,6 +11,7 @@ #include #include #include +#include "ncp_fs.h" #include "ncpsign_kernel.h" /* i386: 32-bit, little endian, handles mis-alignment */ diff --git a/fs/ncpfs/ncpsign_kernel.h b/fs/ncpfs/ncpsign_kernel.h index 6451a68381cc..d9a1438bb1f6 100644 --- a/fs/ncpfs/ncpsign_kernel.h +++ b/fs/ncpfs/ncpsign_kernel.h @@ -8,8 +8,6 @@ #ifndef _NCPSIGN_KERNEL_H #define _NCPSIGN_KERNEL_H -#include - #ifdef CONFIG_NCPFS_PACKET_SIGNING void __sign_packet(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, void *sign_buff); int sign_verify_reply(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, const void *sign_buff); diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index 668bd267346e..3a1587222c8a 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c @@ -28,7 +28,7 @@ #include #include -#include +#include "ncp_fs.h" #include "ncpsign_kernel.h" diff --git a/fs/ncpfs/symlink.c b/fs/ncpfs/symlink.c index c634fd17b337..661f861d80c6 100644 --- a/fs/ncpfs/symlink.c +++ b/fs/ncpfs/symlink.c @@ -25,13 +25,11 @@ #include #include -#include #include #include #include #include -#include "ncplib_kernel.h" - +#include "ncp_fs.h" /* these magic numbers must appear in the symlink file -- this makes it a bit more resilient against the magic attributes being set on random files. */ -- cgit v1.2.2 From d61dcce2977d9abe855a5fe3570a81242209c23b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 12 Jan 2011 20:04:20 -0500 Subject: switch afs Signed-off-by: Al Viro --- fs/afs/dir.c | 4 +--- fs/afs/internal.h | 1 + fs/afs/super.c | 1 + 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 34a3263d60a4..e6a4ab980e31 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -62,7 +62,7 @@ const struct inode_operations afs_dir_inode_operations = { .setattr = afs_setattr, }; -static const struct dentry_operations afs_fs_dentry_operations = { +const struct dentry_operations afs_fs_dentry_operations = { .d_revalidate = afs_d_revalidate, .d_delete = afs_d_delete, .d_release = afs_d_release, @@ -582,8 +582,6 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, } success: - d_set_d_op(dentry, &afs_fs_dentry_operations); - d_add(dentry, inode); _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%llu }", fid.vnode, diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 6d4bc1c8ff60..ab6db5abaf53 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -486,6 +486,7 @@ extern bool afs_cm_incoming_call(struct afs_call *); * dir.c */ extern const struct inode_operations afs_dir_inode_operations; +extern const struct dentry_operations afs_fs_dentry_operations; extern const struct file_operations afs_dir_file_operations; /* diff --git a/fs/afs/super.c b/fs/afs/super.c index f901a9d7c111..fb240e8766d6 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -336,6 +336,7 @@ static int afs_fill_super(struct super_block *sb, void *data) if (!root) goto error; + sb->s_d_op = &afs_fs_dentry_operations; sb->s_root = root; _leave(" = 0"); -- cgit v1.2.2 From 66cb76666d69502fe982990b2cff5b6d607fd3b1 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 12 Jan 2011 20:04:37 -0500 Subject: sanitize ecryptfs ->mount() kill ecryptfs_read_super(), reorder code allowing to use normal d_alloc_root() instead of opencoding it. 
Signed-off-by: Al Viro --- fs/ecryptfs/inode.c | 1 - fs/ecryptfs/main.c | 155 +++++++++++++++++++++++----------------------------- 2 files changed, 68 insertions(+), 88 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 337352a94751..64ff02330752 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -441,7 +441,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, struct qstr lower_name; int rc = 0; - d_set_d_op(ecryptfs_dentry, &ecryptfs_dops); if ((ecryptfs_dentry->d_name.len == 1 && !strcmp(ecryptfs_dentry->d_name.name, ".")) || (ecryptfs_dentry->d_name.len == 2 diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 351038675376..9ed476906327 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -141,21 +141,9 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry) return rc; } -/** - * ecryptfs_interpose - * @lower_dentry: Existing dentry in the lower filesystem - * @dentry: ecryptfs' dentry - * @sb: ecryptfs's super_block - * @flags: flags to govern behavior of interpose procedure - * - * Interposes upper and lower dentries. - * - * Returns zero on success; non-zero otherwise - */ -int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry, - struct super_block *sb, u32 flags) +static inode *ecryptfs_get_inode(struct inode *lower_inode, + struct super_block *sb) { - struct inode *lower_inode; struct inode *inode; int rc = 0; @@ -189,17 +177,38 @@ int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry, if (special_file(lower_inode->i_mode)) init_special_inode(inode, lower_inode->i_mode, lower_inode->i_rdev); - d_set_d_op(dentry, &ecryptfs_dops); fsstack_copy_attr_all(inode, lower_inode); /* This size will be overwritten for real files w/ headers and * other metadata */ fsstack_copy_inode_size(inode, lower_inode); + return inode; +out: + return ERR_PTR(rc); +} + +/** + * ecryptfs_interpose + * @lower_dentry: Existing dentry in the lower filesystem + * @dentry: ecryptfs' dentry + * @sb: ecryptfs's super_block + * @flags: flags to govern behavior of interpose procedure + * + * Interposes upper and lower dentries. + * + * Returns zero on success; non-zero otherwise + */ +int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry, + struct super_block *sb, u32 flags) +{ + struct inode *lower_inode = lower_dentry->d_inode; + struct inode *inode = ecryptfs_get_inode(lower_inode, sb); + if (IS_ERR(inode) + return PTR_ERR(inode); if (flags & ECRYPTFS_INTERPOSE_FLAG_D_ADD) d_add(dentry, inode); else d_instantiate(dentry, inode); -out: - return rc; + return 0; } enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig, @@ -491,60 +500,12 @@ out: struct kmem_cache *ecryptfs_sb_info_cache; static struct file_system_type ecryptfs_fs_type; -/** - * ecryptfs_read_super - * @sb: The ecryptfs super block - * @dev_name: The path to mount over - * - * Read the super block of the lower filesystem, and use - * ecryptfs_interpose to create our initial inode and super block - * struct. 
- */ -static int ecryptfs_read_super(struct super_block *sb, const char *dev_name) -{ - struct path path; - int rc; - - rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); - if (rc) { - ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n"); - goto out; - } - if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) { - rc = -EINVAL; - printk(KERN_ERR "Mount on filesystem of type " - "eCryptfs explicitly disallowed due to " - "known incompatibilities\n"); - goto out_free; - } - ecryptfs_set_superblock_lower(sb, path.dentry->d_sb); - sb->s_maxbytes = path.dentry->d_sb->s_maxbytes; - sb->s_blocksize = path.dentry->d_sb->s_blocksize; - ecryptfs_set_dentry_lower(sb->s_root, path.dentry); - ecryptfs_set_dentry_lower_mnt(sb->s_root, path.mnt); - rc = ecryptfs_interpose(path.dentry, sb->s_root, sb, 0); - if (rc) - goto out_free; - rc = 0; - goto out; -out_free: - path_put(&path); -out: - return rc; -} - /** * ecryptfs_get_sb * @fs_type * @flags * @dev_name: The path to mount over * @raw_data: The options passed into the kernel - * - * The whole ecryptfs_get_sb process is broken into 3 functions: - * ecryptfs_parse_options(): handle options passed to ecryptfs, if any - * ecryptfs_read_super(): this accesses the lower filesystem and uses - * ecryptfs_interpose to perform most of the linking - * ecryptfs_interpose(): links the lower filesystem into ecryptfs (inode.c) */ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data) @@ -553,6 +514,8 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags struct ecryptfs_sb_info *sbi; struct ecryptfs_dentry_info *root_info; const char *err = "Getting sb failed"; + struct inode *inode; + struct path path; int rc; sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL); @@ -575,10 +538,8 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags s->s_flags = flags; rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY); - if (rc) { - deactivate_locked_super(s); - goto out; - } + if (rc) + goto out1; ecryptfs_set_superblock_private(s, sbi); s->s_bdi = &sbi->bdi; @@ -586,34 +547,54 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags /* ->kill_sb() will take care of sbi after that point */ sbi = NULL; s->s_op = &ecryptfs_sops; + s->s_d_op = &ecryptfs_dops; - rc = -ENOMEM; - s->s_root = d_alloc(NULL, &(const struct qstr) { - .hash = 0,.name = "/",.len = 1}); + err = "Reading sb failed"; + rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); + if (rc) { + ecryptfs_printk(KERN_WARNING, "kern_path() failed\n"); + goto out1; + } + if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) { + rc = -EINVAL; + printk(KERN_ERR "Mount on filesystem of type " + "eCryptfs explicitly disallowed due to " + "known incompatibilities\n"); + goto out_free; + } + ecryptfs_set_superblock_lower(s, path.dentry->d_sb); + s->s_maxbytes = path.dentry->d_sb->s_maxbytes; + s->s_blocksize = path.dentry->d_sb->s_blocksize; + + inode = ecryptfs_get_inode(path.dentry->d_inode, s); + rc = PTR_ERR(inode); + if (IS_ERR(inode)) + goto out_free; + + s->s_root = d_alloc_root(inode); if (!s->s_root) { - deactivate_locked_super(s); - goto out; + iput(inode); + rc = -ENOMEM; + goto out_free; } - d_set_d_op(s->s_root, &ecryptfs_dops); - s->s_root->d_sb = s; - s->s_root->d_parent = s->s_root; + rc = -ENOMEM; root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL); - if (!root_info) { - 
deactivate_locked_super(s); - goto out; - } + if (!root_info) + goto out_free; + /* ->kill_sb() will take care of root_info */ ecryptfs_set_dentry_private(s->s_root, root_info); + ecryptfs_set_dentry_lower(s->s_root, path.dentry); + ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt); + s->s_flags |= MS_ACTIVE; - rc = ecryptfs_read_super(s, dev_name); - if (rc) { - deactivate_locked_super(s); - err = "Reading sb failed"; - goto out; - } return dget(s->s_root); +out_free: + path_put(&path); +out1: + deactivate_locked_super(s); out: if (sbi) { ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat); -- cgit v1.2.2 From 1c977540fda4bf65ab467d110f5d840fc27e7608 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 18 Nov 2010 15:02:45 -0800 Subject: fs: fix kernel-doc for dcache::d_validate Fix function parameter kernel-doc for d_validate(): Warning(fs/dcache.c:1495): No description found for parameter 'parent' Warning(fs/dcache.c:1495): Excess function parameter 'dparent' description in 'd_validate' Signed-off-by: Randy Dunlap Cc: Alexander Viro Signed-off-by: Al Viro --- fs/dcache.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index 5ec58267b5bb..b2e90998ad36 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1970,7 +1970,7 @@ out: /** * d_validate - verify dentry provided from insecure source (deprecated) * @dentry: The dentry alleged to be valid child of @dparent - * @dparent: The parent dentry (known to be valid) + * @parent: The parent dentry (known to be valid) * * An insecure source has sent us a dentry, here we verify it and dget() it. * This is used by ncpfs in its readdir implementation. -- cgit v1.2.2 From 208898c17a97610ce1c01b1cc58e51802a1d52c3 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 18 Nov 2010 15:02:49 -0800 Subject: fs: fix kernel-doc for dcache::prepend_path Fix function kernel-doc warning for prepend_path(): Warning(fs/dcache.c:1924): missing initial short description on line: Signed-off-by: Randy Dunlap Cc: Alexander Viro Signed-off-by: Al Viro --- fs/dcache.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index b2e90998ad36..0c6d5c549d84 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -2453,8 +2453,7 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name) } /** - * Prepend path string to a buffer - * + * prepend_path - Prepend path string to a buffer * @path: the dentry/vfsmount to report * @root: root vfsmnt/dentry (may be modified by this function) * @buffer: pointer to the end of the buffer -- cgit v1.2.2 From cccb5a1e698535fa5a734ffe21c7061c97f8d8c5 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Dec 2010 07:44:05 -0500 Subject: fix signedness mess in rw_verify_area() on 64bit architectures ... and clean the unsigned-f_pos code, while we are at it. Signed-off-by: Al Viro --- fs/read_write.c | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/read_write.c b/fs/read_write.c index 5d431bacbea9..5520f8ad5504 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -30,18 +30,9 @@ const struct file_operations generic_ro_fops = { EXPORT_SYMBOL(generic_ro_fops); -static int -__negative_fpos_check(struct file *file, loff_t pos, size_t count) +static inline int unsigned_offsets(struct file *file) { - /* - * pos or pos+count is negative here, check overflow. - * too big "count" will be caught in rw_verify_area(). 
- */ - if ((pos < 0) && (pos + count < pos)) - return -EOVERFLOW; - if (file->f_mode & FMODE_UNSIGNED_OFFSET) - return 0; - return -EINVAL; + return file->f_mode & FMODE_UNSIGNED_OFFSET; } /** @@ -75,7 +66,7 @@ generic_file_llseek_unlocked(struct file *file, loff_t offset, int origin) break; } - if (offset < 0 && __negative_fpos_check(file, offset, 0)) + if (offset < 0 && !unsigned_offsets(file)) return -EINVAL; if (offset > inode->i_sb->s_maxbytes) return -EINVAL; @@ -152,7 +143,7 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin) offset += file->f_pos; } retval = -EINVAL; - if (offset >= 0 || !__negative_fpos_check(file, offset, 0)) { + if (offset >= 0 || unsigned_offsets(file)) { if (offset != file->f_pos) { file->f_pos = offset; file->f_version = 0; @@ -252,9 +243,13 @@ int rw_verify_area(int read_write, struct file *file, loff_t *ppos, size_t count if (unlikely((ssize_t) count < 0)) return retval; pos = *ppos; - if (unlikely((pos < 0) || (loff_t) (pos + count) < 0)) { - retval = __negative_fpos_check(file, pos, count); - if (retval) + if (unlikely(pos < 0)) { + if (!unsigned_offsets(file)) + return retval; + if (count >= -pos) /* both values are in 0..LLONG_MAX */ + return -EOVERFLOW; + } else if (unlikely((loff_t) (pos + count) < 0)) { + if (!unsigned_offsets(file)) return retval; } -- cgit v1.2.2 From e1181ee6575d7970bad15aaa852784b4972d2af8 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 7 Dec 2010 16:19:50 -0500 Subject: vfs: pass struct file to do_truncate on O_TRUNC opens (try #2) When a file is opened with O_TRUNC, the truncate processing is handled by handle_truncate(). This function however doesn't receive any info about the newly instantiated filp, and therefore can't pass that info along so that the setattr can use it. This makes NFSv4 misbehave. The client does an open and gets a valid stateid, and then doesn't use that stateid on the subsequent truncate. It uses the zero-stateid instead. Most servers ignore this fact and just do the truncate anyway, but some don't like it (notably, RHEL4). It seems more correct that since we have a fully instantiated file at the time that handle_truncate is called, that we pass that along so that the truncate operation can properly use it. 
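For illustration, the path in question is reached by any truncating open; a hypothetical reproducer (not part of this patch; the NFSv4 mount point and file name are made up) looks like this:

/* A plain O_TRUNC open drives handle_truncate() -> do_truncate(); with this
 * change the resulting setattr carries the new filp, so an NFSv4 client can
 * issue the SETATTR under the stateid it just obtained from the OPEN. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/mnt/nfs4/testfile";
	int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	close(fd);
	return 0;
}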
Signed-off-by: Jeff Layton Signed-off-by: Al Viro --- fs/namei.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 24ece10470b6..0b14f6910fc6 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1950,8 +1950,9 @@ int may_open(struct path *path, int acc_mode, int flag) return break_lease(inode, flag); } -static int handle_truncate(struct path *path) +static int handle_truncate(struct file *filp) { + struct path *path = &filp->f_path; struct inode *inode = path->dentry->d_inode; int error = get_write_access(inode); if (error) @@ -1965,7 +1966,7 @@ static int handle_truncate(struct path *path) if (!error) { error = do_truncate(path->dentry, 0, ATTR_MTIME|ATTR_CTIME|ATTR_OPEN, - NULL); + filp); } put_write_access(inode); return error; @@ -2063,7 +2064,7 @@ static struct file *finish_open(struct nameidata *nd, } if (!IS_ERR(filp)) { if (will_truncate) { - error = handle_truncate(&nd->path); + error = handle_truncate(filp); if (error) { fput(filp); filp = ERR_PTR(error); -- cgit v1.2.2 From 79124f18b335172e1916075c633745e12dae1dac Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 17 Nov 2010 20:46:15 -0500 Subject: fs: add hole punching to fallocate Hole punching has already been implemented by XFS and OCFS2, and has the potential to be implemented on both BTRFS and EXT4 so we need a generic way to get to this feature. The simplest way in my mind is to add FALLOC_FL_PUNCH_HOLE to fallocate() since it already looks like the normal fallocate() operation. I've tested this patch with XFS and BTRFS to make sure XFS did what it's supposed to do and that BTRFS failed like it was supposed to. Thank you, Signed-off-by: Josef Bacik Signed-off-by: Al Viro --- fs/open.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/open.c b/fs/open.c index 4197b9ed023d..5b6ef7e2859e 100644 --- a/fs/open.c +++ b/fs/open.c @@ -223,7 +223,12 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len) return -EINVAL; /* Return error if mode is not supported */ - if (mode && !(mode & FALLOC_FL_KEEP_SIZE)) + if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) + return -EOPNOTSUPP; + + /* Punch hole must have keep size set */ + if ((mode & FALLOC_FL_PUNCH_HOLE) && + !(mode & FALLOC_FL_KEEP_SIZE)) return -EOPNOTSUPP; if (!(file->f_mode & FMODE_WRITE)) -- cgit v1.2.2 From c25d246715b87ad37e69e7abd1a0fed781423fa2 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 17 Nov 2010 20:46:16 -0500 Subject: XFS: handle hole punching via fallocate properly This patch simply allows XFS to handle the hole punching flag in fallocate properly. I've tested this with a little program that does a bunch of random hole punching with FL_KEEP_SIZE and without it to make sure it does the right thing. 
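A test along those lines might look like the sketch below (the actual program is not included in the series; the file name and sizes here are arbitrary):

/* Preallocate a file, then punch a hole in the middle. The new
 * FALLOC_FL_PUNCH_HOLE flag must be combined with FALLOC_FL_KEEP_SIZE. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

#ifndef FALLOC_FL_PUNCH_HOLE		/* not yet present in older uapi headers */
#define FALLOC_FL_PUNCH_HOLE	0x02
#endif

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (fallocate(fd, 0, 0, 8 << 20) < 0)			/* 8 MiB */
		perror("fallocate(preallocate)");
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4 << 20, 1 << 20) < 0)			/* 1 MiB hole */
		perror("fallocate(PUNCH_HOLE)");
	close(fd);
	return 0;
}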
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Al Viro --- fs/xfs/linux-2.6/xfs_iops.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index 94d5fd6a2973..da54403633b6 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -516,6 +516,7 @@ xfs_vn_fallocate( loff_t new_size = 0; xfs_flock64_t bf; xfs_inode_t *ip = XFS_I(inode); + int cmd = XFS_IOC_RESVSP; /* preallocation on directories not yet supported */ error = -ENODEV; @@ -528,6 +529,9 @@ xfs_vn_fallocate( xfs_ilock(ip, XFS_IOLOCK_EXCL); + if (mode & FALLOC_FL_PUNCH_HOLE) + cmd = XFS_IOC_UNRESVSP; + /* check the new inode size is valid before allocating */ if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > i_size_read(inode)) { @@ -537,8 +541,7 @@ xfs_vn_fallocate( goto out_unlock; } - error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf, - 0, XFS_ATTR_NOLOCK); + error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK); if (error) goto out_unlock; -- cgit v1.2.2 From db47fef2cd9aab76ab976e8b45a06a1b3ad0e3e4 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 17 Nov 2010 20:46:17 -0500 Subject: Ocfs2: handle hole punching via fallocate properly This patch just makes ocfs2 use its UNRESERVP ioctl when we get the hole punch flag in fallocate. I didn't test it, but it seems simple enough. Thanks, Acked-by: Jan Kara Acked-by: Joel Becker Signed-off-by: Josef Bacik Signed-off-by: Al Viro --- fs/ocfs2/file.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index bdadbae09094..63e3fca266e0 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1995,6 +1995,7 @@ static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset, struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_space_resv sr; int change_size = 1; + int cmd = OCFS2_IOC_RESVSP64; if (!ocfs2_writes_unwritten_extents(osb)) return -EOPNOTSUPP; @@ -2005,12 +2006,15 @@ static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset, if (mode & FALLOC_FL_KEEP_SIZE) change_size = 0; + if (mode & FALLOC_FL_PUNCH_HOLE) + cmd = OCFS2_IOC_UNRESVSP64; + sr.l_whence = 0; sr.l_start = (s64)offset; sr.l_len = (s64)len; - return __ocfs2_change_file_space(NULL, inode, offset, - OCFS2_IOC_RESVSP64, &sr, change_size); + return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr, + change_size); } int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos, -- cgit v1.2.2 From d6dc8462f471f7bbb49c42c147bf84de0c977099 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 17 Nov 2010 20:46:18 -0500 Subject: Ext4: fail if we try to use hole punch Ext4 doesn't have the ability to punch holes yet, so make sure we return EOPNOTSUPP if we try to use hole punching through fallocate. This support can be added later. 
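Until ext4 (and btrfs/gfs2 below) grow real support, callers have to be ready for -EOPNOTSUPP; a hedged userspace sketch of such a fallback (not from this series; the helper name is invented):

/* Try to punch a hole; if the filesystem rejects FALLOC_FL_PUNCH_HOLE with
 * EOPNOTSUPP, fall back to overwriting the range with zeroes, which keeps
 * the file contents equivalent but does not free the underlying blocks. */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/falloc.h>

#ifndef FALLOC_FL_PUNCH_HOLE
#define FALLOC_FL_PUNCH_HOLE	0x02
#endif

static int punch_or_zero(int fd, off_t off, off_t len)
{
	char buf[4096];

	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      off, len) == 0)
		return 0;
	if (errno != EOPNOTSUPP)
		return -1;
	memset(buf, 0, sizeof(buf));
	while (len > 0) {
		size_t chunk = len > (off_t)sizeof(buf) ? sizeof(buf) : (size_t)len;
		ssize_t n = pwrite(fd, buf, chunk, off);

		if (n <= 0)
			return -1;
		off += n;
		len -= n;
	}
	return 0;
}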
Thanks, Acked-by: Jan Kara Signed-off-by: Josef Bacik Signed-off-by: Al Viro --- fs/ext4/extents.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index e910720e8bb8..2e061dffb722 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3644,6 +3644,10 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) struct ext4_map_blocks map; unsigned int credits, blkbits = inode->i_blkbits; + /* We only support the FALLOC_FL_KEEP_SIZE mode */ + if (mode && (mode != FALLOC_FL_KEEP_SIZE)) + return -EOPNOTSUPP; + /* * currently supporting (pre)allocate mode for extent-based * files _only_ -- cgit v1.2.2 From 23a8519b55235660f6fb7d6f394a912de9d23208 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 17 Nov 2010 20:46:19 -0500 Subject: Btrfs: fail if we try to use hole punch Btrfs doesn't have the ability to punch holes yet, so make sure we return EOPNOTSUPP if we try to use hole punching through fallocate. This support can be added later. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Al Viro --- fs/btrfs/inode.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f870aefc59dd..a3798a3aa0d2 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7115,6 +7115,10 @@ static long btrfs_fallocate(struct inode *inode, int mode, alloc_start = offset & ~mask; alloc_end = (offset + len + mask) & ~mask; + /* We only support the FALLOC_FL_KEEP_SIZE mode */ + if (mode && (mode != FALLOC_FL_KEEP_SIZE)) + return -EOPNOTSUPP; + /* * wait for ordered IO before we have any locks. We'll loop again * below with the locks held. -- cgit v1.2.2 From 9ecf639a9686c9c7e3fd2cd72817ca490c658e6f Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 17 Nov 2010 20:46:20 -0500 Subject: Gfs2: fail if we try to use hole punch Gfs2 doesn't have the ability to punch holes yet, so make sure we return EOPNOTSUPP if we try to use hole punching through fallocate. This support can be added later. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Al Viro --- fs/gfs2/ops_inode.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index ae140c8abb5c..040b5a2e6556 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -1425,6 +1425,10 @@ static long gfs2_fallocate(struct inode *inode, int mode, loff_t offset, loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift; next = (next + 1) << sdp->sd_sb.sb_bsize_shift; + /* We only support the FALLOC_FL_KEEP_SIZE mode */ + if (mode && (mode != FALLOC_FL_KEEP_SIZE)) + return -EOPNOTSUPP; + offset = (offset >> sdp->sd_sb.sb_bsize_shift) << sdp->sd_sb.sb_bsize_shift; -- cgit v1.2.2 From 6db26ffc917b609402619e03df5af8d1cd371ce7 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Wed, 12 Jan 2011 16:59:13 -0800 Subject: fs/ext4/inode.c: use pr_warn_ratelimited() pr_warning_ratelimited() doesn't exist. Also include printk.h, which defines these things. 
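For reference, a minimal sketch of the correctly named helper at a hypothetical call site (not taken from this patch):

/* pr_warn_ratelimited() is the ratelimited variant made available through
 * linux/printk.h; pr_warning_ratelimited() never existed. */
#include <linux/printk.h>
#include <linux/slab.h>

static void *alloc_noisy(size_t len)
{
	void *p = kmalloc(len, GFP_ATOMIC);

	if (!p)
		pr_warn_ratelimited("%s: allocation of %zu bytes failed\n",
				    __func__, len);
	return p;
}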
Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ext4/inode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index e80fc513eacc..549465fef7e9 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -3737,7 +3738,7 @@ static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode) retry: io_end = ext4_init_io_end(inode, GFP_ATOMIC); if (!io_end) { - pr_warning_ratelimited("%s: allocation fail\n", __func__); + pr_warn_ratelimited("%s: allocation fail\n", __func__); schedule(); goto retry; } -- cgit v1.2.2 From 65329bf46bf9ddc37845c9a6823a8e8022d305b9 Mon Sep 17 00:00:00 2001 From: Vasiliy Kulikov Date: Wed, 12 Jan 2011 17:00:00 -0800 Subject: fs/select.c: fix information leak to userspace On some architectures __kernel_suseconds_t is int. On these archs struct timeval has padding bytes at the end. This struct is copied to userspace with these padding bytes uninitialized. This leads to leaking of contents of kernel stack memory. This bug was added with v2.6.27-rc5-286-gb773ad4. [akpm@linux-foundation.org: avoid the memset on architectures which don't need it] Signed-off-by: Vasiliy Kulikov Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/select.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/select.c b/fs/select.c index b7b10aa30861..e56560d2b08a 100644 --- a/fs/select.c +++ b/fs/select.c @@ -306,6 +306,8 @@ static int poll_select_copy_remaining(struct timespec *end_time, void __user *p, rts.tv_sec = rts.tv_nsec = 0; if (timeval) { + if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec)) + memset(&rtv, 0, sizeof(rtv)); rtv.tv_sec = rts.tv_sec; rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC; -- cgit v1.2.2 From 52bd19f7691b2ea6433aef0ef94c08c57efd7e79 Mon Sep 17 00:00:00 2001 From: Robin Holt Date: Wed, 12 Jan 2011 17:00:01 -0800 Subject: epoll: convert max_user_watches to long On a 16TB machine, max_user_watches has an integer overflow. Convert it to use a long and handle the associated fallout. Signed-off-by: Robin Holt Cc: "Eric W. Biederman" Acked-by: Davide Libenzi Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 8cf07242067d..cc8a9b7d6064 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -217,7 +217,7 @@ struct ep_send_events_data { * Configuration options available inside /proc/sys/fs/epoll/ */ /* Maximum number of epoll watched descriptors, per user */ -static int max_user_watches __read_mostly; +static long max_user_watches __read_mostly; /* * This mutex is used to serialize ep_free() and eventpoll_release_file(). 
@@ -240,16 +240,18 @@ static struct kmem_cache *pwq_cache __read_mostly; #include -static int zero; +static long zero; +static long long_max = LONG_MAX; ctl_table epoll_table[] = { { .procname = "max_user_watches", .data = &max_user_watches, - .maxlen = sizeof(int), + .maxlen = sizeof(max_user_watches), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &zero, + .extra2 = &long_max, }, { } }; @@ -561,7 +563,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi) /* At this point it is safe to free the eventpoll item */ kmem_cache_free(epi_cache, epi); - atomic_dec(&ep->user->epoll_watches); + atomic_long_dec(&ep->user->epoll_watches); return 0; } @@ -898,11 +900,12 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, { int error, revents, pwake = 0; unsigned long flags; + long user_watches; struct epitem *epi; struct ep_pqueue epq; - if (unlikely(atomic_read(&ep->user->epoll_watches) >= - max_user_watches)) + user_watches = atomic_long_read(&ep->user->epoll_watches); + if (unlikely(user_watches >= max_user_watches)) return -ENOSPC; if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL))) return -ENOMEM; @@ -966,7 +969,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, spin_unlock_irqrestore(&ep->lock, flags); - atomic_inc(&ep->user->epoll_watches); + atomic_long_inc(&ep->user->epoll_watches); /* We have to call this outside the lock */ if (pwake) @@ -1426,6 +1429,7 @@ static int __init eventpoll_init(void) */ max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) / EP_ITEM_COST; + BUG_ON(max_user_watches < 0); /* Initialize the structure used to perform safe poll wait head wake ups */ ep_nested_calls_init(&poll_safewake_ncalls); -- cgit v1.2.2 From f670d0ecda73b7438eec9ed108680bc5f5362ad8 Mon Sep 17 00:00:00 2001 From: Mikael Pettersson Date: Wed, 12 Jan 2011 17:00:02 -0800 Subject: binfmt_elf: cleanups This cleans up a few bits in binfmt_elf.c and binfmts.h: - the hasvdso field in struct linux_binfmt is unused, so remove it and the only initialization of it - the elf_map CPP symbol is not defined anywhere in the kernel, so remove an unnecessary #ifndef elf_map - reduce excessive indentation in elf_format's initializer - add missing spaces, remove extraneous spaces No functional changes, but tested on x86 (32 and 64 bit), powerpc (32 and 64 bit), sparc64, arm, and alpha. 
Signed-off-by: Mikael Pettersson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/binfmt_elf.c | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 6884e198e0c7..d5b640ba6cb1 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -66,12 +66,11 @@ static int elf_core_dump(struct coredump_params *cprm); #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1)) static struct linux_binfmt elf_format = { - .module = THIS_MODULE, - .load_binary = load_elf_binary, - .load_shlib = load_elf_library, - .core_dump = elf_core_dump, - .min_coredump = ELF_EXEC_PAGESIZE, - .hasvdso = 1 + .module = THIS_MODULE, + .load_binary = load_elf_binary, + .load_shlib = load_elf_library, + .core_dump = elf_core_dump, + .min_coredump = ELF_EXEC_PAGESIZE, }; #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE) @@ -316,8 +315,6 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec, return 0; } -#ifndef elf_map - static unsigned long elf_map(struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type, unsigned long total_size) @@ -354,8 +351,6 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, return(map_addr); } -#endif /* !elf_map */ - static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr) { int i, first_idx = -1, last_idx = -1; @@ -421,7 +416,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, goto out; retval = kernel_read(interpreter, interp_elf_ex->e_phoff, - (char *)elf_phdata,size); + (char *)elf_phdata, size); error = -EIO; if (retval != size) { if (retval < 0) @@ -601,7 +596,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) goto out; if (!elf_check_arch(&loc->elf_ex)) goto out; - if (!bprm->file->f_op||!bprm->file->f_op->mmap) + if (!bprm->file->f_op || !bprm->file->f_op->mmap) goto out; /* Now read in all of the header information */ @@ -761,8 +756,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) /* There was a PT_LOAD segment with p_memsz > p_filesz before this one. Map anonymous pages, if needed, and clear the area. */ - retval = set_brk (elf_bss + load_bias, - elf_brk + load_bias); + retval = set_brk(elf_bss + load_bias, + elf_brk + load_bias); if (retval) { send_sig(SIGKILL, current, 0); goto out_free_dentry; -- cgit v1.2.2 From e462c448fdc89252d631b26ff0ed4f7ad6fe8ed2 Mon Sep 17 00:00:00 2001 From: Davide Libenzi Date: Wed, 12 Jan 2011 17:00:25 -0800 Subject: pipe: use event aware wakeups Send the events the wakeup refers to, so that epoll, and even the new poll code in fs/select.c can avoid wakeups if the events do not match the requested set. Signed-off-by: Davide Libenzi Acked-by: David S. Miller Acked-by: Eric Dumazet Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/pipe.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/pipe.c b/fs/pipe.c index 68f1f8e4e23b..04151e2aee96 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -441,7 +441,7 @@ redo: break; } if (do_wakeup) { - wake_up_interruptible_sync(&pipe->wait); + wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } pipe_wait(pipe); @@ -450,7 +450,7 @@ redo: /* Signal writers asynchronously that there is more room. 
*/ if (do_wakeup) { - wake_up_interruptible_sync(&pipe->wait); + wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } if (ret > 0) @@ -612,7 +612,7 @@ redo2: break; } if (do_wakeup) { - wake_up_interruptible_sync(&pipe->wait); + wake_up_interruptible_sync_poll(&pipe->wait, POLLIN); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); do_wakeup = 0; } @@ -623,7 +623,7 @@ redo2: out: mutex_unlock(&inode->i_mutex); if (do_wakeup) { - wake_up_interruptible_sync(&pipe->wait); + wake_up_interruptible_sync_poll(&pipe->wait, POLLIN); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } if (ret > 0) @@ -715,7 +715,7 @@ pipe_release(struct inode *inode, int decr, int decw) if (!pipe->readers && !pipe->writers) { free_pipe_info(inode); } else { - wake_up_interruptible_sync(&pipe->wait); + wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } -- cgit v1.2.2 From e0e3d32bb40d28cf57a6a24e1e1d87ef03b913bd Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Wed, 12 Jan 2011 17:00:26 -0800 Subject: befs: don't pass huge structs by value 'struct befs_disk_data_stream' is huge (~144 bytes) and it's being passed by value in fs/befs/endian.h::cpu_to_fsrun(). It would be better to pass a pointer. Signed-off-by: Jesper Juhl Cc: Will Dyson Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/befs/endian.h | 16 ++++++++-------- fs/befs/linuxvfs.c | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/befs/endian.h b/fs/befs/endian.h index 6cb84d896d05..27223878ba9f 100644 --- a/fs/befs/endian.h +++ b/fs/befs/endian.h @@ -102,22 +102,22 @@ cpu_to_fsrun(const struct super_block *sb, befs_block_run n) } static inline befs_data_stream -fsds_to_cpu(const struct super_block *sb, befs_disk_data_stream n) +fsds_to_cpu(const struct super_block *sb, const befs_disk_data_stream *n) { befs_data_stream data; int i; for (i = 0; i < BEFS_NUM_DIRECT_BLOCKS; ++i) - data.direct[i] = fsrun_to_cpu(sb, n.direct[i]); + data.direct[i] = fsrun_to_cpu(sb, n->direct[i]); - data.max_direct_range = fs64_to_cpu(sb, n.max_direct_range); - data.indirect = fsrun_to_cpu(sb, n.indirect); - data.max_indirect_range = fs64_to_cpu(sb, n.max_indirect_range); - data.double_indirect = fsrun_to_cpu(sb, n.double_indirect); + data.max_direct_range = fs64_to_cpu(sb, n->max_direct_range); + data.indirect = fsrun_to_cpu(sb, n->indirect); + data.max_indirect_range = fs64_to_cpu(sb, n->max_indirect_range); + data.double_indirect = fsrun_to_cpu(sb, n->double_indirect); data.max_double_indirect_range = fs64_to_cpu(sb, - n. + n-> max_double_indirect_range); - data.size = fs64_to_cpu(sb, n.size); + data.size = fs64_to_cpu(sb, n->size); return data; } diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index de93581b79a2..b1d0c794747b 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -390,7 +390,7 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino) int num_blks; befs_ino->i_data.ds = - fsds_to_cpu(sb, raw_inode->data.datastream); + fsds_to_cpu(sb, &raw_inode->data.datastream); num_blks = befs_count_blocks(sb, &befs_ino->i_data.ds); inode->i_blocks = -- cgit v1.2.2 From 566538a6cf5bec260324dc37b6820dacd8631452 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Wed, 12 Jan 2011 17:00:27 -0800 Subject: reiserfs: make sure va_end() is always called after va_start(). 
A call to va_start() must always be followed by a call to va_end() in the same function. In fs/reiserfs/prints.c::print_block() this is not always the case. If 'bh' is NULL we'll return without calling va_end(). One could add a call to va_end() before the 'return' statement, but it's nicer to just move the call to va_start() after the test for 'bh' being NULL. Signed-off-by: Jesper Juhl Acked-by: Edward Shishkin Cc: Jeff Mahoney Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/reiserfs/prints.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c index adbc6f538515..45de98b59466 100644 --- a/fs/reiserfs/prints.c +++ b/fs/reiserfs/prints.c @@ -586,13 +586,13 @@ void print_block(struct buffer_head *bh, ...) //int print_mode, int first, int l va_list args; int mode, first, last; - va_start(args, bh); - if (!bh) { printk("print_block: buffer is NULL\n"); return; } + va_start(args, bh); + mode = va_arg(args, int); first = va_arg(args, int); last = va_arg(args, int); -- cgit v1.2.2 From 34e49d4f635d6a800c4089c40fd254e12e451449 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 12 Jan 2011 17:00:30 -0800 Subject: fs/proc/base.c, kernel/latencytop.c: convert sprintf_symbol() to %ps Use temporary lr for struct latency_record for improved readability and fewer columns used. Removed trailing space from output. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Joe Perches Cc: Jiri Kosina Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/base.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/proc/base.c b/fs/proc/base.c index b20962c71a52..336b79803e82 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -373,24 +373,18 @@ static int lstats_show_proc(struct seq_file *m, void *v) return -ESRCH; seq_puts(m, "Latency Top version : v0.1\n"); for (i = 0; i < 32; i++) { - if (task->latency_record[i].backtrace[0]) { + struct latency_record *lr = &task->latency_record[i]; + if (lr->backtrace[0]) { int q; - seq_printf(m, "%i %li %li ", - task->latency_record[i].count, - task->latency_record[i].time, - task->latency_record[i].max); + seq_printf(m, "%i %li %li", + lr->count, lr->time, lr->max); for (q = 0; q < LT_BACKTRACEDEPTH; q++) { - char sym[KSYM_SYMBOL_LEN]; - char *c; - if (!task->latency_record[i].backtrace[q]) + unsigned long bt = lr->backtrace[q]; + if (!bt) break; - if (task->latency_record[i].backtrace[q] == ULONG_MAX) + if (bt == ULONG_MAX) break; - sprint_symbol(sym, task->latency_record[i].backtrace[q]); - c = strchr(sym, '+'); - if (c) - *c = 0; - seq_printf(m, "%s ", sym); + seq_printf(m, " %ps", (void *)bt); } seq_printf(m, "\n"); } -- cgit v1.2.2 From a2ade7b6ca37c808128810687cd56e8a44443e65 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 12 Jan 2011 17:00:32 -0800 Subject: proc: use unsigned long inside /proc/*/statm /proc/*/statm code needlessly truncates data from unsigned long to int. One needs only 8+ TB of RAM to make truncation visible. 
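The fields are counted in pages, so with 4 KiB pages a 32-bit int overflows once a counter passes 2^31 pages, i.e. 8 TiB; a hypothetical userspace reader (not part of this patch) should parse them as unsigned long as well:

/* Read /proc/self/statm; fields per proc(5): size resident shared text
 * lib data dt, all in pages. Use %lu -- an int would truncate beyond
 * 2^31 pages (8 TiB with 4 KiB pages). */
#include <stdio.h>

int main(void)
{
	unsigned long size, resident, shared, text, lib, data, dt;
	FILE *f = fopen("/proc/self/statm", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%lu %lu %lu %lu %lu %lu %lu",
		   &size, &resident, &shared, &text, &lib, &data, &dt) == 7)
		printf("size=%lu resident=%lu (pages)\n", size, resident);
	fclose(f);
	return 0;
}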
Signed-off-by: Alexey Dobriyan Reviewed-by: WANG Cong Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/array.c | 6 +++--- fs/proc/internal.h | 3 ++- fs/proc/task_mmu.c | 5 +++-- fs/proc/task_nommu.c | 7 ++++--- 4 files changed, 12 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/proc/array.c b/fs/proc/array.c index fff6572676ae..842a6564f2ce 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -535,15 +535,15 @@ int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns, int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { - int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0; + unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0; struct mm_struct *mm = get_task_mm(task); if (mm) { size = task_statm(mm, &shared, &text, &data, &resident); mmput(mm); } - seq_printf(m, "%d %d %d %d %d %d %d\n", - size, resident, shared, text, lib, data, 0); + seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n", + size, resident, shared, text, data); return 0; } diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 1f24a3eddd12..659ea6af379a 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -96,7 +96,8 @@ extern spinlock_t proc_subdir_lock; struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *); int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir); unsigned long task_vsize(struct mm_struct *); -int task_statm(struct mm_struct *, int *, int *, int *, int *); +unsigned long task_statm(struct mm_struct *, + unsigned long *, unsigned long *, unsigned long *, unsigned long *); void task_mem(struct seq_file *, struct mm_struct *); static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index c126c83b9a45..c3755bd8dd3e 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -66,8 +66,9 @@ unsigned long task_vsize(struct mm_struct *mm) return PAGE_SIZE * mm->total_vm; } -int task_statm(struct mm_struct *mm, int *shared, int *text, - int *data, int *resident) +unsigned long task_statm(struct mm_struct *mm, + unsigned long *shared, unsigned long *text, + unsigned long *data, unsigned long *resident) { *shared = get_mm_counter(mm, MM_FILEPAGES); *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index cb6306e63843..b535d3e5d5f1 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -92,13 +92,14 @@ unsigned long task_vsize(struct mm_struct *mm) return vsize; } -int task_statm(struct mm_struct *mm, int *shared, int *text, - int *data, int *resident) +unsigned long task_statm(struct mm_struct *mm, + unsigned long *shared, unsigned long *text, + unsigned long *data, unsigned long *resident) { struct vm_area_struct *vma; struct vm_region *region; struct rb_node *p; - int size = kobjsize(mm); + unsigned long size = kobjsize(mm); down_read(&mm->mmap_sem); for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) { -- cgit v1.2.2 From 9d6de12f70d2fb1487c4f482a21fed25fe74e0fd Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 12 Jan 2011 17:00:32 -0800 Subject: proc: use seq_puts()/seq_putc() where possible For string without format specifiers, use seq_puts(). For seq_printf("\n"), use seq_putc('\n'). 
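Shown in isolation, the rule is (hypothetical show() routine, not one of the call sites converted below):

/* seq_puts() for fixed strings, seq_putc() for a single character,
 * seq_printf() only when something actually needs formatting. */
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Example header:\n");	/* was seq_printf(m, "Example header:\n") */
	seq_printf(m, "value: %d", 42);		/* format specifier: keep seq_printf() */
	seq_putc(m, '\n');			/* was seq_printf(m, "\n") */
	return 0;
}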
text data bss dec hex filename 61866 488 112 62466 f402 fs/proc/proc.o 61729 488 112 62329 f379 fs/proc/proc.o ---------------------------------------------------- -139 Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/array.c | 22 +++++++++++----------- fs/proc/base.c | 2 +- fs/proc/devices.c | 4 ++-- fs/proc/proc_tty.c | 26 +++++++++++++------------- fs/proc/softirqs.c | 6 +++--- fs/proc/stat.c | 2 +- 6 files changed, 31 insertions(+), 31 deletions(-) (limited to 'fs') diff --git a/fs/proc/array.c b/fs/proc/array.c index 842a6564f2ce..df2b703b9d0f 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -95,7 +95,7 @@ static inline void task_name(struct seq_file *m, struct task_struct *p) get_task_comm(tcomm, p); - seq_printf(m, "Name:\t"); + seq_puts(m, "Name:\t"); end = m->buf + m->size; buf = m->buf + m->count; name = tcomm; @@ -122,7 +122,7 @@ static inline void task_name(struct seq_file *m, struct task_struct *p) buf++; } m->count = buf - m->buf; - seq_printf(m, "\n"); + seq_putc(m, '\n'); } /* @@ -208,7 +208,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, seq_printf(m, "%d ", GROUP_AT(group_info, g)); put_cred(cred); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } static void render_sigset_t(struct seq_file *m, const char *header, @@ -216,7 +216,7 @@ static void render_sigset_t(struct seq_file *m, const char *header, { int i; - seq_printf(m, "%s", header); + seq_puts(m, header); i = _NSIG; do { @@ -230,7 +230,7 @@ static void render_sigset_t(struct seq_file *m, const char *header, seq_printf(m, "%x", x); } while (i >= 4); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign, @@ -291,12 +291,12 @@ static void render_cap_t(struct seq_file *m, const char *header, { unsigned __capi; - seq_printf(m, "%s", header); + seq_puts(m, header); CAP_FOR_EACH_U32(__capi) { seq_printf(m, "%08x", a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]); } - seq_printf(m, "\n"); + seq_putc(m, '\n'); } static inline void task_cap(struct seq_file *m, struct task_struct *p) @@ -329,12 +329,12 @@ static inline void task_context_switch_counts(struct seq_file *m, static void task_cpus_allowed(struct seq_file *m, struct task_struct *task) { - seq_printf(m, "Cpus_allowed:\t"); + seq_puts(m, "Cpus_allowed:\t"); seq_cpumask(m, &task->cpus_allowed); - seq_printf(m, "\n"); - seq_printf(m, "Cpus_allowed_list:\t"); + seq_putc(m, '\n'); + seq_puts(m, "Cpus_allowed_list:\t"); seq_cpumask_list(m, &task->cpus_allowed); - seq_printf(m, "\n"); + seq_putc(m, '\n'); } int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, diff --git a/fs/proc/base.c b/fs/proc/base.c index 336b79803e82..bf6ba967cffb 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -386,7 +386,7 @@ static int lstats_show_proc(struct seq_file *m, void *v) break; seq_printf(m, " %ps", (void *)bt); } - seq_printf(m, "\n"); + seq_putc(m, '\n'); } } diff --git a/fs/proc/devices.c b/fs/proc/devices.c index 59ee7da959c9..b14347167c35 100644 --- a/fs/proc/devices.c +++ b/fs/proc/devices.c @@ -9,14 +9,14 @@ static int devinfo_show(struct seq_file *f, void *v) if (i < CHRDEV_MAJOR_HASH_SIZE) { if (i == 0) - seq_printf(f, "Character devices:\n"); + seq_puts(f, "Character devices:\n"); chrdev_show(f, i); } #ifdef CONFIG_BLOCK else { i -= CHRDEV_MAJOR_HASH_SIZE; if (i == 0) - seq_printf(f, "\nBlock devices:\n"); + seq_puts(f, "\nBlock devices:\n"); blkdev_show(f, i); } #endif diff --git a/fs/proc/proc_tty.c 
b/fs/proc/proc_tty.c index 83adcc869437..cb761f010300 100644 --- a/fs/proc/proc_tty.c +++ b/fs/proc/proc_tty.c @@ -36,27 +36,27 @@ static void show_tty_range(struct seq_file *m, struct tty_driver *p, } switch (p->type) { case TTY_DRIVER_TYPE_SYSTEM: - seq_printf(m, "system"); + seq_puts(m, "system"); if (p->subtype == SYSTEM_TYPE_TTY) - seq_printf(m, ":/dev/tty"); + seq_puts(m, ":/dev/tty"); else if (p->subtype == SYSTEM_TYPE_SYSCONS) - seq_printf(m, ":console"); + seq_puts(m, ":console"); else if (p->subtype == SYSTEM_TYPE_CONSOLE) - seq_printf(m, ":vtmaster"); + seq_puts(m, ":vtmaster"); break; case TTY_DRIVER_TYPE_CONSOLE: - seq_printf(m, "console"); + seq_puts(m, "console"); break; case TTY_DRIVER_TYPE_SERIAL: - seq_printf(m, "serial"); + seq_puts(m, "serial"); break; case TTY_DRIVER_TYPE_PTY: if (p->subtype == PTY_TYPE_MASTER) - seq_printf(m, "pty:master"); + seq_puts(m, "pty:master"); else if (p->subtype == PTY_TYPE_SLAVE) - seq_printf(m, "pty:slave"); + seq_puts(m, "pty:slave"); else - seq_printf(m, "pty"); + seq_puts(m, "pty"); break; default: seq_printf(m, "type:%d.%d", p->type, p->subtype); @@ -74,19 +74,19 @@ static int show_tty_driver(struct seq_file *m, void *v) /* pseudo-drivers first */ seq_printf(m, "%-20s /dev/%-8s ", "/dev/tty", "tty"); seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 0); - seq_printf(m, "system:/dev/tty\n"); + seq_puts(m, "system:/dev/tty\n"); seq_printf(m, "%-20s /dev/%-8s ", "/dev/console", "console"); seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 1); - seq_printf(m, "system:console\n"); + seq_puts(m, "system:console\n"); #ifdef CONFIG_UNIX98_PTYS seq_printf(m, "%-20s /dev/%-8s ", "/dev/ptmx", "ptmx"); seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 2); - seq_printf(m, "system\n"); + seq_puts(m, "system\n"); #endif #ifdef CONFIG_VT seq_printf(m, "%-20s /dev/%-8s ", "/dev/vc/0", "vc/0"); seq_printf(m, "%3d %7d ", TTY_MAJOR, 0); - seq_printf(m, "system:vtmaster\n"); + seq_puts(m, "system:vtmaster\n"); #endif } diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c index 37994737c983..62604be9f58d 100644 --- a/fs/proc/softirqs.c +++ b/fs/proc/softirqs.c @@ -10,16 +10,16 @@ static int show_softirqs(struct seq_file *p, void *v) { int i, j; - seq_printf(p, " "); + seq_puts(p, " "); for_each_possible_cpu(i) seq_printf(p, "CPU%-8d", i); - seq_printf(p, "\n"); + seq_putc(p, '\n'); for (i = 0; i < NR_SOFTIRQS; i++) { seq_printf(p, "%12s:", softirq_to_name[i]); for_each_possible_cpu(j) seq_printf(p, " %10u", kstat_softirqs_cpu(i, j)); - seq_printf(p, "\n"); + seq_putc(p, '\n'); } return 0; } diff --git a/fs/proc/stat.c b/fs/proc/stat.c index e15a19c93bae..1cffa2b8a2fc 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -126,7 +126,7 @@ static int show_stat(struct seq_file *p, void *v) for (i = 0; i < NR_SOFTIRQS; i++) seq_printf(p, " %u", per_softirq_sums[i]); - seq_printf(p, "\n"); + seq_putc(p, '\n'); return 0; } -- cgit v1.2.2 From 6d1b6e4eff89475785f60fa00f65da780f869f36 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 12 Jan 2011 17:00:33 -0800 Subject: proc: ->low_ino cleanup - ->low_ino is write-once field -- reading it under locks is unnecessary. - /proc/$PID stuff never reaches pde_put()/free_proc_entry() -- PROC_DYNAMIC_FIRST check never triggers. - in proc_get_inode(), inode number always matches proc dir entry, so save one parameter. 
Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/generic.c | 12 ++---------- fs/proc/inode.c | 7 +++---- fs/proc/internal.h | 2 +- 3 files changed, 6 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/proc/generic.c b/fs/proc/generic.c index f766be29d2c7..d00c5af6f199 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -425,13 +425,10 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir, if (de->namelen != dentry->d_name.len) continue; if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { - unsigned int ino; - - ino = de->low_ino; pde_get(de); spin_unlock(&proc_subdir_lock); error = -EINVAL; - inode = proc_get_inode(dir->i_sb, ino, de); + inode = proc_get_inode(dir->i_sb, de); goto out_unlock; } } @@ -768,12 +765,7 @@ EXPORT_SYMBOL(proc_create_data); static void free_proc_entry(struct proc_dir_entry *de) { - unsigned int ino = de->low_ino; - - if (ino < PROC_DYNAMIC_FIRST) - return; - - release_inode_number(ino); + release_inode_number(de->low_ino); if (S_ISLNK(de->mode)) kfree(de->data); diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 6bcb926b101b..176ce4cda68a 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -416,12 +416,11 @@ static const struct file_operations proc_reg_file_ops_no_compat = { }; #endif -struct inode *proc_get_inode(struct super_block *sb, unsigned int ino, - struct proc_dir_entry *de) +struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) { struct inode * inode; - inode = iget_locked(sb, ino); + inode = iget_locked(sb, de->low_ino); if (!inode) return NULL; if (inode->i_state & I_NEW) { @@ -471,7 +470,7 @@ int proc_fill_super(struct super_block *s) s->s_time_gran = 1; pde_get(&proc_root); - root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root); + root_inode = proc_get_inode(s, &proc_root); if (!root_inode) goto out_no_root; root_inode->i_uid = 0; diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 659ea6af379a..9ad561ded409 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -109,7 +109,7 @@ void pde_put(struct proc_dir_entry *pde); extern struct vfsmount *proc_mnt; int proc_fill_super(struct super_block *); -struct inode *proc_get_inode(struct super_block *, unsigned int, struct proc_dir_entry *); +struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *); /* * These are generic /proc routines that use the internal -- cgit v1.2.2 From c6a340584607f653e10549c76dd427d4780c8f2c Mon Sep 17 00:00:00 2001 From: Jovi Zhang Date: Wed, 12 Jan 2011 17:00:34 -0800 Subject: proc: use single_open() correctly single_open()'s third argument is for copying into seq_file->private. Use that, rather than open-coding it. 
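The idiom being converted to, on its own (hypothetical /proc file; not one of the call sites below):

/* single_open() stores its third argument in seq_file->private, so the
 * show() callback can read it back directly instead of the caller poking
 * it into the seq_file after the fact. */
#include <linux/fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	struct inode *inode = m->private;	/* set by single_open() */

	seq_printf(m, "inode %lu\n", inode->i_ino);
	return 0;
}

static int example_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, example_show, inode);
}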
Signed-off-by: Jovi Zhang Acked-by: David Rientjes Acked-by: Alexey Dobriyan Reviewed-by: KOSAKI Motohiro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/base.c | 29 +++-------------------------- 1 file changed, 3 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/proc/base.c b/fs/proc/base.c index bf6ba967cffb..93f1cdd5d3d7 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -745,14 +745,7 @@ static int proc_single_show(struct seq_file *m, void *v) static int proc_single_open(struct inode *inode, struct file *filp) { - int ret; - ret = single_open(filp, proc_single_show, NULL); - if (!ret) { - struct seq_file *m = filp->private_data; - - m->private = inode; - } - return ret; + return single_open(filp, proc_single_show, inode); } static const struct file_operations proc_single_file_operations = { @@ -1380,15 +1373,7 @@ sched_write(struct file *file, const char __user *buf, static int sched_open(struct inode *inode, struct file *filp) { - int ret; - - ret = single_open(filp, sched_show, NULL); - if (!ret) { - struct seq_file *m = filp->private_data; - - m->private = inode; - } - return ret; + return single_open(filp, sched_show, inode); } static const struct file_operations proc_pid_sched_operations = { @@ -1524,15 +1509,7 @@ static int comm_show(struct seq_file *m, void *v) static int comm_open(struct inode *inode, struct file *filp) { - int ret; - - ret = single_open(filp, comm_show, NULL); - if (!ret) { - struct seq_file *m = filp->private_data; - - m->private = inode; - } - return ret; + return single_open(filp, comm_show, inode); } static const struct file_operations proc_pid_set_comm_operations = { -- cgit v1.2.2 From a6fc86d2b43bf1086557f023a24adf91db915559 Mon Sep 17 00:00:00 2001 From: Petr Holasek Date: Wed, 12 Jan 2011 17:00:34 -0800 Subject: kpagecount: add slab page checking because _mapcount is in a union Add a PageSlab() check before adding the _mapcount value to /kpagecount. page->_mapcount is in a union with the SLAB structure so for pages controlled by SLAB, page_mapcount() returns nonsense. Signed-off-by: Petr Holasek Cc: Wu Fengguang Cc: Matt Mackall Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/page.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/page.c b/fs/proc/page.c index 3b8b45660331..b06c674624e6 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -40,7 +40,7 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf, ppage = pfn_to_page(pfn); else ppage = NULL; - if (!ppage) + if (!ppage || PageSlab(ppage)) pcount = 0; else pcount = page_mapcount(ppage); -- cgit v1.2.2 From 3740a20c4fe8697cb604a7d51395d23472b1768f Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 12 Jan 2011 17:00:35 -0800 Subject: proc: less LOCK/UNLOCK in remove_proc_entry() For the common case where a proc entry is being removed and nobody is in the process of using it, save a LOCK/UNLOCK pair. 
Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/generic.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/proc/generic.c b/fs/proc/generic.c index d00c5af6f199..01e07f2a188f 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -826,12 +826,9 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) wait_for_completion(de->pde_unload_completion); - goto continue_removing; + spin_lock(&de->pde_unload_lock); } - spin_unlock(&de->pde_unload_lock); -continue_removing: - spin_lock(&de->pde_unload_lock); while (!list_empty(&de->pde_openers)) { struct pde_opener *pdeo; -- cgit v1.2.2 From bf33cbdf8acccf96de268fbfb347d94e72de81ef Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Wed, 12 Jan 2011 17:00:36 -0800 Subject: proc: move proc_console.c to fs/proc/consoles.c Filename is supposed to match procfile name for random junk. Add __init while I'm at it. Signed-off-by: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/Makefile | 2 +- fs/proc/consoles.c | 114 +++++++++++++++++++++++++++++++++++++++++++++++++ fs/proc/proc_console.c | 114 ------------------------------------------------- 3 files changed, 115 insertions(+), 115 deletions(-) create mode 100644 fs/proc/consoles.c delete mode 100644 fs/proc/proc_console.c (limited to 'fs') diff --git a/fs/proc/Makefile b/fs/proc/Makefile index 288a49e098bf..df434c5f28fb 100644 --- a/fs/proc/Makefile +++ b/fs/proc/Makefile @@ -10,12 +10,12 @@ proc-$(CONFIG_MMU) := mmu.o task_mmu.o proc-y += inode.o root.o base.o generic.o array.o \ proc_tty.o proc-y += cmdline.o +proc-y += consoles.o proc-y += cpuinfo.o proc-y += devices.o proc-y += interrupts.o proc-y += loadavg.o proc-y += meminfo.o -proc-y += proc_console.o proc-y += stat.o proc-y += uptime.o proc-y += version.o diff --git a/fs/proc/consoles.c b/fs/proc/consoles.c new file mode 100644 index 000000000000..eafc22ab1fdd --- /dev/null +++ b/fs/proc/consoles.c @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2010 Werner Fink, Jiri Slaby + * + * Licensed under GPLv2 + */ + +#include +#include +#include +#include +#include + +/* + * This is handler for /proc/consoles + */ +static int show_console_dev(struct seq_file *m, void *v) +{ + static const struct { + short flag; + char name; + } con_flags[] = { + { CON_ENABLED, 'E' }, + { CON_CONSDEV, 'C' }, + { CON_BOOT, 'B' }, + { CON_PRINTBUFFER, 'p' }, + { CON_BRL, 'b' }, + { CON_ANYTIME, 'a' }, + }; + char flags[ARRAY_SIZE(con_flags) + 1]; + struct console *con = v; + unsigned int a; + int len; + dev_t dev = 0; + + if (con->device) { + const struct tty_driver *driver; + int index; + driver = con->device(con, &index); + if (driver) { + dev = MKDEV(driver->major, driver->minor_start); + dev += index; + } + } + + for (a = 0; a < ARRAY_SIZE(con_flags); a++) + flags[a] = (con->flags & con_flags[a].flag) ? + con_flags[a].name : ' '; + flags[a] = 0; + + seq_printf(m, "%s%d%n", con->name, con->index, &len); + len = 21 - len; + if (len < 1) + len = 1; + seq_printf(m, "%*c%c%c%c (%s)", len, ' ', con->read ? 'R' : '-', + con->write ? 'W' : '-', con->unblank ? 
'U' : '-', + flags); + if (dev) + seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev)); + + seq_printf(m, "\n"); + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + struct console *con; + loff_t off = 0; + + acquire_console_sem(); + for_each_console(con) + if (off++ == *pos) + break; + + return con; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + struct console *con = v; + ++*pos; + return con->next; +} + +static void c_stop(struct seq_file *m, void *v) +{ + release_console_sem(); +} + +static const struct seq_operations consoles_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_console_dev +}; + +static int consoles_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &consoles_op); +} + +static const struct file_operations proc_consoles_operations = { + .open = consoles_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init proc_consoles_init(void) +{ + proc_create("consoles", 0, NULL, &proc_consoles_operations); + return 0; +} +module_init(proc_consoles_init); diff --git a/fs/proc/proc_console.c b/fs/proc/proc_console.c deleted file mode 100644 index 8a707609f528..000000000000 --- a/fs/proc/proc_console.c +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (c) 2010 Werner Fink, Jiri Slaby - * - * Licensed under GPLv2 - */ - -#include -#include -#include -#include -#include - -/* - * This is handler for /proc/consoles - */ -static int show_console_dev(struct seq_file *m, void *v) -{ - static const struct { - short flag; - char name; - } con_flags[] = { - { CON_ENABLED, 'E' }, - { CON_CONSDEV, 'C' }, - { CON_BOOT, 'B' }, - { CON_PRINTBUFFER, 'p' }, - { CON_BRL, 'b' }, - { CON_ANYTIME, 'a' }, - }; - char flags[ARRAY_SIZE(con_flags) + 1]; - struct console *con = v; - unsigned int a; - int len; - dev_t dev = 0; - - if (con->device) { - const struct tty_driver *driver; - int index; - driver = con->device(con, &index); - if (driver) { - dev = MKDEV(driver->major, driver->minor_start); - dev += index; - } - } - - for (a = 0; a < ARRAY_SIZE(con_flags); a++) - flags[a] = (con->flags & con_flags[a].flag) ? - con_flags[a].name : ' '; - flags[a] = 0; - - seq_printf(m, "%s%d%n", con->name, con->index, &len); - len = 21 - len; - if (len < 1) - len = 1; - seq_printf(m, "%*c%c%c%c (%s)", len, ' ', con->read ? 'R' : '-', - con->write ? 'W' : '-', con->unblank ? 
'U' : '-', - flags); - if (dev) - seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev)); - - seq_printf(m, "\n"); - - return 0; -} - -static void *c_start(struct seq_file *m, loff_t *pos) -{ - struct console *con; - loff_t off = 0; - - acquire_console_sem(); - for_each_console(con) - if (off++ == *pos) - break; - - return con; -} - -static void *c_next(struct seq_file *m, void *v, loff_t *pos) -{ - struct console *con = v; - ++*pos; - return con->next; -} - -static void c_stop(struct seq_file *m, void *v) -{ - release_console_sem(); -} - -static const struct seq_operations consoles_op = { - .start = c_start, - .next = c_next, - .stop = c_stop, - .show = show_console_dev -}; - -static int consoles_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &consoles_op); -} - -static const struct file_operations proc_consoles_operations = { - .open = consoles_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; - -static int register_proc_consoles(void) -{ - proc_create("consoles", 0, NULL, &proc_consoles_operations); - return 0; -} -module_init(register_proc_consoles); -- cgit v1.2.2 From ceff1a770933e2ca2bf995b453dade4ec47a9878 Mon Sep 17 00:00:00 2001 From: Dave Anderson Date: Wed, 12 Jan 2011 17:00:36 -0800 Subject: /proc/kcore: fix seeking Commit 34aacb2920 ("procfs: Use generic_file_llseek in /proc/kcore") broke seeking on /proc/kcore. This changes it back to use default_llseek in order to restore the original behavior. The problem with generic_file_llseek is that it only allows seeks up to inode->i_sb->s_maxbytes, which is 2GB-1 on procfs, where the memory file offset values in the /proc/kcore PT_LOAD segments may exceed or start beyond that offset value. A similar revert was made for /proc/vmcore. Signed-off-by: Dave Anderson Acked-by: Frederic Weisbecker Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/kcore.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index 6f37c391468d..d245cb23dd72 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -558,7 +558,7 @@ static int open_kcore(struct inode *inode, struct file *filp) static const struct file_operations proc_kcore_operations = { .read = read_kcore, .open = open_kcore, - .llseek = generic_file_llseek, + .llseek = default_llseek, }; #ifdef CONFIG_MEMORY_HOTPLUG -- cgit v1.2.2 From e6d7202b66d99bf514c8e901db68386b1fcd6d56 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 12 Jan 2011 17:00:37 -0800 Subject: fs/char_dev.c: remove unused cdev_index() Commit 66fa12c571d3 ("ieee1394: remove the old IEEE 1394 driver stack") eliminated the only user of cdev_index(). So it can be removed too. 
Signed-off-by: Namhyung Kim Cc: Stefan Richter Cc: Theodore Ts'o Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/char_dev.c | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'fs') diff --git a/fs/char_dev.c b/fs/char_dev.c index e5b9df993b93..6e99b9ddd4e9 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -417,18 +417,6 @@ static int chrdev_open(struct inode *inode, struct file *filp) return ret; } -int cdev_index(struct inode *inode) -{ - int idx; - struct kobject *kobj; - - kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx); - if (!kobj) - return -1; - kobject_put(kobj); - return idx; -} - void cd_forget(struct inode *inode) { spin_lock(&cdev_lock); @@ -582,7 +570,6 @@ EXPORT_SYMBOL(cdev_init); EXPORT_SYMBOL(cdev_alloc); EXPORT_SYMBOL(cdev_del); EXPORT_SYMBOL(cdev_add); -EXPORT_SYMBOL(cdev_index); EXPORT_SYMBOL(__register_chrdev); EXPORT_SYMBOL(__unregister_chrdev); EXPORT_SYMBOL(directly_mappable_cdev_bdi); -- cgit v1.2.2 From 2e41025598ea7abb4330db98f78c5a084e25682f Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 12 Jan 2011 17:01:08 -0800 Subject: aio: remove unnecessary check 'nr >= min_nr >= 0' always satisfies 'nr >= 0' so the check is unnecesary. Signed-off-by: Namhyung Kim Acked-by: Jeff Moyer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/aio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index 8c8f6c5b6d79..43a716b367cf 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1839,7 +1839,7 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, long ret = -EINVAL; if (likely(ioctx)) { - if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0)) + if (likely(min_nr <= nr && min_nr >= 0)) ret = read_events(ioctx, min_nr, nr, events, timeout); put_ioctx(ioctx); } -- cgit v1.2.2 From d3486f8b9eebcaa15ba8b72b63217e317f4f8635 Mon Sep 17 00:00:00 2001 From: Jeff Moyer Date: Wed, 12 Jan 2011 17:01:08 -0800 Subject: aio: remove unused aio_run_iocbs() aio_run_iocbs() is not used at all, so get rid of it. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Jeff Moyer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/aio.c | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index 43a716b367cf..5e00f15c54aa 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -798,29 +798,12 @@ static void aio_queue_work(struct kioctx * ctx) queue_delayed_work(aio_wq, &ctx->wq, timeout); } - -/* - * aio_run_iocbs: - * Process all pending retries queued on the ioctx - * run list. - * Assumes it is operating within the aio issuer's mm - * context. - */ -static inline void aio_run_iocbs(struct kioctx *ctx) -{ - int requeue; - - spin_lock_irq(&ctx->ctx_lock); - - requeue = __aio_run_iocbs(ctx); - spin_unlock_irq(&ctx->ctx_lock); - if (requeue) - aio_queue_work(ctx); -} - /* - * just like aio_run_iocbs, but keeps running them until - * the list stays empty + * aio_run_all_iocbs: + * Process all pending retries queued on the ioctx + * run list, and keep running them until the list + * stays empty. + * Assumes it is operating within the aio issuer's mm context. */ static inline void aio_run_all_iocbs(struct kioctx *ctx) { -- cgit v1.2.2 From 6f772fe65c7aa1a4679739d885775f07492a6eea Mon Sep 17 00:00:00 2001 From: Stefani Seibold Date: Wed, 12 Jan 2011 17:01:10 -0800 Subject: cramfs: generate unique inode number for better inode cache usage Generate a unique inode numbers for any entries in the cram file system. 
For files which did not contain data's (device nodes, fifos and sockets) the offset of the directory entry inside the cramfs plus 1 will be used as inode number. The + 1 for the inode will it make possible to distinguish between a file which contains no data and files which has data, the later one has a inode value where the lower two bits are always 0. It also reimplements the behavior to set the size and the number of block to 0 for special file, which is the right value for empty files, devices, fifos and sockets As a little benefit it will be also more compatible which older mkcramfs, because it will never use the cramfs_inode->offset for creating a inode number for special files. [akpm@linux-foundation.org: trivial comment fix] [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Stefani Seibold Cc: Al Viro Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/cramfs/inode.c | 110 ++++++++++++++++++++++++++++++++++-------------------- 1 file changed, 69 insertions(+), 41 deletions(-) (limited to 'fs') diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index 32fd5fe9ca0e..e141939080f0 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -34,57 +34,81 @@ static const struct address_space_operations cramfs_aops; static DEFINE_MUTEX(read_mutex); -/* These two macros may change in future, to provide better st_ino - semantics. */ -#define CRAMINO(x) (((x)->offset && (x)->size)?(x)->offset<<2:1) +/* These macros may change in future, to provide better st_ino semantics. */ #define OFFSET(x) ((x)->i_ino) -static void setup_inode(struct inode *inode, struct cramfs_inode * cramfs_inode) +static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset) { + if (!cino->offset) + return offset + 1; + if (!cino->size) + return offset + 1; + + /* + * The file mode test fixes buggy mkcramfs implementations where + * cramfs_inode->offset is set to a non zero value for entries + * which did not contain data, like devices node and fifos. 
+ */ + switch (cino->mode & S_IFMT) { + case S_IFREG: + case S_IFDIR: + case S_IFLNK: + return cino->offset << 2; + default: + break; + } + return offset + 1; +} + +static struct inode *get_cramfs_inode(struct super_block *sb, + struct cramfs_inode *cramfs_inode, unsigned int offset) +{ + struct inode *inode; static struct timespec zerotime; + + inode = iget_locked(sb, cramino(cramfs_inode, offset)); + if (!inode) + return ERR_PTR(-ENOMEM); + if (!(inode->i_state & I_NEW)) + return inode; + + switch (cramfs_inode->mode & S_IFMT) { + case S_IFREG: + inode->i_fop = &generic_ro_fops; + inode->i_data.a_ops = &cramfs_aops; + break; + case S_IFDIR: + inode->i_op = &cramfs_dir_inode_operations; + inode->i_fop = &cramfs_directory_operations; + break; + case S_IFLNK: + inode->i_op = &page_symlink_inode_operations; + inode->i_data.a_ops = &cramfs_aops; + break; + default: + init_special_inode(inode, cramfs_inode->mode, + old_decode_dev(cramfs_inode->size)); + } + inode->i_mode = cramfs_inode->mode; inode->i_uid = cramfs_inode->uid; - inode->i_size = cramfs_inode->size; - inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1; inode->i_gid = cramfs_inode->gid; + + /* if the lower 2 bits are zero, the inode contains data */ + if (!(inode->i_ino & 3)) { + inode->i_size = cramfs_inode->size; + inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1; + } + /* Struct copy intentional */ inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime; /* inode->i_nlink is left 1 - arguably wrong for directories, but it's the best we can do without reading the directory contents. 1 yields the right result in GNU find, even without -noleaf option. */ - if (S_ISREG(inode->i_mode)) { - inode->i_fop = &generic_ro_fops; - inode->i_data.a_ops = &cramfs_aops; - } else if (S_ISDIR(inode->i_mode)) { - inode->i_op = &cramfs_dir_inode_operations; - inode->i_fop = &cramfs_directory_operations; - } else if (S_ISLNK(inode->i_mode)) { - inode->i_op = &page_symlink_inode_operations; - inode->i_data.a_ops = &cramfs_aops; - } else { - init_special_inode(inode, inode->i_mode, - old_decode_dev(cramfs_inode->size)); - } -} -static struct inode *get_cramfs_inode(struct super_block *sb, - struct cramfs_inode * cramfs_inode) -{ - struct inode *inode; - if (CRAMINO(cramfs_inode) == 1) { - inode = new_inode(sb); - if (inode) { - inode->i_ino = 1; - setup_inode(inode, cramfs_inode); - } - } else { - inode = iget_locked(sb, CRAMINO(cramfs_inode)); - if (inode && (inode->i_state & I_NEW)) { - setup_inode(inode, cramfs_inode); - unlock_new_inode(inode); - } - } + unlock_new_inode(inode); + return inode; } @@ -265,6 +289,9 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent) printk(KERN_ERR "cramfs: root is not a directory\n"); goto out; } + /* correct strange, hard-coded permissions of mkcramfs */ + super.root.mode |= (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); + root_offset = super.root.offset << 2; if (super.flags & CRAMFS_FLAG_FSID_VERSION_2) { sbi->size=super.size; @@ -289,7 +316,7 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent) /* Set it all up.. 
*/ sb->s_op = &cramfs_ops; - root = get_cramfs_inode(sb, &super.root); + root = get_cramfs_inode(sb, &super.root, 0); if (!root) goto out; sb->s_root = d_alloc_root(root); @@ -365,7 +392,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir) */ namelen = de->namelen << 2; memcpy(buf, name, namelen); - ino = CRAMINO(de); + ino = cramino(de, OFFSET(inode) + offset); mode = de->mode; mutex_unlock(&read_mutex); nextoffset = offset + sizeof(*de) + namelen; @@ -404,8 +431,9 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s struct cramfs_inode *de; char *name; int namelen, retval; + int dir_off = OFFSET(dir) + offset; - de = cramfs_read(dir->i_sb, OFFSET(dir) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN); + de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN); name = (char *)(de+1); /* Try to take advantage of sorted directories */ @@ -436,7 +464,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s if (!retval) { struct cramfs_inode entry = *de; mutex_unlock(&read_mutex); - d_add(dentry, get_cramfs_inode(dir->i_sb, &entry)); + d_add(dentry, get_cramfs_inode(dir->i_sb, &entry, dir_off)); return NULL; } /* else (retval < 0) */ -- cgit v1.2.2 From 8a0eebf66e3b1deae036553ba641a9c2bdbae678 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 13 Jan 2011 14:15:50 -0500 Subject: NFS: Fix NFSv3 exclusive open semantics Commit c0204fd2b8fe047b18b67e07e1bf2a03691240cd (NFS: Clean up nfs4_proc_create()) broke NFSv3 exclusive open by removing the code that passes the O_EXCL flag down to nfs3_proc_create(). This patch reverts that offending hunk from the original commit. Reported-by: Nick Bowler Signed-off-by: Trond Myklebust Cc: stable@kernel.org [2.6.37] Tested-by: Nick Bowler Signed-off-by: Linus Torvalds --- fs/nfs/dir.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 95b081bc9e25..64ee240f3c80 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1579,6 +1579,7 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode, { struct iattr attr; int error; + int open_flags = 0; dfprintk(VFS, "NFS: create(%s/%ld), %s\n", dir->i_sb->s_id, dir->i_ino, dentry->d_name.name); @@ -1586,7 +1587,10 @@ static int nfs_create(struct inode *dir, struct dentry *dentry, int mode, attr.ia_mode = mode; attr.ia_valid = ATTR_MODE; - error = NFS_PROTO(dir)->create(dir, dentry, &attr, 0, NULL); + if ((nd->flags & LOOKUP_CREATE) != 0) + open_flags = nd->intent.open.flags; + + error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, NULL); if (error != 0) goto out_err; return 0; -- cgit v1.2.2 From 81bb8debd0d570dc67dc1e9d8b612632cb941893 Mon Sep 17 00:00:00 2001 From: Phillip Lougher Date: Thu, 9 Dec 2010 02:02:29 +0000 Subject: Squashfs: add XZ compression support Add support for reading file systems compressed with the XZ compression algorithm. This patch adds the XZ decompressor wrapper code. 
Signed-off-by: Phillip Lougher --- fs/squashfs/squashfs_fs.h | 1 + fs/squashfs/xz_wrapper.c | 153 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 154 insertions(+) create mode 100644 fs/squashfs/xz_wrapper.c (limited to 'fs') diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h index c5137fc9ab11..39533feffd6d 100644 --- a/fs/squashfs/squashfs_fs.h +++ b/fs/squashfs/squashfs_fs.h @@ -238,6 +238,7 @@ struct meta_index { #define ZLIB_COMPRESSION 1 #define LZMA_COMPRESSION 2 #define LZO_COMPRESSION 3 +#define XZ_COMPRESSION 4 struct squashfs_super_block { __le32 s_magic; diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c new file mode 100644 index 000000000000..856756ca5ee4 --- /dev/null +++ b/fs/squashfs/xz_wrapper.c @@ -0,0 +1,153 @@ +/* + * Squashfs - a compressed read only filesystem for Linux + * + * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 + * Phillip Lougher + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2, + * or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * xz_wrapper.c + */ + + +#include +#include +#include +#include + +#include "squashfs_fs.h" +#include "squashfs_fs_sb.h" +#include "squashfs_fs_i.h" +#include "squashfs.h" +#include "decompressor.h" + +struct squashfs_xz { + struct xz_dec *state; + struct xz_buf buf; +}; + +static void *squashfs_xz_init(struct squashfs_sb_info *msblk) +{ + int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE); + + struct squashfs_xz *stream = kmalloc(sizeof(*stream), GFP_KERNEL); + if (stream == NULL) + goto failed; + + stream->state = xz_dec_init(XZ_PREALLOC, block_size); + if (stream->state == NULL) + goto failed; + + return stream; + +failed: + ERROR("Failed to allocate xz workspace\n"); + kfree(stream); + return NULL; +} + + +static void squashfs_xz_free(void *strm) +{ + struct squashfs_xz *stream = strm; + + if (stream) { + xz_dec_end(stream->state); + kfree(stream); + } +} + + +static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer, + struct buffer_head **bh, int b, int offset, int length, int srclength, + int pages) +{ + enum xz_ret xz_err; + int avail, total = 0, k = 0, page = 0; + struct squashfs_xz *stream = msblk->stream; + + mutex_lock(&msblk->read_data_mutex); + + xz_dec_reset(stream->state); + stream->buf.in_pos = 0; + stream->buf.in_size = 0; + stream->buf.out_pos = 0; + stream->buf.out_size = PAGE_CACHE_SIZE; + stream->buf.out = buffer[page++]; + + do { + if (stream->buf.in_pos == stream->buf.in_size && k < b) { + avail = min(length, msblk->devblksize - offset); + length -= avail; + wait_on_buffer(bh[k]); + if (!buffer_uptodate(bh[k])) + goto release_mutex; + + if (avail == 0) { + offset = 0; + put_bh(bh[k++]); + continue; + } + + stream->buf.in = bh[k]->b_data + offset; + stream->buf.in_size = avail; + stream->buf.in_pos = 0; + offset = 0; + } + + if (stream->buf.out_pos == stream->buf.out_size + && page < pages) { + 
stream->buf.out = buffer[page++]; + stream->buf.out_pos = 0; + total += PAGE_CACHE_SIZE; + } + + xz_err = xz_dec_run(stream->state, &stream->buf); + + if (stream->buf.in_pos == stream->buf.in_size && k < b) + put_bh(bh[k++]); + } while (xz_err == XZ_OK); + + if (xz_err != XZ_STREAM_END) { + ERROR("xz_dec_run error, data probably corrupt\n"); + goto release_mutex; + } + + if (k < b) { + ERROR("xz_uncompress error, input remaining\n"); + goto release_mutex; + } + + total += stream->buf.out_pos; + mutex_unlock(&msblk->read_data_mutex); + return total; + +release_mutex: + mutex_unlock(&msblk->read_data_mutex); + + for (; k < b; k++) + put_bh(bh[k]); + + return -EIO; +} + +const struct squashfs_decompressor squashfs_xz_comp_ops = { + .init = squashfs_xz_init, + .free = squashfs_xz_free, + .decompress = squashfs_xz_uncompress, + .id = XZ_COMPRESSION, + .name = "xz", + .supported = 1 +}; -- cgit v1.2.2 From 7a43ae523744c01b6187013e781f44c2281c579c Mon Sep 17 00:00:00 2001 From: Phillip Lougher Date: Thu, 9 Dec 2010 02:08:31 +0000 Subject: Squashfs: Add XZ compression configuration option Signed-off-by: Phillip Lougher --- fs/squashfs/Kconfig | 15 +++++++++++++++ fs/squashfs/Makefile | 1 + fs/squashfs/decompressor.c | 7 +++++++ fs/squashfs/decompressor.h | 5 +++++ 4 files changed, 28 insertions(+) (limited to 'fs') diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig index e5f63da64d04..50cc4edbca06 100644 --- a/fs/squashfs/Kconfig +++ b/fs/squashfs/Kconfig @@ -53,6 +53,21 @@ config SQUASHFS_LZO If unsure, say N. +config SQUASHFS_XZ + bool "Include support for XZ compressed file systems" + depends on SQUASHFS + select XZ_DEC + help + Saying Y here includes support for reading Squashfs file systems + compressed with XZ compresssion. XZ gives better compression than + the default zlib compression, at the expense of greater CPU and + memory overhead. + + XZ is not the standard compression used in Squashfs and so most + file systems will be readable without selecting this option. + + If unsure, say N. 
+ config SQUASHFS_EMBEDDED bool "Additional option for memory-constrained systems" depends on SQUASHFS diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile index 7672bac8d328..cecf2bea07af 100644 --- a/fs/squashfs/Makefile +++ b/fs/squashfs/Makefile @@ -7,3 +7,4 @@ squashfs-y += block.o cache.o dir.o export.o file.o fragment.o id.o inode.o squashfs-y += namei.o super.o symlink.o zlib_wrapper.o decompressor.o squashfs-$(CONFIG_SQUASHFS_XATTR) += xattr.o xattr_id.o squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o +squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c index 24af9ce9722f..482d78197811 100644 --- a/fs/squashfs/decompressor.c +++ b/fs/squashfs/decompressor.c @@ -46,6 +46,12 @@ static const struct squashfs_decompressor squashfs_lzo_unsupported_comp_ops = { }; #endif +#ifndef CONFIG_SQUASHFS_XZ +static const struct squashfs_decompressor squashfs_xz_comp_ops = { + NULL, NULL, NULL, XZ_COMPRESSION, "xz", 0 +}; +#endif + static const struct squashfs_decompressor squashfs_unknown_comp_ops = { NULL, NULL, NULL, 0, "unknown", 0 }; @@ -58,6 +64,7 @@ static const struct squashfs_decompressor *decompressor[] = { #else &squashfs_lzo_unsupported_comp_ops, #endif + &squashfs_xz_comp_ops, &squashfs_unknown_comp_ops }; diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h index 7425f80783f6..57e1acb4c6a9 100644 --- a/fs/squashfs/decompressor.h +++ b/fs/squashfs/decompressor.h @@ -52,4 +52,9 @@ static inline int squashfs_decompress(struct squashfs_sb_info *msblk, return msblk->decompressor->decompress(msblk, buffer, bh, b, offset, length, srclength, pages); } + +#ifdef CONFIG_SQUASHFS_XZ +extern const struct squashfs_decompressor squashfs_xz_comp_ops; +#endif + #endif -- cgit v1.2.2 From 170cf02165272dfe026eba183563bad973ca4f05 Mon Sep 17 00:00:00 2001 From: Phillip Lougher Date: Wed, 5 Jan 2011 17:52:26 +0000 Subject: Squashfs: remove unnecessary variable in zlib_wrapper Get rid of unnecessary bytes variable, and remove redundant initialisation of zlib_err. Signed-off-by: Phillip Lougher --- fs/squashfs/zlib_wrapper.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c index 7a603874e483..1f4833b87ea3 100644 --- a/fs/squashfs/zlib_wrapper.c +++ b/fs/squashfs/zlib_wrapper.c @@ -66,8 +66,8 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer, struct buffer_head **bh, int b, int offset, int length, int srclength, int pages) { - int zlib_err = 0, zlib_init = 0; - int avail, bytes, k = 0, page = 0; + int zlib_err, zlib_init = 0; + int k = 0, page = 0; z_stream *stream = msblk->stream; mutex_lock(&msblk->read_data_mutex); @@ -75,11 +75,10 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer, stream->avail_out = 0; stream->avail_in = 0; - bytes = length; do { if (stream->avail_in == 0 && k < b) { - avail = min(bytes, msblk->devblksize - offset); - bytes -= avail; + int avail = min(length, msblk->devblksize - offset); + length -= avail; wait_on_buffer(bh[k]); if (!buffer_uptodate(bh[k])) goto release_mutex; -- cgit v1.2.2 From e7ee11f0ecd587caed0063c5f68ca20fef699f32 Mon Sep 17 00:00:00 2001 From: Phillip Lougher Date: Wed, 5 Jan 2011 18:02:37 +0000 Subject: Squashfs: add missing check in zlib_wrapper On file system corruption zlib can return Z_STREAM_OK with input buffers remaining, which will not be released. 
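The check added below has a direct analogue outside the kernel: once zlib reports end of stream, any unconsumed input bytes mean the source block was corrupt or overlong. A minimal user-space sketch of that pattern using the ordinary zlib API rather than the kernel's zlib wrappers (the function name and buffer handling here are illustrative assumptions, not Squashfs code):

#include <string.h>
#include <zlib.h>

/* Decompress src into dst; fail if the stream did not consume all input. */
static int inflate_exact(const unsigned char *src, unsigned int src_len,
			 unsigned char *dst, unsigned int dst_len)
{
	z_stream strm;
	int ret;

	memset(&strm, 0, sizeof(strm));
	if (inflateInit(&strm) != Z_OK)
		return -1;

	strm.next_in = (unsigned char *)src;
	strm.avail_in = src_len;
	strm.next_out = dst;
	strm.avail_out = dst_len;

	ret = inflate(&strm, Z_FINISH);
	if (ret != Z_STREAM_END || strm.avail_in != 0) {
		/* incomplete stream, or input remaining: treat as corrupt */
		inflateEnd(&strm);
		return -1;
	}

	ret = dst_len - strm.avail_out;		/* bytes produced */
	inflateEnd(&strm);
	return ret;
}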
Signed-off-by: Phillip Lougher --- fs/squashfs/zlib_wrapper.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c index 1f4833b87ea3..ab5801f66e26 100644 --- a/fs/squashfs/zlib_wrapper.c +++ b/fs/squashfs/zlib_wrapper.c @@ -127,6 +127,11 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer, goto release_mutex; } + if (k < b) { + ERROR("zlib_uncompress error, data remaining\n"); + goto release_mutex; + } + length = stream->total_out; mutex_unlock(&msblk->read_data_mutex); return length; -- cgit v1.2.2 From 6197fd86789a28760f8375b5ae8885cd7258042f Mon Sep 17 00:00:00 2001 From: Phillip Lougher Date: Wed, 5 Jan 2011 18:15:58 +0000 Subject: Squashfs: get rid of default n in Kconfig As pointed out by Geert Uytterhoeven, "default n" is the default, no reason to specify it. Signed-off-by: Phillip Lougher --- fs/squashfs/Kconfig | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig index 50cc4edbca06..aa68a8a31518 100644 --- a/fs/squashfs/Kconfig +++ b/fs/squashfs/Kconfig @@ -29,7 +29,6 @@ config SQUASHFS config SQUASHFS_XATTR bool "Squashfs XATTR support" depends on SQUASHFS - default n help Saying Y here includes support for extended attributes (xattrs). Xattrs are name:value pairs associated with inodes by @@ -40,7 +39,6 @@ config SQUASHFS_XATTR config SQUASHFS_LZO bool "Include support for LZO compressed file systems" depends on SQUASHFS - default n select LZO_DECOMPRESS help Saying Y here includes support for reading Squashfs file systems @@ -71,7 +69,6 @@ config SQUASHFS_XZ config SQUASHFS_EMBEDDED bool "Additional option for memory-constrained systems" depends on SQUASHFS - default n help Saying Y here allows you to specify cache size. -- cgit v1.2.2 From 8fcd97216f45b1691f8f91f35cc108d06e0bfca8 Mon Sep 17 00:00:00 2001 From: Phillip Lougher Date: Thu, 6 Jan 2011 06:08:50 +0000 Subject: Squashfs: move squashfs_i() definition from squashfs.h Move squashfs_i() definition out of squashfs.h, this eliminates the need to #include squashfs_fs_i.h from numerous files. 
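squashfs_i() is the usual embedded-inode accessor: the VFS struct inode is embedded inside the filesystem's private inode, and the helper recovers the containing structure, so it reads most naturally next to the structure it dereferences in squashfs_fs_i.h. A generic, hedged sketch of the idiom with made-up examplefs names (the real helper spells the same operation with list_entry(), which is equivalent to container_of()):

#include <linux/fs.h>
#include <linux/kernel.h>	/* container_of() */

struct examplefs_inode_info {
	unsigned int	start_block;	/* filesystem-private fields ... */
	struct inode	vfs_inode;	/* embedded VFS inode */
};

/* Recover the private inode from the VFS inode pointer handed to us. */
static inline struct examplefs_inode_info *EXAMPLEFS_I(struct inode *inode)
{
	return container_of(inode, struct examplefs_inode_info, vfs_inode);
}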
Signed-off-by: Phillip Lougher --- fs/squashfs/block.c | 1 - fs/squashfs/cache.c | 1 - fs/squashfs/decompressor.c | 1 - fs/squashfs/fragment.c | 1 - fs/squashfs/id.c | 1 - fs/squashfs/lzo_wrapper.c | 1 - fs/squashfs/squashfs.h | 5 ----- fs/squashfs/squashfs_fs_i.h | 6 ++++++ fs/squashfs/xattr_id.c | 1 - fs/squashfs/zlib_wrapper.c | 1 - 10 files changed, 6 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 653c030eb840..2fb2882f0fa7 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c @@ -34,7 +34,6 @@ #include "squashfs_fs.h" #include "squashfs_fs_sb.h" -#include "squashfs_fs_i.h" #include "squashfs.h" #include "decompressor.h" diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c index 57314bee9059..26b15ae34d6f 100644 --- a/fs/squashfs/cache.c +++ b/fs/squashfs/cache.c @@ -55,7 +55,6 @@ #include "squashfs_fs.h" #include "squashfs_fs_sb.h" -#include "squashfs_fs_i.h" #include "squashfs.h" /* diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c index 482d78197811..50b22d330cec 100644 --- a/fs/squashfs/decompressor.c +++ b/fs/squashfs/decompressor.c @@ -27,7 +27,6 @@ #include "squashfs_fs.h" #include "squashfs_fs_sb.h" -#include "squashfs_fs_i.h" #include "decompressor.h" #include "squashfs.h" diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c index 7c90bbd6879d..7eef571443c6 100644 --- a/fs/squashfs/fragment.c +++ b/fs/squashfs/fragment.c @@ -39,7 +39,6 @@ #include "squashfs_fs.h" #include "squashfs_fs_sb.h" -#include "squashfs_fs_i.h" #include "squashfs.h" /* diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c index b7f64bcd2b70..d8f32452638e 100644 --- a/fs/squashfs/id.c +++ b/fs/squashfs/id.c @@ -37,7 +37,6 @@ #include "squashfs_fs.h" #include "squashfs_fs_sb.h" -#include "squashfs_fs_i.h" #include "squashfs.h" /* diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c index 5d87789bf1c1..7da759e34c52 100644 --- a/fs/squashfs/lzo_wrapper.c +++ b/fs/squashfs/lzo_wrapper.c @@ -29,7 +29,6 @@ #include "squashfs_fs.h" #include "squashfs_fs_sb.h" -#include "squashfs_fs_i.h" #include "squashfs.h" #include "decompressor.h" diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index 5d45569d5f72..18f187fb486b 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h @@ -27,11 +27,6 @@ #define WARNING(s, args...) 
pr_warning("SQUASHFS: "s, ## args) -static inline struct squashfs_inode_info *squashfs_i(struct inode *inode) -{ - return list_entry(inode, struct squashfs_inode_info, vfs_inode); -} - /* block.c */ extern int squashfs_read_data(struct super_block *, void **, u64, int, u64 *, int, int); diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h index d3e3a37f28a1..359baefc01fc 100644 --- a/fs/squashfs/squashfs_fs_i.h +++ b/fs/squashfs/squashfs_fs_i.h @@ -45,4 +45,10 @@ struct squashfs_inode_info { }; struct inode vfs_inode; }; + + +static inline struct squashfs_inode_info *squashfs_i(struct inode *inode) +{ + return list_entry(inode, struct squashfs_inode_info, vfs_inode); +} #endif diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c index d33be5dd6c32..05385dbe1465 100644 --- a/fs/squashfs/xattr_id.c +++ b/fs/squashfs/xattr_id.c @@ -32,7 +32,6 @@ #include "squashfs_fs.h" #include "squashfs_fs_sb.h" -#include "squashfs_fs_i.h" #include "squashfs.h" #include "xattr.h" diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c index ab5801f66e26..818a5e063faf 100644 --- a/fs/squashfs/zlib_wrapper.c +++ b/fs/squashfs/zlib_wrapper.c @@ -29,7 +29,6 @@ #include "squashfs_fs.h" #include "squashfs_fs_sb.h" -#include "squashfs_fs_i.h" #include "squashfs.h" #include "decompressor.h" -- cgit v1.2.2 From 01a678c5a2f41663b8faf03d17e2bbdbf44158a9 Mon Sep 17 00:00:00 2001 From: Phillip Lougher Date: Wed, 5 Jan 2011 18:23:53 +0000 Subject: Squashfs: simplify CONFIG_SQUASHFS_LZO handling Get rid of messy repeated #if(n)def CONFIG_SQUASHFS_LZO code in decompressor.c Signed-off-by: Phillip Lougher --- fs/squashfs/decompressor.c | 8 ++------ fs/squashfs/decompressor.h | 4 ++++ fs/squashfs/squashfs.h | 3 --- 3 files changed, 6 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c index 50b22d330cec..a5940e54c4dd 100644 --- a/fs/squashfs/decompressor.c +++ b/fs/squashfs/decompressor.c @@ -40,7 +40,7 @@ static const struct squashfs_decompressor squashfs_lzma_unsupported_comp_ops = { }; #ifndef CONFIG_SQUASHFS_LZO -static const struct squashfs_decompressor squashfs_lzo_unsupported_comp_ops = { +static const struct squashfs_decompressor squashfs_lzo_comp_ops = { NULL, NULL, NULL, LZO_COMPRESSION, "lzo", 0 }; #endif @@ -57,13 +57,9 @@ static const struct squashfs_decompressor squashfs_unknown_comp_ops = { static const struct squashfs_decompressor *decompressor[] = { &squashfs_zlib_comp_ops, - &squashfs_lzma_unsupported_comp_ops, -#ifdef CONFIG_SQUASHFS_LZO &squashfs_lzo_comp_ops, -#else - &squashfs_lzo_unsupported_comp_ops, -#endif &squashfs_xz_comp_ops, + &squashfs_lzma_unsupported_comp_ops, &squashfs_unknown_comp_ops }; diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h index 57e1acb4c6a9..3b305a70f7aa 100644 --- a/fs/squashfs/decompressor.h +++ b/fs/squashfs/decompressor.h @@ -57,4 +57,8 @@ static inline int squashfs_decompress(struct squashfs_sb_info *msblk, extern const struct squashfs_decompressor squashfs_xz_comp_ops; #endif +#ifdef CONFIG_SQUASHFS_LZO +extern const struct squashfs_decompressor squashfs_lzo_comp_ops; +#endif + #endif diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index 18f187fb486b..ba729d808876 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h @@ -99,6 +99,3 @@ extern const struct xattr_handler *squashfs_xattr_handlers[]; /* zlib_wrapper.c */ extern const struct squashfs_decompressor squashfs_zlib_comp_ops; - -/* lzo_wrapper.c */ -extern const struct 
squashfs_decompressor squashfs_lzo_comp_ops; -- cgit v1.2.2 From 1c1266bb916e6a6b362d3be95f2cc7f3c41277a6 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Wed, 12 Jan 2011 16:53:27 -0800 Subject: ceph: fix getattr on directory when using norbytes The norbytes mount option was broken, and when doing getattr on a directory it return the rbytes instead of the number of entities. This commit fixes it. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/inode.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index e791fa34b23d..50001de66c69 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -701,10 +701,6 @@ static int fill_inode(struct inode *inode, ci->i_ceph_flags |= CEPH_I_COMPLETE; ci->i_max_offset = 2; } - - /* it may be better to set st_size in getattr instead? */ - if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES)) - inode->i_size = ci->i_rbytes; break; default: pr_err("fill_inode %llx.%llx BAD mode 0%o\n", @@ -1805,7 +1801,11 @@ int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, else stat->dev = 0; if (S_ISDIR(inode->i_mode)) { - stat->size = ci->i_rbytes; + if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), + RBYTES)) + stat->size = ci->i_rbytes; + else + stat->size = ci->i_files + ci->i_subdirs; stat->blocks = 0; stat->blksize = 65536; } -- cgit v1.2.2 From 17db143fc091238c43ab9f373974ca2224a4c3f8 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 13 Jan 2011 15:27:29 -0800 Subject: ceph: fix xattr rbtree search Fix xattr name comparison in rbtree search for strings that share a prefix. The *name argument is null terminated, but the xattr name is not, so we need to use strncmp, but that means adjusting for the case where name is a prefix of xattr->name. The corresponding case in __set_xattr() already handles this properly (although in that case *name is also not null terminated). Reported-by: Sergiy Kibrik Signed-off-by: Sage Weil --- fs/ceph/xattr.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 6e12a6ba5f79..8c9eba6ef9df 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -219,6 +219,7 @@ static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci, struct rb_node **p; struct rb_node *parent = NULL; struct ceph_inode_xattr *xattr = NULL; + int name_len = strlen(name); int c; p = &ci->i_xattrs.index.rb_node; @@ -226,6 +227,8 @@ static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci, parent = *p; xattr = rb_entry(parent, struct ceph_inode_xattr, node); c = strncmp(name, xattr->name, xattr->name_len); + if (c == 0 && name_len > xattr->name_len) + c = 1; if (c < 0) p = &(*p)->rb_left; else if (c > 0) -- cgit v1.2.2 From 6254b32b5791e47ba1c679d023f26985fa34755a Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 13 Jan 2011 17:19:38 -0800 Subject: ecryptfs: fix broken build Stephen Rothwell reports that the vfs merge broke the build of ecryptfs. The breakage comes from commit 66cb76666d69 ("sanitize ecryptfs ->mount()") which was obviously not even build tested. Tssk, tssk, Al. This is the minimal build fixup for the situation, although I don't have a filesystem to actually test it with. 
Reported-by: Stephen Rothwell Cc: Al Viro Signed-off-by: Linus Torvalds --- fs/ecryptfs/main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 9ed476906327..d3b28abdd6aa 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -141,13 +141,12 @@ int ecryptfs_init_persistent_file(struct dentry *ecryptfs_dentry) return rc; } -static inode *ecryptfs_get_inode(struct inode *lower_inode, +static struct inode *ecryptfs_get_inode(struct inode *lower_inode, struct super_block *sb) { struct inode *inode; int rc = 0; - lower_inode = lower_dentry->d_inode; if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb)) { rc = -EXDEV; goto out; @@ -202,7 +201,7 @@ int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry, { struct inode *lower_inode = lower_dentry->d_inode; struct inode *inode = ecryptfs_get_inode(lower_inode, sb); - if (IS_ERR(inode) + if (IS_ERR(inode)) return PTR_ERR(inode); if (flags & ECRYPTFS_INTERPOSE_FLAG_D_ADD) d_add(dentry, inode); -- cgit v1.2.2 From 6585027a5e8cb490e3a761b2f3f3c3acf722aff2 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 13 Jan 2011 15:45:44 -0800 Subject: writeback: integrated background writeback work Check whether background writeback is needed after finishing each work. When bdi flusher thread finishes doing some work check whether any kind of background writeback needs to be done (either because dirty_background_ratio is exceeded or because we need to start flushing old inodes). If so, just do background write back. This way, bdi_start_background_writeback() just needs to wake up the flusher thread. It will do background writeback as soon as there is no other work. This is a preparatory patch for the next patch which stops background writeback as soon as there is other work to do. Signed-off-by: Jan Kara Signed-off-by: Wu Fengguang Cc: Johannes Weiner Cc: Dave Chinner Cc: Christoph Hellwig Cc: Jan Engelhardt Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fs-writeback.c | 61 +++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 46 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 3d06ccc953aa..3a07f6d8bc0b 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -84,13 +84,9 @@ static inline struct inode *wb_inode(struct list_head *head) return list_entry(head, struct inode, i_wb_list); } -static void bdi_queue_work(struct backing_dev_info *bdi, - struct wb_writeback_work *work) +/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */ +static void bdi_wakeup_flusher(struct backing_dev_info *bdi) { - trace_writeback_queue(bdi, work); - - spin_lock_bh(&bdi->wb_lock); - list_add_tail(&work->list, &bdi->work_list); if (bdi->wb.task) { wake_up_process(bdi->wb.task); } else { @@ -98,15 +94,26 @@ static void bdi_queue_work(struct backing_dev_info *bdi, * The bdi thread isn't there, wake up the forker thread which * will create and run it. 
*/ - trace_writeback_nothread(bdi, work); wake_up_process(default_backing_dev_info.wb.task); } +} + +static void bdi_queue_work(struct backing_dev_info *bdi, + struct wb_writeback_work *work) +{ + trace_writeback_queue(bdi, work); + + spin_lock_bh(&bdi->wb_lock); + list_add_tail(&work->list, &bdi->work_list); + if (!bdi->wb.task) + trace_writeback_nothread(bdi, work); + bdi_wakeup_flusher(bdi); spin_unlock_bh(&bdi->wb_lock); } static void __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, - bool range_cyclic, bool for_background) + bool range_cyclic) { struct wb_writeback_work *work; @@ -126,7 +133,6 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, work->sync_mode = WB_SYNC_NONE; work->nr_pages = nr_pages; work->range_cyclic = range_cyclic; - work->for_background = for_background; bdi_queue_work(bdi, work); } @@ -144,7 +150,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, */ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) { - __bdi_start_writeback(bdi, nr_pages, true, false); + __bdi_start_writeback(bdi, nr_pages, true); } /** @@ -152,13 +158,20 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) * @bdi: the backing device to write from * * Description: - * This does WB_SYNC_NONE background writeback. The IO is only - * started when this function returns, we make no guarentees on - * completion. Caller need not hold sb s_umount semaphore. + * This makes sure WB_SYNC_NONE background writeback happens. When + * this function returns, it is only guaranteed that for given BDI + * some IO is happening if we are over background dirty threshold. + * Caller need not hold sb s_umount semaphore. */ void bdi_start_background_writeback(struct backing_dev_info *bdi) { - __bdi_start_writeback(bdi, LONG_MAX, true, true); + /* + * We just wake up the flusher thread. It will perform background + * writeback as soon as there is no other work to do. + */ + spin_lock_bh(&bdi->wb_lock); + bdi_wakeup_flusher(bdi); + spin_unlock_bh(&bdi->wb_lock); } /* @@ -718,6 +731,23 @@ static unsigned long get_nr_dirty_pages(void) get_nr_dirty_inodes(); } +static long wb_check_background_flush(struct bdi_writeback *wb) +{ + if (over_bground_thresh()) { + + struct wb_writeback_work work = { + .nr_pages = LONG_MAX, + .sync_mode = WB_SYNC_NONE, + .for_background = 1, + .range_cyclic = 1, + }; + + return wb_writeback(wb, &work); + } + + return 0; +} + static long wb_check_old_data_flush(struct bdi_writeback *wb) { unsigned long expired; @@ -787,6 +817,7 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait) * Check for periodic writeback, kupdated() style */ wrote += wb_check_old_data_flush(wb); + wrote += wb_check_background_flush(wb); clear_bit(BDI_writeback_running, &wb->bdi->state); return wrote; @@ -873,7 +904,7 @@ void wakeup_flusher_threads(long nr_pages) list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) { if (!bdi_has_dirty_io(bdi)) continue; - __bdi_start_writeback(bdi, nr_pages, false, false); + __bdi_start_writeback(bdi, nr_pages, false); } rcu_read_unlock(); } -- cgit v1.2.2 From 71927e84e0aebfbe5a91565c3b207af25a4e9162 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Thu, 13 Jan 2011 15:45:46 -0800 Subject: writeback: trace wakeup event for background writeback This tracks when balance_dirty_pages() tries to wakeup the flusher thread for background writeback (if it was not started already). 
Suggested-by: Christoph Hellwig Signed-off-by: Wu Fengguang Cc: Jan Kara Cc: Johannes Weiner Cc: Dave Chinner Cc: Jan Engelhardt Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fs-writeback.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 3a07f6d8bc0b..482de0a92ca7 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -169,6 +169,7 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi) * We just wake up the flusher thread. It will perform background * writeback as soon as there is no other work to do. */ + trace_writeback_wake_background(bdi); spin_lock_bh(&bdi->wb_lock); bdi_wakeup_flusher(bdi); spin_unlock_bh(&bdi->wb_lock); -- cgit v1.2.2 From aa373cf550994623efb5d49a4d8775bafd10bbc1 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 13 Jan 2011 15:45:47 -0800 Subject: writeback: stop background/kupdate works from livelocking other works Background writeback is easily livelockable in a loop in wb_writeback() by a process continuously re-dirtying pages (or continuously appending to a file). This is in fact intended as the target of background writeback is to write dirty pages it can find as long as we are over dirty_background_threshold. But the above behavior gets inconvenient at times because no other work queued in the flusher thread's queue gets processed. In particular, since e.g. sync(1) relies on flusher thread to do all the IO for it, sync(1) can hang forever waiting for flusher thread to do the work. Generally, when a flusher thread has some work queued, someone submitted the work to achieve a goal more specific than what background writeback does. Moreover by working on the specific work, we also reduce amount of dirty pages which is exactly the target of background writeout. So it makes sense to give specific work a priority over a generic page cleaning. Thus we interrupt background writeback if there is some other work to do. We return to the background writeback after completing all the queued work. This may delay the writeback of expired inodes for a while, however the expired inodes will eventually be flushed to disk as long as the other works won't livelock. [fengguang.wu@intel.com: update comment] Signed-off-by: Jan Kara Signed-off-by: Wu Fengguang Cc: Johannes Weiner Cc: Dave Chinner Cc: Christoph Hellwig Cc: Jan Engelhardt Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fs-writeback.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'fs') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 482de0a92ca7..9e72d04e706e 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -650,6 +650,16 @@ static long wb_writeback(struct bdi_writeback *wb, if (work->nr_pages <= 0) break; + /* + * Background writeout and kupdate-style writeback may + * run forever. Stop them if there is other work to do + * so that e.g. sync can proceed. They'll be restarted + * after the other works are all done. + */ + if ((work->for_background || work->for_kupdate) && + !list_empty(&wb->bdi->work_list)) + break; + /* * For background writeout, stop when we are below the * background dirty threshold -- cgit v1.2.2 From b9543dac5bbc4aef0a598965b6b34f6259ab9a9b Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 13 Jan 2011 15:45:48 -0800 Subject: writeback: avoid livelocking WB_SYNC_ALL writeback When wb_writeback() is called in WB_SYNC_ALL mode, work->nr_to_write is usually set to LONG_MAX. 
The logic in wb_writeback() then calls __writeback_inodes_sb() with nr_to_write == MAX_WRITEBACK_PAGES and we easily end up with non-positive nr_to_write after the function returns, if the inode has more than MAX_WRITEBACK_PAGES dirty pages at the moment. When nr_to_write is <= 0 wb_writeback() decides we need another round of writeback but this is wrong in some cases! For example when a single large file is continuously dirtied, we would never finish syncing it because each pass would be able to write MAX_WRITEBACK_PAGES and inode dirty timestamp never gets updated (as inode is never completely clean). Thus __writeback_inodes_sb() would write the redirtied inode again and again. Fix the issue by setting nr_to_write to LONG_MAX in WB_SYNC_ALL mode. We do not need nr_to_write in WB_SYNC_ALL mode anyway since write_cache_pages() does livelock avoidance using page tagging in WB_SYNC_ALL mode. This makes wb_writeback() call __writeback_inodes_sb() only once on WB_SYNC_ALL. The latter function won't livelock because it works on - a finite set of files by doing queue_io() once at the beginning - a finite set of pages by PAGECACHE_TAG_TOWRITE page tagging After this patch, program from http://lkml.org/lkml/2010/10/24/154 is no longer able to stall sync forever. [fengguang.wu@intel.com: fix locking comment] Signed-off-by: Jan Kara Signed-off-by: Wu Fengguang Cc: Johannes Weiner Cc: Dave Chinner Cc: Christoph Hellwig Cc: Jan Engelhardt Cc: Jens Axboe Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fs-writeback.c | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 9e72d04e706e..e8063c938dd2 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -630,6 +630,7 @@ static long wb_writeback(struct bdi_writeback *wb, }; unsigned long oldest_jif; long wrote = 0; + long write_chunk; struct inode *inode; if (wbc.for_kupdate) { @@ -642,6 +643,24 @@ static long wb_writeback(struct bdi_writeback *wb, wbc.range_end = LLONG_MAX; } + /* + * WB_SYNC_ALL mode does livelock avoidance by syncing dirty + * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX + * here avoids calling into writeback_inodes_wb() more than once. + * + * The intended call sequence for WB_SYNC_ALL writeback is: + * + * wb_writeback() + * __writeback_inodes_sb() <== called only once + * write_cache_pages() <== called once for each inode + * (quickly) tag currently dirty pages + * (maybe slowly) sync all tagged pages + */ + if (wbc.sync_mode == WB_SYNC_NONE) + write_chunk = MAX_WRITEBACK_PAGES; + else + write_chunk = LONG_MAX; + wbc.wb_start = jiffies; /* livelock avoidance */ for (;;) { /* @@ -668,7 +687,7 @@ static long wb_writeback(struct bdi_writeback *wb, break; wbc.more_io = 0; - wbc.nr_to_write = MAX_WRITEBACK_PAGES; + wbc.nr_to_write = write_chunk; wbc.pages_skipped = 0; trace_wbc_writeback_start(&wbc, wb->bdi); @@ -678,8 +697,8 @@ static long wb_writeback(struct bdi_writeback *wb, writeback_inodes_wb(wb, &wbc); trace_wbc_writeback_written(&wbc, wb->bdi); - work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; - wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; + work->nr_pages -= write_chunk - wbc.nr_to_write; + wrote += write_chunk - wbc.nr_to_write; /* * If we consumed everything, see if we have more @@ -694,7 +713,7 @@ static long wb_writeback(struct bdi_writeback *wb, /* * Did we write something? 
Try for more */ - if (wbc.nr_to_write < MAX_WRITEBACK_PAGES) + if (wbc.nr_to_write < write_chunk) continue; /* * Nothing written. Wait for some inode to -- cgit v1.2.2 From c691b9d983d7015d54057034f4cd9b6d8affd976 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 13 Jan 2011 15:45:48 -0800 Subject: sync_inode_metadata: fix comment Use correct function name, remove incorrect apostrophe Cc: Christoph Hellwig Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fs-writeback.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index e8063c938dd2..05aab263e9aa 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -1303,11 +1303,11 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc) EXPORT_SYMBOL(sync_inode); /** - * sync_inode - write an inode to disk + * sync_inode_metadata - write an inode to disk * @inode: the inode to sync * @wait: wait for I/O to complete. * - * Write an inode to disk and adjust it's dirty state after completion. + * Write an inode to disk and adjust its dirty state after completion. * * Note: only writes the actual inode, no associated data or other metadata. */ -- cgit v1.2.2 From c32b0d4b3f19c2f5d29568f8b7b72b61693f1277 Mon Sep 17 00:00:00 2001 From: Hai Shan Date: Thu, 13 Jan 2011 15:45:51 -0800 Subject: fs/mpage.c: consolidate code Merge mpage_end_io_read() and mpage_end_io_write() into mpage_end_io() to eliminate code duplication. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Hai Shan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/mpage.c | 49 +++++++++++++++++-------------------------------- 1 file changed, 17 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/mpage.c b/fs/mpage.c index fd56ca2ea556..d78455a81ec9 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -40,7 +40,7 @@ * status of that page is hard. See end_buffer_async_read() for the details. * There is no point in duplicating all that complexity. 
*/ -static void mpage_end_io_read(struct bio *bio, int err) +static void mpage_end_io(struct bio *bio, int err) { const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; @@ -50,44 +50,29 @@ static void mpage_end_io_read(struct bio *bio, int err) if (--bvec >= bio->bi_io_vec) prefetchw(&bvec->bv_page->flags); - - if (uptodate) { - SetPageUptodate(page); - } else { - ClearPageUptodate(page); - SetPageError(page); - } - unlock_page(page); - } while (bvec >= bio->bi_io_vec); - bio_put(bio); -} - -static void mpage_end_io_write(struct bio *bio, int err) -{ - const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); - struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; - - do { - struct page *page = bvec->bv_page; - - if (--bvec >= bio->bi_io_vec) - prefetchw(&bvec->bv_page->flags); - - if (!uptodate){ - SetPageError(page); - if (page->mapping) - set_bit(AS_EIO, &page->mapping->flags); + if (bio_data_dir(bio) == READ) { + if (uptodate) { + SetPageUptodate(page); + } else { + ClearPageUptodate(page); + SetPageError(page); + } + unlock_page(page); + } else { /* bio_data_dir(bio) == WRITE */ + if (!uptodate) { + SetPageError(page); + if (page->mapping) + set_bit(AS_EIO, &page->mapping->flags); + } + end_page_writeback(page); } - end_page_writeback(page); } while (bvec >= bio->bi_io_vec); bio_put(bio); } static struct bio *mpage_bio_submit(int rw, struct bio *bio) { - bio->bi_end_io = mpage_end_io_read; - if (rw == WRITE) - bio->bi_end_io = mpage_end_io_write; + bio->bi_end_io = mpage_end_io; submit_bio(rw, bio); return NULL; } -- cgit v1.2.2 From 2d90508f638241a2e7422d884767398296ebe720 Mon Sep 17 00:00:00 2001 From: Nikanth Karthikesan Date: Thu, 13 Jan 2011 15:45:53 -0800 Subject: mm: smaps: export mlock information Currently there is no way to find whether a process has locked its pages in memory or not. And which of the memory regions are locked in memory. Add a new field "Locked" to export this information via the smaps file. Signed-off-by: Nikanth Karthikesan Acked-by: Balbir Singh Acked-by: Wu Fengguang Cc: Matt Mackall Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index c3755bd8dd3e..60b914860f81 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -418,7 +418,8 @@ static int show_smap(struct seq_file *m, void *v) "Anonymous: %8lu kB\n" "Swap: %8lu kB\n" "KernelPageSize: %8lu kB\n" - "MMUPageSize: %8lu kB\n", + "MMUPageSize: %8lu kB\n" + "Locked: %8lu kB\n", (vma->vm_end - vma->vm_start) >> 10, mss.resident >> 10, (unsigned long)(mss.pss >> (10 + PSS_SHIFT)), @@ -430,7 +431,9 @@ static int show_smap(struct seq_file *m, void *v) mss.anonymous >> 10, mss.swap >> 10, vma_kernel_pagesize(vma) >> 10, - vma_mmu_pagesize(vma) >> 10); + vma_mmu_pagesize(vma) >> 10, + (vma->vm_flags & VM_LOCKED) ? + (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0); if (m->count < m->size) /* vma is copied successfully */ m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0; -- cgit v1.2.2 From dabb16f639820267b3850d804571c70bd93d4e07 Mon Sep 17 00:00:00 2001 From: Mandeep Singh Baines Date: Thu, 13 Jan 2011 15:46:05 -0800 Subject: oom: allow a non-CAP_SYS_RESOURCE proces to oom_score_adj down We'd like to be able to oom_score_adj a process up/down as it enters/leaves the foreground. Currently, it is not possible to oom_adj down without CAP_SYS_RESOURCE. 
This patch allows a task to decrease its oom_score_adj back to the value that a CAP_SYS_RESOURCE thread set it to or its inherited value at fork. Assuming the thread that has forked it has oom_score_adj of 0, each process could decrease it back from 0 upon activation unless a CAP_SYS_RESOURCE thread elevated it to something higher. Alternative considered: * a setuid binary * a daemon with CAP_SYS_RESOURCE Since you don't wan't all processes to be able to reduce their oom_adj, a setuid or daemon implementation would be complex. The alternatives also have much higher overhead. This patch updated from original patch based on feedback from David Rientjes. Signed-off-by: Mandeep Singh Baines Acked-by: David Rientjes Cc: KAMEZAWA Hiroyuki Cc: KOSAKI Motohiro Cc: Rik van Riel Cc: Ying Han Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/base.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/base.c b/fs/proc/base.c index 93f1cdd5d3d7..9d096e82b201 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1151,7 +1151,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf, goto err_task_lock; } - if (oom_score_adj < task->signal->oom_score_adj && + if (oom_score_adj < task->signal->oom_score_adj_min && !capable(CAP_SYS_RESOURCE)) { err = -EACCES; goto err_sighand; @@ -1164,6 +1164,8 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf, atomic_dec(&task->mm->oom_disable_count); } task->signal->oom_score_adj = oom_score_adj; + if (has_capability_noaudit(current, CAP_SYS_RESOURCE)) + task->signal->oom_score_adj_min = oom_score_adj; /* * Scale /proc/pid/oom_adj appropriately ensuring that OOM_DISABLE is * always attainable. -- cgit v1.2.2 From 79134171df238171daa4c024a42b77b401ccb00b Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Thu, 13 Jan 2011 15:46:58 -0800 Subject: thp: transparent hugepage vmstat Add hugepage stat information to /proc/vmstat and /proc/meminfo. 
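Once applied, the new AnonHugePages field can be read like any other /proc/meminfo counter. A minimal user-space sketch (not part of the patch) that prints just that line:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (strncmp(line, "AnonHugePages:", 14) == 0)
			fputs(line, stdout);	/* e.g. "AnonHugePages:    2048 kB" */
	}
	fclose(f);
	return 0;
}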
Signed-off-by: Andrea Arcangeli Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/meminfo.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index a65239cfd97e..ed257d141568 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -100,6 +100,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) "VmallocChunk: %8lu kB\n" #ifdef CONFIG_MEMORY_FAILURE "HardwareCorrupted: %5lu kB\n" +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + "AnonHugePages: %8lu kB\n" #endif , K(i.totalram), @@ -128,7 +131,12 @@ static int meminfo_proc_show(struct seq_file *m, void *v) K(i.freeswap), K(global_page_state(NR_FILE_DIRTY)), K(global_page_state(NR_WRITEBACK)), - K(global_page_state(NR_ANON_PAGES)), + K(global_page_state(NR_ANON_PAGES) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * + HPAGE_PMD_NR +#endif + ), K(global_page_state(NR_FILE_MAPPED)), K(global_page_state(NR_SHMEM)), K(global_page_state(NR_SLAB_RECLAIMABLE) + @@ -150,6 +158,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v) vmi.largest_chunk >> 10 #ifdef CONFIG_MEMORY_FAILURE ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10) +#endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) * + HPAGE_PMD_NR) #endif ); -- cgit v1.2.2 From 5f24ce5fd34c3ca1b3d10d30da754732da64d5c0 Mon Sep 17 00:00:00 2001 From: Andrea Arcangeli Date: Thu, 13 Jan 2011 15:47:00 -0800 Subject: thp: remove PG_buddy PG_buddy can be converted to _mapcount == -2. So the PG_compound_lock can be added to page->flags without overflowing (because of the sparse section bits increasing) with CONFIG_X86_PAE=y and CONFIG_X86_PAT=y. This also has to move the memory hotplug code from _mapcount to lru.next to avoid any risk of clashes. We can't use lru.next for PG_buddy removal, but memory hotplug can use lru.next even more easily than the mapcount instead. Signed-off-by: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/page.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/proc/page.c b/fs/proc/page.c index b06c674624e6..6d8e6a9e93ab 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -116,15 +116,17 @@ u64 stable_page_flags(struct page *page) if (PageHuge(page)) u |= 1 << KPF_HUGE; - u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked); - /* - * Caveats on high order pages: - * PG_buddy will only be set on the head page; SLUB/SLQB do the same - * for PG_slab; SLOB won't set PG_slab at all on compound pages. + * Caveats on high order pages: page->_count will only be set + * -1 on the head page; SLUB/SLQB do the same for PG_slab; + * SLOB won't set PG_slab at all on compound pages. */ + if (PageBuddy(page)) + u |= 1 << KPF_BUDDY; + + u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked); + u |= kpf_copy_bit(k, KPF_SLAB, PG_slab); - u |= kpf_copy_bit(k, KPF_BUDDY, PG_buddy); u |= kpf_copy_bit(k, KPF_ERROR, PG_error); u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty); -- cgit v1.2.2 From cb9ef8d5e394f70db64bda79c20d3569a20d2574 Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 13 Jan 2011 15:47:26 -0800 Subject: fs/fs-writeback.c: fix sync_inodes_sb() return value kernel-doc The sync_inodes_sb() function does not have a return value. Remove the outdated documentation comment. 
Signed-off-by: Stefan Hajnoczi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fs-writeback.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 05aab263e9aa..59c6e4956786 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -1225,7 +1225,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr_if_idle); * @sb: the superblock * * This function writes and waits on any dirty inode belonging to this - * super_block. The number of pages synced is returned. + * super_block. */ void sync_inodes_sb(struct super_block *sb) { -- cgit v1.2.2 From 9ee1ba5402e9d35fb35f8e61c968f4987b5fb443 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Thu, 13 Jan 2011 17:08:19 -0500 Subject: nfsd4: initialize cb_per_client Otherwise a callback that is aborted before it runs will result in a list_del on an uninitialized list head. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index f1d9dd45553a..209e186386a0 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -866,6 +866,7 @@ void nfsd4_cb_recall(struct nfs4_delegation *dp) cb->cb_ops = &nfsd4_cb_recall_ops; dp->dl_retries = 1; + INIT_LIST_HEAD(&cb->cb_per_client); cb->cb_done = true; run_nfsd4_cb(&dp->dl_recall); -- cgit v1.2.2 From 9ce137eee4febaabca81143be07d4205d2bd52d4 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 11 Jan 2011 14:07:12 -0500 Subject: nfsd: don't support msnfs export option We've long had these pointless #ifdef MSNFS's sprinkled throughout the code--pointless because MSNFS is always defined (and we give no config option to make that easy to change). So we could just remove the ifdef's and compile the resulting code unconditionally. But as long as we're there: why not just rip out this code entirely? The only purpose is to implement the "msnfs" export option which turns on Windows-like behavior in some cases, and: - the export option isn't documented anywhere; - the userland utilities (which would need to be able to parse "msnfs" in an export file) don't support it; - I don't know how to maintain this, as I don't know what the proper behavior is; and - google shows no evidence that anyone has ever used this. Signed-off-by: J. Bruce Fields --- fs/nfsd/export.c | 4 ---- fs/nfsd/vfs.c | 41 ++--------------------------------------- 2 files changed, 2 insertions(+), 43 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index c0fcb7ab7f6d..8b31e5f8795d 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -1,4 +1,3 @@ -#define MSNFS /* HACK HACK */ /* * NFS exporting and validation. * @@ -1444,9 +1443,6 @@ static struct flags { { NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}}, { NFSEXP_NOAUTHNLM, {"insecure_locks", ""}}, { NFSEXP_V4ROOT, {"v4root", ""}}, -#ifdef MSNFS - { NFSEXP_MSNFS, {"msnfs", ""}}, -#endif { 0, {"", ""}} }; diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index b991125ce4a5..0a01e2fc5dda 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1,4 +1,3 @@ -#define MSNFS /* HACK HACK */ /* * File operations used by nfsd. 
Some of these have been ripped from * other parts of the kernel because they weren't exported, others @@ -875,15 +874,6 @@ static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe, return __splice_from_pipe(pipe, sd, nfsd_splice_actor); } -static inline int svc_msnfs(struct svc_fh *ffhp) -{ -#ifdef MSNFS - return (ffhp->fh_export->ex_flags & NFSEXP_MSNFS); -#else - return 0; -#endif -} - static __be32 nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, loff_t offset, struct kvec *vec, int vlen, unsigned long *count) @@ -896,9 +886,6 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, err = nfserr_perm; inode = file->f_path.dentry->d_inode; - if (svc_msnfs(fhp) && !lock_may_read(inode, offset, *count)) - goto out; - if (file->f_op->splice_read && rqstp->rq_splice_ok) { struct splice_desc sd = { .len = 0, @@ -923,7 +910,6 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, fsnotify_access(file); } else err = nfserrno(host_err); -out: return err; } @@ -988,14 +974,6 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, int stable = *stablep; int use_wgather; -#ifdef MSNFS - err = nfserr_perm; - - if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) && - (!lock_may_write(file->f_path.dentry->d_inode, offset, *cnt))) - goto out; -#endif - dentry = file->f_path.dentry; inode = dentry->d_inode; exp = fhp->fh_export; @@ -1046,7 +1024,6 @@ out_nfserr: err = 0; else err = nfserrno(host_err); -out: return err; } @@ -1751,13 +1728,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, if (ndentry == trap) goto out_dput_new; - if (svc_msnfs(ffhp) && - ((atomic_read(&odentry->d_count) > 1) - || (atomic_read(&ndentry->d_count) > 1))) { - host_err = -EPERM; - goto out_dput_new; - } - host_err = -EXDEV; if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt) goto out_dput_new; @@ -1836,17 +1806,10 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, if (host_err) goto out_nfserr; - if (type != S_IFDIR) { /* It's UNLINK */ -#ifdef MSNFS - if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) && - (atomic_read(&rdentry->d_count) > 1)) { - host_err = -EPERM; - } else -#endif + if (type != S_IFDIR) host_err = vfs_unlink(dirp, rdentry); - } else { /* It's RMDIR */ + else host_err = vfs_rmdir(dirp, rdentry); - } dput(rdentry); -- cgit v1.2.2 From 6a76bebefe15d9a08864f824d7f8d5beaf37c997 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 11 Jan 2011 12:54:39 -0500 Subject: nfsd4: break lease on nfsd setattr Leases (delegations) should really be broken on any metadata change, not just on size change. Signed-off-by: J. Bruce Fields --- fs/nfsd/vfs.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 0a01e2fc5dda..f97d4356431b 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -374,14 +374,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, goto out; } - /* - * If we are changing the size of the file, then - * we need to break all leases. 
- */ - host_err = break_lease(inode, O_WRONLY | O_NONBLOCK); - if (host_err) /* ENOMEM or EWOULDBLOCK */ - goto out_nfserr; - host_err = get_write_access(inode); if (host_err) goto out_nfserr; @@ -422,7 +414,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, err = nfserr_notsync; if (!check_guard || guardtime == inode->i_ctime.tv_sec) { + host_err = break_lease(inode, O_WRONLY | O_NONBLOCK); + if (host_err) + goto out_nfserr; fh_lock(fhp); + host_err = notify_change(dentry, iap); err = nfserrno(host_err); fh_unlock(fhp); -- cgit v1.2.2 From 4795bb37effb7b8fe77e2d2034545d062d3788a8 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 11 Jan 2011 13:55:46 -0500 Subject: nfsd: break lease on unlink, link, and rename Any change to any of the links pointing to an entry should also break delegations. Signed-off-by: J. Bruce Fields --- fs/nfsd/vfs.c | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index f97d4356431b..a110adbb3673 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -272,6 +272,13 @@ out: return err; } +static int nfsd_break_lease(struct inode *inode) +{ + if (!S_ISREG(inode->i_mode)) + return 0; + return break_lease(inode, O_WRONLY | O_NONBLOCK); +} + /* * Commit metadata changes to stable storage. */ @@ -414,7 +421,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, err = nfserr_notsync; if (!check_guard || guardtime == inode->i_ctime.tv_sec) { - host_err = break_lease(inode, O_WRONLY | O_NONBLOCK); + host_err = nfsd_break_lease(inode); if (host_err) goto out_nfserr; fh_lock(fhp); @@ -1639,6 +1646,12 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, err = nfserrno(host_err); goto out_dput; } + err = nfserr_noent; + if (!dold->d_inode) + goto out_drop_write; + host_err = nfsd_break_lease(dold->d_inode); + if (host_err) + goto out_drop_write; host_err = vfs_link(dold, dirp, dnew); if (!host_err) { err = nfserrno(commit_metadata(ffhp)); @@ -1650,6 +1663,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, else err = nfserrno(host_err); } +out_drop_write: mnt_drop_write(tfhp->fh_export->ex_path.mnt); out_dput: dput(dnew); @@ -1731,15 +1745,17 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, if (host_err) goto out_dput_new; + host_err = nfsd_break_lease(odentry->d_inode); + if (host_err) + goto out_drop_write; host_err = vfs_rename(fdir, odentry, tdir, ndentry); if (!host_err) { host_err = commit_metadata(tfhp); if (!host_err) host_err = commit_metadata(ffhp); } - +out_drop_write: mnt_drop_write(ffhp->fh_export->ex_path.mnt); - out_dput_new: dput(ndentry); out_dput_old: @@ -1802,11 +1818,14 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, if (host_err) goto out_nfserr; + host_err = nfsd_break_lease(rdentry->d_inode); + if (host_err) + goto out_put; if (type != S_IFDIR) host_err = vfs_unlink(dirp, rdentry); else host_err = vfs_rmdir(dirp, rdentry); - +out_put: dput(rdentry); if (!host_err) -- cgit v1.2.2 From bb20c18db6fbb5e6ba499c76473a487d35073467 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 14 Jan 2011 02:35:53 +0000 Subject: fs: force_reval_path drop rcu-walk before d_invalidate d_revalidate can return in rcu-walk mode even when it returns 0. 
We can't just call any old dcache function on rcu-walk dentry (the dentry is unstable, so even through d_lock can safely be taken, the result may no longer be what we expect -- careful re-checks would be required). So just drop rcu in this case. (I missed this conversion when switching to the rcu-walk convention that Linus suggested) Signed-off-by: Nick Piggin --- fs/namei.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 19433cdba011..0f02359ce685 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -583,6 +583,13 @@ void release_open_intent(struct nameidata *nd) fput(nd->intent.open.file); } +/* + * Call d_revalidate and handle filesystems that request rcu-walk + * to be dropped. This may be called and return in rcu-walk mode, + * regardless of success or error. If -ECHILD is returned, the caller + * must return -ECHILD back up the path walk stack so path walk may + * be restarted in ref-walk mode. + */ static int d_revalidate(struct dentry *dentry, struct nameidata *nd) { int status; @@ -673,6 +680,9 @@ force_reval_path(struct path *path, struct nameidata *nd) return 0; if (!status) { + /* Don't d_invalidate in rcu-walk mode */ + if (nameidata_drop_rcu(nd)) + return -ECHILD; d_invalidate(dentry); status = -ESTALE; } -- cgit v1.2.2 From 90dbb77ba48dddb87445d238e84cd137cf97dd98 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 14 Jan 2011 02:36:19 +0000 Subject: fs: fix dropping of rcu-walk from force_reval_path As J. R. Okajima noted, force_reval_path passes in the same dentry to d_revalidate as the one in the nameidata structure (other callers pass in a child), so the locking breaks. This can oops with a chrooted nfs mount, for example. Similarly there can be other problems with revalidating a dentry which is already in nameidata of the path walk. Signed-off-by: Nick Piggin --- fs/namei.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 0f02359ce685..14c73edca9ce 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -479,6 +479,14 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry struct fs_struct *fs = current->fs; struct dentry *parent = nd->path.dentry; + /* + * It can be possible to revalidate the dentry that we started + * the path walk with. force_reval_path may also revalidate the + * dentry already committed to the nameidata. + */ + if (unlikely(parent == dentry)) + return nameidata_drop_rcu(nd); + BUG_ON(!(nd->flags & LOOKUP_RCU)); if (nd->root.mnt) { spin_lock(&fs->lock); -- cgit v1.2.2 From 657e94b673a805b427903c5628e95348235fad06 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 14 Jan 2011 02:48:39 +0000 Subject: nfs: add missing rcu-walk check Signed-off-by: Nick Piggin --- fs/nfs/dir.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index d33da530097a..a0d8320bed9c 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1410,11 +1410,15 @@ no_open: static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) { struct dentry *parent = NULL; - struct inode *inode = dentry->d_inode; + struct inode *inode; struct inode *dir; struct nfs_open_context *ctx; int openflags, ret = 0; + if (nd->flags & LOOKUP_RCU) + return -ECHILD; + + inode = dentry->d_inode; if (!is_atomic_open(nd) || d_mountpoint(dentry)) goto no_open; -- cgit v1.2.2 From f20877d94a74557b7c28b4ed8920d834c31e0ea5 Mon Sep 17 00:00:00 2001 From: "J. R. 
Okajima" Date: Fri, 14 Jan 2011 03:56:04 +0000 Subject: fs: fix do_last error case when need_reval_dot When open(2) without O_DIRECTORY opens an existing dir, it should return EISDIR. In do_last(), the variable 'error' is initialized EISDIR, but it is changed by d_revalidate() which returns any positive to represent 'the target dir is valid.' Should we keep and return the initialized 'error' in this case. Signed-off-by: Nick Piggin --- fs/namei.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 14c73edca9ce..bc24894c5f14 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2122,11 +2122,13 @@ static struct file *do_last(struct nameidata *nd, struct path *path, dir = nd->path.dentry; case LAST_DOT: if (need_reval_dot(dir)) { - error = d_revalidate(nd->path.dentry, nd); - if (!error) - error = -ESTALE; - if (error < 0) + int status = d_revalidate(nd->path.dentry, nd); + if (!status) + status = -ESTALE; + if (status < 0) { + error = status; goto exit; + } } /* fallthrough */ case LAST_ROOT: -- cgit v1.2.2 From 7b9337aaf98f9941d0927a75217d3ff31afec609 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 14 Jan 2011 08:42:43 +0000 Subject: fs: namei fix ->put_link on wrong inode in do_filp_open J. R. Okajima noticed that ->put_link is being attempted on the wrong inode, and suggested the way to fix it. I changed it a bit according to Al's suggestion to keep an explicit link path around. Signed-off-by: Nick Piggin --- fs/namei.c | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index bc24894c5f14..9cda4c452a6d 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -779,7 +779,8 @@ static void path_put_conditional(struct path *path, struct nameidata *nd) mntput(path->mnt); } -static inline void path_to_nameidata(struct path *path, struct nameidata *nd) +static inline void path_to_nameidata(const struct path *path, + struct nameidata *nd) { if (!(nd->flags & LOOKUP_RCU)) { dput(nd->path.dentry); @@ -791,20 +792,20 @@ static inline void path_to_nameidata(struct path *path, struct nameidata *nd) } static __always_inline int -__do_follow_link(struct path *path, struct nameidata *nd, void **p) +__do_follow_link(const struct path *link, struct nameidata *nd, void **p) { int error; - struct dentry *dentry = path->dentry; + struct dentry *dentry = link->dentry; - touch_atime(path->mnt, dentry); + touch_atime(link->mnt, dentry); nd_set_link(nd, NULL); - if (path->mnt != nd->path.mnt) { - path_to_nameidata(path, nd); + if (link->mnt != nd->path.mnt) { + path_to_nameidata(link, nd); nd->inode = nd->path.dentry->d_inode; dget(dentry); } - mntget(path->mnt); + mntget(link->mnt); nd->last_type = LAST_BIND; *p = dentry->d_inode->i_op->follow_link(dentry, nd); @@ -2347,11 +2348,12 @@ reval: nd.flags = flags; filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname); while (unlikely(!filp)) { /* trailing symlink */ - struct path holder; + struct path link = path; + struct inode *linki = link.dentry->d_inode; void *cookie; error = -ELOOP; /* S_ISDIR part is a temporary automount kludge */ - if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(nd.inode->i_mode)) + if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(linki->i_mode)) goto exit_dput; if (count++ == 32) goto exit_dput; @@ -2367,23 +2369,22 @@ reval: * just set LAST_BIND. 
*/ nd.flags |= LOOKUP_PARENT; - error = security_inode_follow_link(path.dentry, &nd); + error = security_inode_follow_link(link.dentry, &nd); if (error) goto exit_dput; - error = __do_follow_link(&path, &nd, &cookie); + error = __do_follow_link(&link, &nd, &cookie); if (unlikely(error)) { - if (!IS_ERR(cookie) && nd.inode->i_op->put_link) - nd.inode->i_op->put_link(path.dentry, &nd, cookie); + if (!IS_ERR(cookie) && linki->i_op->put_link) + linki->i_op->put_link(link.dentry, &nd, cookie); /* nd.path had been dropped */ - nd.path = path; + nd.path = link; goto out_path; } - holder = path; nd.flags &= ~LOOKUP_PARENT; filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname); - if (nd.inode->i_op->put_link) - nd.inode->i_op->put_link(holder.dentry, &nd, cookie); - path_put(&holder); + if (linki->i_op->put_link) + linki->i_op->put_link(link.dentry, &nd, cookie); + path_put(&link); } out: if (nd.root.mnt) -- cgit v1.2.2 From ba28b93a5227cc69ec811507f7d85ac25fa20fe2 Mon Sep 17 00:00:00 2001 From: Akshat Aranya Date: Fri, 14 Jan 2011 16:00:47 +0000 Subject: FS-Cache: Fix operation handling fscache_submit_exclusive_op() adds an operation to the pending list if other operations are pending. Fix the check for pending ops as n_ops must be greater than 0 at the point it is checked as it is incremented immediately before under lock. Signed-off-by: Akshat Aranya Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- fs/fscache/operation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index b9f34eaede09..48a18f184d50 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -101,7 +101,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object, object->n_ops++; object->n_exclusive++; /* reads and writes must wait */ - if (object->n_ops > 0) { + if (object->n_ops > 1) { atomic_inc(&op->usage); list_add_tail(&op->pend_link, &object->pending_ops); fscache_stat(&fscache_n_op_pend); -- cgit v1.2.2 From 0ad53eeefcbb2620b6a71ffdaad4add20b450b8b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 14 Jan 2011 15:56:37 +0000 Subject: afs: add afs_wq and use it instead of the system workqueue flush_scheduled_work() is going away. afs needs to make sure all the works it has queued have finished before being unloaded and there can be arbitrary number of pending works. Add afs_wq and use it as the flush domain instead of the system workqueue. Also, convert cancel_delayed_work() + flush_scheduled_work() to cancel_delayed_work_sync() in afs_mntpt_kill_timer(). 
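
[Editorial aside, not part of the patch: the conversion above is the usual pattern when flush_scheduled_work() is retired — allocate a private workqueue at module init, queue all deferred work on it instead of the system workqueue, and destroy it (which also flushes it) at module exit. A minimal sketch follows, using only the standard workqueue API; the "example" names are hypothetical and stand in for afs_wq and the afs works touched by the diff below.]

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	/* deferred work; runs on example_wq rather than the system workqueue */
}

static int __init example_init(void)
{
	/* private workqueue that doubles as the module's flush domain */
	example_wq = alloc_workqueue("example", 0, 0);
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_work, example_work_fn);
	queue_work(example_wq, &example_work);	/* instead of schedule_work() */
	return 0;
}

static void __exit example_exit(void)
{
	/* waits for all queued work and frees the queue; no flush_scheduled_work() */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
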
Signed-off-by: Tejun Heo Signed-off-by: David Howells Cc: linux-afs@lists.infradead.org Signed-off-by: Linus Torvalds --- fs/afs/cmservice.c | 12 ++++++------ fs/afs/internal.h | 1 + fs/afs/main.c | 13 +++++++++++-- fs/afs/mntpt.c | 11 +++++------ fs/afs/rxrpc.c | 2 +- fs/afs/server.c | 13 +++++++------ fs/afs/vlocation.c | 14 +++++++------- 7 files changed, 38 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index a3bcec75c54a..1c8c6cc6de30 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -289,7 +289,7 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, call->server = server; INIT_WORK(&call->work, SRXAFSCB_CallBack); - schedule_work(&call->work); + queue_work(afs_wq, &call->work); return 0; } @@ -336,7 +336,7 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call, call->server = server; INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); - schedule_work(&call->work); + queue_work(afs_wq, &call->work); return 0; } @@ -367,7 +367,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call, call->server = server; INIT_WORK(&call->work, SRXAFSCB_InitCallBackState); - schedule_work(&call->work); + queue_work(afs_wq, &call->work); return 0; } @@ -400,7 +400,7 @@ static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb, call->state = AFS_CALL_REPLYING; INIT_WORK(&call->work, SRXAFSCB_Probe); - schedule_work(&call->work); + queue_work(afs_wq, &call->work); return 0; } @@ -496,7 +496,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb, call->state = AFS_CALL_REPLYING; INIT_WORK(&call->work, SRXAFSCB_ProbeUuid); - schedule_work(&call->work); + queue_work(afs_wq, &call->work); return 0; } @@ -580,6 +580,6 @@ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call, call->state = AFS_CALL_REPLYING; INIT_WORK(&call->work, SRXAFSCB_TellMeAboutYourself); - schedule_work(&call->work); + queue_work(afs_wq, &call->work); return 0; } diff --git a/fs/afs/internal.h b/fs/afs/internal.h index ab6db5abaf53..58c633b80246 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -577,6 +577,7 @@ extern int afs_drop_inode(struct inode *); /* * main.c */ +extern struct workqueue_struct *afs_wq; extern struct afs_uuid afs_uuid; /* diff --git a/fs/afs/main.c b/fs/afs/main.c index cfd1cbe25b22..42dd2e499ed8 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c @@ -30,6 +30,7 @@ module_param(rootcell, charp, 0); MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list"); struct afs_uuid afs_uuid; +struct workqueue_struct *afs_wq; /* * get a client UUID @@ -87,10 +88,16 @@ static int __init afs_init(void) if (ret < 0) return ret; + /* create workqueue */ + ret = -ENOMEM; + afs_wq = alloc_workqueue("afs", 0, 0); + if (!afs_wq) + return ret; + /* register the /proc stuff */ ret = afs_proc_init(); if (ret < 0) - return ret; + goto error_proc; #ifdef CONFIG_AFS_FSCACHE /* we want to be able to cache */ @@ -140,6 +147,8 @@ error_cell_init: error_cache: #endif afs_proc_cleanup(); +error_proc: + destroy_workqueue(afs_wq); rcu_barrier(); printk(KERN_ERR "kAFS: failed to register: %d\n", ret); return ret; @@ -163,7 +172,7 @@ static void __exit afs_exit(void) afs_purge_servers(); afs_callback_update_kill(); afs_vlocation_purge(); - flush_scheduled_work(); + destroy_workqueue(afs_wq); afs_cell_purge(); #ifdef CONFIG_AFS_FSCACHE fscache_unregister_netfs(&afs_cache_netfs); diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index 
6153417caf57..e83c0336e7b5 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -268,8 +268,8 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd) path_put(&nd->path); nd->path.mnt = newmnt; nd->path.dentry = dget(newmnt->mnt_root); - schedule_delayed_work(&afs_mntpt_expiry_timer, - afs_mntpt_expiry_timeout * HZ); + queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer, + afs_mntpt_expiry_timeout * HZ); break; case -EBUSY: /* someone else made a mount here whilst we were busy */ @@ -295,8 +295,8 @@ static void afs_mntpt_expiry_timed_out(struct work_struct *work) if (!list_empty(&afs_vfsmounts)) { mark_mounts_for_expiry(&afs_vfsmounts); - schedule_delayed_work(&afs_mntpt_expiry_timer, - afs_mntpt_expiry_timeout * HZ); + queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer, + afs_mntpt_expiry_timeout * HZ); } _leave(""); @@ -310,6 +310,5 @@ void afs_mntpt_kill_timer(void) _enter(""); ASSERT(list_empty(&afs_vfsmounts)); - cancel_delayed_work(&afs_mntpt_expiry_timer); - flush_scheduled_work(); + cancel_delayed_work_sync(&afs_mntpt_expiry_timer); } diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 654d8fdbf01f..e45a323aebb4 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -410,7 +410,7 @@ static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID, if (!call) { /* its an incoming call for our callback service */ skb_queue_tail(&afs_incoming_calls, skb); - schedule_work(&afs_collect_incoming_call_work); + queue_work(afs_wq, &afs_collect_incoming_call_work); } else { /* route the messages directly to the appropriate call */ skb_queue_tail(&call->rx_queue, skb); diff --git a/fs/afs/server.c b/fs/afs/server.c index 9fdc7fe3a7bc..d59b7516e943 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -238,8 +238,8 @@ void afs_put_server(struct afs_server *server) if (atomic_read(&server->usage) == 0) { list_move_tail(&server->grave, &afs_server_graveyard); server->time_of_death = get_seconds(); - schedule_delayed_work(&afs_server_reaper, - afs_server_timeout * HZ); + queue_delayed_work(afs_wq, &afs_server_reaper, + afs_server_timeout * HZ); } spin_unlock(&afs_server_graveyard_lock); _leave(" [dead]"); @@ -285,10 +285,11 @@ static void afs_reap_server(struct work_struct *work) expiry = server->time_of_death + afs_server_timeout; if (expiry > now) { delay = (expiry - now) * HZ; - if (!schedule_delayed_work(&afs_server_reaper, delay)) { + if (!queue_delayed_work(afs_wq, &afs_server_reaper, + delay)) { cancel_delayed_work(&afs_server_reaper); - schedule_delayed_work(&afs_server_reaper, - delay); + queue_delayed_work(afs_wq, &afs_server_reaper, + delay); } break; } @@ -323,5 +324,5 @@ void __exit afs_purge_servers(void) { afs_server_timeout = 0; cancel_delayed_work(&afs_server_reaper); - schedule_delayed_work(&afs_server_reaper, 0); + queue_delayed_work(afs_wq, &afs_server_reaper, 0); } diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c index 9ac260d1361d..431984d2e372 100644 --- a/fs/afs/vlocation.c +++ b/fs/afs/vlocation.c @@ -507,8 +507,8 @@ void afs_put_vlocation(struct afs_vlocation *vl) _debug("buried"); list_move_tail(&vl->grave, &afs_vlocation_graveyard); vl->time_of_death = get_seconds(); - schedule_delayed_work(&afs_vlocation_reap, - afs_vlocation_timeout * HZ); + queue_delayed_work(afs_wq, &afs_vlocation_reap, + afs_vlocation_timeout * HZ); /* suspend updates on this record */ if (!list_empty(&vl->update)) { @@ -561,11 +561,11 @@ static void afs_vlocation_reaper(struct work_struct *work) if (expiry > now) { delay = (expiry - now) * HZ; _debug("delay 
%lu", delay); - if (!schedule_delayed_work(&afs_vlocation_reap, - delay)) { + if (!queue_delayed_work(afs_wq, &afs_vlocation_reap, + delay)) { cancel_delayed_work(&afs_vlocation_reap); - schedule_delayed_work(&afs_vlocation_reap, - delay); + queue_delayed_work(afs_wq, &afs_vlocation_reap, + delay); } break; } @@ -620,7 +620,7 @@ void afs_vlocation_purge(void) destroy_workqueue(afs_vlocation_update_worker); cancel_delayed_work(&afs_vlocation_reap); - schedule_delayed_work(&afs_vlocation_reap, 0); + queue_delayed_work(afs_wq, &afs_vlocation_reap, 0); } /* -- cgit v1.2.2 From 49731baa41df404c2c3f44555869ab387363af43 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Fri, 14 Jan 2011 18:43:57 +0100 Subject: block: restore multiple bd_link_disk_holder() support Commit e09b457b (block: simplify holder symlink handling) incorrectly assumed that there is only one link at maximum. dm may use multiple links and expects block layer to track reference count for each link, which is different from and unrelated to the exclusive device holder identified by @holder when the device is opened. Remove the single holder assumption and automatic removal of the link and revive the per-link reference count tracking. The code essentially behaves the same as before commit e09b457b sans the unnecessary kobject reference count dancing. While at it, note that this facility should not be used by anyone else than the current ones. Sysfs symlinks shouldn't be abused like this and the whole thing doesn't belong in the block layer at all. Signed-off-by: Tejun Heo Reported-by: Milan Broz Cc: Jun'ichi Nomura Cc: Neil Brown Cc: linux-raid@vger.kernel.org Cc: Kay Sievers Signed-off-by: Jens Axboe --- fs/block_dev.c | 93 ++++++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 75 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index fe3f59c14a02..333a7bb4cb9c 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -432,6 +432,9 @@ static void init_once(void *foo) mutex_init(&bdev->bd_mutex); INIT_LIST_HEAD(&bdev->bd_inodes); INIT_LIST_HEAD(&bdev->bd_list); +#ifdef CONFIG_SYSFS + INIT_LIST_HEAD(&bdev->bd_holder_disks); +#endif inode_init_once(&ei->vfs_inode); /* Initialize mutex for freeze. */ mutex_init(&bdev->bd_fsfreeze_mutex); @@ -779,6 +782,23 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, } #ifdef CONFIG_SYSFS +struct bd_holder_disk { + struct list_head list; + struct gendisk *disk; + int refcnt; +}; + +static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev, + struct gendisk *disk) +{ + struct bd_holder_disk *holder; + + list_for_each_entry(holder, &bdev->bd_holder_disks, list) + if (holder->disk == disk) + return holder; + return NULL; +} + static int add_symlink(struct kobject *from, struct kobject *to) { return sysfs_create_link(from, to, kobject_name(to)); @@ -794,6 +814,8 @@ static void del_symlink(struct kobject *from, struct kobject *to) * @bdev: the claimed slave bdev * @disk: the holding disk * + * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT. + * * This functions creates the following sysfs symlinks. 
* * - from "slaves" directory of the holder @disk to the claimed @bdev @@ -817,47 +839,83 @@ static void del_symlink(struct kobject *from, struct kobject *to) */ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) { + struct bd_holder_disk *holder; int ret = 0; mutex_lock(&bdev->bd_mutex); - WARN_ON_ONCE(!bdev->bd_holder || bdev->bd_holder_disk); + WARN_ON_ONCE(!bdev->bd_holder); /* FIXME: remove the following once add_disk() handles errors */ if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir)) goto out_unlock; - ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); - if (ret) + holder = bd_find_holder_disk(bdev, disk); + if (holder) { + holder->refcnt++; goto out_unlock; + } - ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); - if (ret) { - del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); + holder = kzalloc(sizeof(*holder), GFP_KERNEL); + if (!holder) { + ret = -ENOMEM; goto out_unlock; } - bdev->bd_holder_disk = disk; + INIT_LIST_HEAD(&holder->list); + holder->disk = disk; + holder->refcnt = 1; + + ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); + if (ret) + goto out_free; + + ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); + if (ret) + goto out_del; + + list_add(&holder->list, &bdev->bd_holder_disks); + goto out_unlock; + +out_del: + del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); +out_free: + kfree(holder); out_unlock: mutex_unlock(&bdev->bd_mutex); return ret; } EXPORT_SYMBOL_GPL(bd_link_disk_holder); -static void bd_unlink_disk_holder(struct block_device *bdev) +/** + * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder() + * @bdev: the calimed slave bdev + * @disk: the holding disk + * + * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT. + * + * CONTEXT: + * Might sleep. + */ +void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) { - struct gendisk *disk = bdev->bd_holder_disk; + struct bd_holder_disk *holder; - bdev->bd_holder_disk = NULL; - if (!disk) - return; + mutex_lock(&bdev->bd_mutex); - del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); - del_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); + holder = bd_find_holder_disk(bdev, disk); + + if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) { + del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); + del_symlink(bdev->bd_part->holder_dir, + &disk_to_dev(disk)->kobj); + list_del_init(&holder->list); + kfree(holder); + } + + mutex_unlock(&bdev->bd_mutex); } -#else -static inline void bd_unlink_disk_holder(struct block_device *bdev) -{ } +EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); #endif /** @@ -1380,7 +1438,6 @@ int blkdev_put(struct block_device *bdev, fmode_t mode) * unblock evpoll if it was a write holder. */ if (bdev_free) { - bd_unlink_disk_holder(bdev); if (bdev->bd_write_holder) { disk_unblock_events(bdev->bd_disk); bdev->bd_write_holder = false; -- cgit v1.2.2 From 56c24305d1494a7e345c75669dc60e8b231b735b Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:25 -0500 Subject: cifs: cFYI the entire error code in map_smb_to_linux_error We currently only print the DOS error part. 
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/netmisc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index 9aad47a2d62f..6783ce6cdc89 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -899,8 +899,8 @@ map_smb_to_linux_error(struct smb_hdr *smb, int logErr) } /* else ERRHRD class errors or junk - return EIO */ - cFYI(1, "Mapping smb error code %d to POSIX err %d", - smberrcode, rc); + cFYI(1, "Mapping smb error code 0x%x to POSIX err %d", + le32_to_cpu(smb->Status.CifsError), rc); /* generic corrective action e.g. reconnect SMB session on * ERRbaduid could be added */ -- cgit v1.2.2 From bd7633195581c7665ce9dd80c665ec93466d1b64 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 10:33:24 -0500 Subject: cifs: add cruid= mount option In commit 3e4b3e1f we separated the "uid" mount option such that it no longer determined the owner of the credential cache by default. When we did this, we added a new option to cifs.upcall (--legacy-uid) to try to make it so that it would behave the same was as it did before. This ignored a rather important point -- the kernel has no way to know what options are being passed to cifs.upcall, so it doesn't know what uid it should use to determine whether to match an existing krb5 session. The simplest solution is to simply add a new "cruid=" mount option that only governs the uid owner of the credential cache for the mount. Unfortunately, this means that the --legacy-uid option in cifs.upcall was ill-considered and is now useless, but I don't see a better way to deal with this. A patch for the mount.cifs manpage will follow once this patch has been accepted. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a65d311d163a..9f59887badd2 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1113,6 +1113,8 @@ cifs_parse_mount_options(char *options, const char *devname, } else if (!strnicmp(data, "uid", 3) && value && *value) { vol->linux_uid = simple_strtoul(value, &value, 0); uid_specified = true; + } else if (!strnicmp(data, "cruid", 5) && value && *value) { + vol->cred_uid = simple_strtoul(value, &value, 0); } else if (!strnicmp(data, "forceuid", 8)) { override_uid = 1; } else if (!strnicmp(data, "noforceuid", 10)) { -- cgit v1.2.2 From a8f2800b4f7b76cecb7209cb6a7d2b14904fc711 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Fri, 14 Jan 2011 14:25:48 -0500 Subject: nfsd4: fix callback restarting Ensure a new callback is added to the client's list of callbacks at most once. Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4callback.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 209e186386a0..ae93c5c83e87 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -639,9 +639,12 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) if (!nfsd41_cb_get_slot(clp, task)) return; } - cb->cb_done = false; spin_lock(&clp->cl_lock); - list_add(&cb->cb_per_client, &clp->cl_callbacks); + if (list_empty(&cb->cb_per_client)) { + /* This is the first call, not a restart */ + cb->cb_done = false; + list_add(&cb->cb_per_client, &clp->cl_callbacks); + } spin_unlock(&clp->cl_lock); rpc_call_start(task); } @@ -678,10 +681,10 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) nfsd4_cb_done(task, calldata); - if (current_rpc_client == NULL) { - /* We're shutting down; give up. */ - /* XXX: err, or is it ok just to fall through - * and rpc_restart_call? */ + if (current_rpc_client != task->tk_client) { + /* We're shutting down or changing cl_cb_client; leave + * it to nfsd4_process_cb_update to restart the call if + * necessary. */ return; } @@ -699,12 +702,6 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) default: /* Network partition? */ nfsd4_mark_cb_down(clp, task->tk_status); - if (current_rpc_client != task->tk_client) { - /* queue a callback on the new connection: */ - atomic_inc(&dp->dl_count); - run_nfsd4_cb(&dp->dl_recall); - return; - } } if (dp->dl_retries--) { rpc_delay(task, 2*HZ); -- cgit v1.2.2 From 6f7f7caab259026234277b659485d22c1dcb1ab4 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 14 Jan 2011 13:26:18 -0800 Subject: Turn d_set_d_op() BUG_ON() into WARN_ON_ONCE() It's indicative of a real problem, and it actually triggers with autofs4, but the BUG_ON() is excessive. The autofs4 case is being fixed (to only set d_op in the ->lookup method) but not merged yet. In the meantime this gets the code limping along. Reported-by: Alex Elder Cc: Ian Kent Cc: Nick Piggin Cc: Al Viro Signed-off-by: Linus Torvalds --- fs/dcache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index 0c6d5c549d84..274a22250380 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1357,8 +1357,8 @@ EXPORT_SYMBOL(d_alloc_name); void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) { - BUG_ON(dentry->d_op); - BUG_ON(dentry->d_flags & (DCACHE_OP_HASH | + WARN_ON_ONCE(dentry->d_op); + WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | DCACHE_OP_COMPARE | DCACHE_OP_REVALIDATE | DCACHE_OP_DELETE )); -- cgit v1.2.2 From 1a8edf40e7c3eee955e0dd0316a7c9d85e36f597 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 15 Jan 2011 13:12:53 -0500 Subject: do_lookup() fix do_lookup() has a path leading from LOOKUP_RCU case to non-RCU crossing of mountpoints, which breaks things badly. If we hit need_revalidate: and do nothing in there, we need to come back into LOOKUP_RCU half of things, not to done: in non-RCU one. 
Signed-off-by: Al Viro --- fs/namei.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 8df7a78ace58..529e917ad2fc 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1089,6 +1089,7 @@ static int do_lookup(struct nameidata *nd, struct qstr *name, nd->seq = seq; if (dentry->d_flags & DCACHE_OP_REVALIDATE) goto need_revalidate; +done2: path->mnt = mnt; path->dentry = dentry; __follow_mount_rcu(nd, path, inode); @@ -1143,6 +1144,8 @@ need_revalidate: goto need_lookup; if (IS_ERR(dentry)) goto fail; + if (nd->flags & LOOKUP_RCU) + goto done2; goto done; fail: -- cgit v1.2.2 From 9875cf806403fae66b2410a3c2cc820d97731e04 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Jan 2011 18:45:21 +0000 Subject: Add a dentry op to handle automounting rather than abusing follow_link() Add a dentry op (d_automount) to handle automounting directories rather than abusing the follow_link() inode operation. The operation is keyed off a new dentry flag (DCACHE_NEED_AUTOMOUNT). This also makes it easier to add an AT_ flag to suppress terminal segment automount during pathwalk and removes the need for the kludge code in the pathwalk algorithm to handle directories with follow_link() semantics. The ->d_automount() dentry operation: struct vfsmount *(*d_automount)(struct path *mountpoint); takes a pointer to the directory to be mounted upon, which is expected to provide sufficient data to determine what should be mounted. If successful, it should return the vfsmount struct it creates (which it should also have added to the namespace using do_add_mount() or similar). If there's a collision with another automount attempt, NULL should be returned. If the directory specified by the parameter should be used directly rather than being mounted upon, -EISDIR should be returned. In any other case, an error code should be returned. The ->d_automount() operation is called with no locks held and may sleep. At this point the pathwalk algorithm will be in ref-walk mode. Within fs/namei.c itself, a new pathwalk subroutine (follow_automount()) is added to handle mountpoints. It will return -EREMOTE if the automount flag was set, but no d_automount() op was supplied, -ELOOP if we've encountered too many symlinks or mountpoints, -EISDIR if the walk point should be used without mounting and 0 if successful. The path will be updated to point to the mounted filesystem if a successful automount took place. __follow_mount() is replaced by follow_managed() which is more generic (especially with the patch that adds ->d_manage()). This handles transits from directories during pathwalk, including automounting and skipping over mountpoints (and holding processes with the next patch). __follow_mount_rcu() will jump out of RCU-walk mode if it encounters an automount point with nothing mounted on it. follow_dotdot*() does not handle automounts as you don't want to trigger them whilst following "..". I've also extracted the mount/don't-mount logic from autofs4 and included it here. It makes the mount go ahead anyway if someone calls open() or creat(), tries to traverse the directory, tries to chdir/chroot/etc. into the directory, or sticks a '/' on the end of the pathname. If they do a stat(), however, they'll only trigger the automount if they didn't also say O_NOFOLLOW. I've also added an inode flag (S_AUTOMOUNT) so that filesystems can mark their inodes as automount points. This flag is automatically propagated to the dentry as DCACHE_NEED_AUTOMOUNT by __d_instantiate(). 
This saves NFS and could save AFS a private flag bit apiece, but is not strictly necessary. It would be preferable to do the propagation in d_set_d_op(), but that doesn't normally have access to the inode. [AV: fixed breakage in case if __follow_mount_rcu() fails and nameidata_drop_rcu() succeeds in RCU case of do_lookup(); we need to fall through to non-RCU case after that, rather than just returning with ungrabbed *path] Signed-off-by: David Howells Was-Acked-by: Ian Kent Signed-off-by: Al Viro --- fs/dcache.c | 5 +- fs/namei.c | 226 +++++++++++++++++++++++++++++++++++++++++++++--------------- 2 files changed, 174 insertions(+), 57 deletions(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index 0c6d5c549d84..51f7bb6463af 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1380,8 +1380,11 @@ EXPORT_SYMBOL(d_set_d_op); static void __d_instantiate(struct dentry *dentry, struct inode *inode) { spin_lock(&dentry->d_lock); - if (inode) + if (inode) { + if (unlikely(IS_AUTOMOUNT(inode))) + dentry->d_flags |= DCACHE_NEED_AUTOMOUNT; list_add(&dentry->d_alias, &inode->i_dentry); + } dentry->d_inode = inode; dentry_rcuwalk_barrier(dentry); spin_unlock(&dentry->d_lock); diff --git a/fs/namei.c b/fs/namei.c index 529e917ad2fc..16109da68bbf 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -896,51 +896,120 @@ int follow_up(struct path *path) } /* - * serialization is taken care of in namespace.c + * Perform an automount + * - return -EISDIR to tell follow_managed() to stop and return the path we + * were called with. */ -static void __follow_mount_rcu(struct nameidata *nd, struct path *path, - struct inode **inode) +static int follow_automount(struct path *path, unsigned flags, + bool *need_mntput) { - while (d_mountpoint(path->dentry)) { - struct vfsmount *mounted; - mounted = __lookup_mnt(path->mnt, path->dentry, 1); - if (!mounted) - return; - path->mnt = mounted; - path->dentry = mounted->mnt_root; - nd->seq = read_seqcount_begin(&path->dentry->d_seq); - *inode = path->dentry->d_inode; + struct vfsmount *mnt; + + if (!path->dentry->d_op || !path->dentry->d_op->d_automount) + return -EREMOTE; + + /* We want to mount if someone is trying to open/create a file of any + * type under the mountpoint, wants to traverse through the mountpoint + * or wants to open the mounted directory. + * + * We don't want to mount if someone's just doing a stat and they've + * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and + * appended a '/' to the name. + */ + if (!(flags & LOOKUP_FOLLOW) && + !(flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY | + LOOKUP_OPEN | LOOKUP_CREATE))) + return -EISDIR; + + current->total_link_count++; + if (current->total_link_count >= 40) + return -ELOOP; + + mnt = path->dentry->d_op->d_automount(path); + if (IS_ERR(mnt)) { + /* + * The filesystem is allowed to return -EISDIR here to indicate + * it doesn't want to automount. For instance, autofs would do + * this so that its userspace daemon can mount on this dentry. + * + * However, we can only permit this if it's a terminal point in + * the path being looked up; if it wasn't then the remainder of + * the path is inaccessible and we should say so. 
+ */ + if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_CONTINUE)) + return -EREMOTE; + return PTR_ERR(mnt); } -} + if (!mnt) /* mount collision */ + return 0; -static int __follow_mount(struct path *path) -{ - int res = 0; - while (d_mountpoint(path->dentry)) { - struct vfsmount *mounted = lookup_mnt(path); - if (!mounted) - break; - dput(path->dentry); - if (res) - mntput(path->mnt); - path->mnt = mounted; - path->dentry = dget(mounted->mnt_root); - res = 1; + if (mnt->mnt_sb == path->mnt->mnt_sb && + mnt->mnt_root == path->dentry) { + mntput(mnt); + return -ELOOP; } - return res; + + dput(path->dentry); + if (*need_mntput) + mntput(path->mnt); + path->mnt = mnt; + path->dentry = dget(mnt->mnt_root); + *need_mntput = true; + return 0; } -static void follow_mount(struct path *path) +/* + * Handle a dentry that is managed in some way. + * - Flagged as mountpoint + * - Flagged as automount point + * + * This may only be called in refwalk mode. + * + * Serialization is taken care of in namespace.c + */ +static int follow_managed(struct path *path, unsigned flags) { - while (d_mountpoint(path->dentry)) { - struct vfsmount *mounted = lookup_mnt(path); - if (!mounted) - break; - dput(path->dentry); - mntput(path->mnt); - path->mnt = mounted; - path->dentry = dget(mounted->mnt_root); + unsigned managed; + bool need_mntput = false; + int ret; + + /* Given that we're not holding a lock here, we retain the value in a + * local variable for each dentry as we look at it so that we don't see + * the components of that value change under us */ + while (managed = ACCESS_ONCE(path->dentry->d_flags), + managed &= DCACHE_MANAGED_DENTRY, + unlikely(managed != 0)) { + /* Transit to a mounted filesystem. */ + if (managed & DCACHE_MOUNTED) { + struct vfsmount *mounted = lookup_mnt(path); + if (mounted) { + dput(path->dentry); + if (need_mntput) + mntput(path->mnt); + path->mnt = mounted; + path->dentry = dget(mounted->mnt_root); + need_mntput = true; + continue; + } + + /* Something is mounted on this dentry in another + * namespace and/or whatever was mounted there in this + * namespace got unmounted before we managed to get the + * vfsmount_lock */ + } + + /* Handle an automount point */ + if (managed & DCACHE_NEED_AUTOMOUNT) { + ret = follow_automount(path, flags, &need_mntput); + if (ret < 0) + return ret == -EISDIR ? 0 : ret; + continue; + } + + /* We didn't change the current path point */ + break; } + return 0; } int follow_down(struct path *path) @@ -958,13 +1027,37 @@ int follow_down(struct path *path) return 0; } +/* + * Skip to top of mountpoint pile in rcuwalk mode. We abort the rcu-walk if we + * meet an automount point and we're not walking to "..". True is returned to + * continue, false to abort. 
+ */ +static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, + struct inode **inode, bool reverse_transit) +{ + while (d_mountpoint(path->dentry)) { + struct vfsmount *mounted; + mounted = __lookup_mnt(path->mnt, path->dentry, 1); + if (!mounted) + break; + path->mnt = mounted; + path->dentry = mounted->mnt_root; + nd->seq = read_seqcount_begin(&path->dentry->d_seq); + *inode = path->dentry->d_inode; + } + + if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT)) + return reverse_transit; + return true; +} + static int follow_dotdot_rcu(struct nameidata *nd) { struct inode *inode = nd->inode; set_root_rcu(nd); - while(1) { + while (1) { if (nd->path.dentry == nd->root.dentry && nd->path.mnt == nd->root.mnt) { break; @@ -987,12 +1080,28 @@ static int follow_dotdot_rcu(struct nameidata *nd) nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); inode = nd->path.dentry->d_inode; } - __follow_mount_rcu(nd, &nd->path, &inode); + __follow_mount_rcu(nd, &nd->path, &inode, true); nd->inode = inode; return 0; } +/* + * Skip to top of mountpoint pile in refwalk mode for follow_dotdot() + */ +static void follow_mount(struct path *path) +{ + while (d_mountpoint(path->dentry)) { + struct vfsmount *mounted = lookup_mnt(path); + if (!mounted) + break; + dput(path->dentry); + mntput(path->mnt); + path->mnt = mounted; + path->dentry = dget(mounted->mnt_root); + } +} + static void follow_dotdot(struct nameidata *nd) { set_root(nd); @@ -1057,12 +1166,14 @@ static int do_lookup(struct nameidata *nd, struct qstr *name, struct vfsmount *mnt = nd->path.mnt; struct dentry *dentry, *parent = nd->path.dentry; struct inode *dir; + int err; + /* * See if the low-level filesystem might want * to use its own hash.. */ if (unlikely(parent->d_flags & DCACHE_OP_HASH)) { - int err = parent->d_op->d_hash(parent, nd->inode, name); + err = parent->d_op->d_hash(parent, nd->inode, name); if (err < 0) return err; } @@ -1092,20 +1203,25 @@ static int do_lookup(struct nameidata *nd, struct qstr *name, done2: path->mnt = mnt; path->dentry = dentry; - __follow_mount_rcu(nd, path, inode); - } else { - dentry = __d_lookup(parent, name); - if (!dentry) - goto need_lookup; + if (likely(__follow_mount_rcu(nd, path, inode, false))) + return 0; + if (nameidata_drop_rcu(nd)) + return -ECHILD; + /* fallthru */ + } + dentry = __d_lookup(parent, name); + if (!dentry) + goto need_lookup; found: - if (dentry->d_flags & DCACHE_OP_REVALIDATE) - goto need_revalidate; + if (dentry->d_flags & DCACHE_OP_REVALIDATE) + goto need_revalidate; done: - path->mnt = mnt; - path->dentry = dentry; - __follow_mount(path); - *inode = path->dentry->d_inode; - } + path->mnt = mnt; + path->dentry = dentry; + err = follow_managed(path, nd->flags); + if (unlikely(err < 0)) + return err; + *inode = path->dentry->d_inode; return 0; need_lookup: @@ -2203,11 +2319,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path, if (open_flag & O_EXCL) goto exit_dput; - if (__follow_mount(path)) { - error = -ELOOP; - if (open_flag & O_NOFOLLOW) - goto exit_dput; - } + error = follow_managed(path, nd->flags); + if (error < 0) + goto exit_dput; error = -ENOENT; if (!path->dentry->d_inode) -- cgit v1.2.2 From cc53ce53c86924bfe98a12ea20b7465038a08792 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Jan 2011 18:45:26 +0000 Subject: Add a dentry op to allow processes to be held during pathwalk transit Add a dentry op (d_manage) to permit a filesystem to hold a process and make it sleep when it tries to transit away from one of that 
filesystem's directories during a pathwalk. The operation is keyed off a new dentry flag (DCACHE_MANAGE_TRANSIT). The filesystem is allowed to be selective about which processes it holds and which it permits to continue on or prohibits from transiting from each flagged directory. This will allow autofs to hold up client processes whilst letting its userspace daemon through to maintain the directory or the stuff behind it or mounted upon it. The ->d_manage() dentry operation: int (*d_manage)(struct path *path, bool mounting_here); takes a pointer to the directory about to be transited away from and a flag indicating whether the transit is undertaken by do_add_mount() or do_move_mount() skipping through a pile of filesystems mounted on a mountpoint. It should return 0 if successful and to let the process continue on its way; -EISDIR to prohibit the caller from skipping to overmounted filesystems or automounting, and to use this directory; or some other error code to return to the user. ->d_manage() is called with namespace_sem writelocked if mounting_here is true and no other locks held, so it may sleep. However, if mounting_here is true, it may not initiate or wait for a mount or unmount upon the parameter directory, even if the act is actually performed by userspace. Within fs/namei.c, follow_managed() is extended to check with d_manage() first on each managed directory, before transiting away from it or attempting to automount upon it. follow_down() is renamed follow_down_one() and should only be used where the filesystem deliberately intends to avoid management steps (e.g. autofs). A new follow_down() is added that incorporates the loop done by all other callers of follow_down() (do_add/move_mount(), autofs and NFSD; whilst AFS, NFS and CIFS do use it, their use is removed by converting them to use d_automount()). The new follow_down() calls d_manage() as appropriate. It also takes an extra parameter to indicate if it is being called from mount code (with namespace_sem writelocked) which it passes to d_manage(). follow_down() ignores automount points so that it can be used to mount on them. __follow_mount_rcu() is made to abort rcu-walk mode if it hits a directory with DCACHE_MANAGE_TRANSIT set on the basis that we're probably going to have to sleep. It would be possible to enter d_manage() in rcu-walk mode too, and have that determine whether to abort or not itself. That would allow the autofs daemon to continue on in rcu-walk mode. Note that DCACHE_MANAGE_TRANSIT on a directory should be cleared when it isn't required as every tranist from that directory will cause d_manage() to be invoked. It can always be set again when necessary. ========================== WHAT THIS MEANS FOR AUTOFS ========================== Autofs currently uses the lookup() inode op and the d_revalidate() dentry op to trigger the automounting of indirect mounts, and both of these can be called with i_mutex held. autofs knows that the i_mutex will be held by the caller in lookup(), and so can drop it before invoking the daemon - but this isn't so for d_revalidate(), since the lock is only held on _some_ of the code paths that call it. This means that autofs can't risk dropping i_mutex from its d_revalidate() function before it calls the daemon. 
The bug could manifest itself as, for example, a process that's trying to validate an automount dentry that gets made to wait because that dentry is expired and needs cleaning up: mkdir S ffffffff8014e05a 0 32580 24956 Call Trace: [] :autofs4:autofs4_wait+0x674/0x897 [] avc_has_perm+0x46/0x58 [] autoremove_wake_function+0x0/0x2e [] :autofs4:autofs4_expire_wait+0x41/0x6b [] :autofs4:autofs4_revalidate+0x91/0x149 [] __lookup_hash+0xa0/0x12f [] lookup_create+0x46/0x80 [] sys_mkdirat+0x56/0xe4 versus the automount daemon which wants to remove that dentry, but can't because the normal process is holding the i_mutex lock: automount D ffffffff8014e05a 0 32581 1 32561 Call Trace: [] __mutex_lock_slowpath+0x60/0x9b [] do_path_lookup+0x2ca/0x2f1 [] .text.lock.mutex+0xf/0x14 [] do_rmdir+0x77/0xde [] tracesys+0x71/0xe0 [] tracesys+0xd5/0xe0 which means that the system is deadlocked. This patch allows autofs to hold up normal processes whilst the daemon goes ahead and does things to the dentry tree behind the automouter point without risking a deadlock as almost no locks are held in d_manage() and none in d_automount(). Signed-off-by: David Howells Was-Acked-by: Ian Kent Signed-off-by: Al Viro --- fs/afs/mntpt.c | 5 +--- fs/autofs4/autofs_i.h | 13 --------- fs/autofs4/dev-ioctl.c | 2 +- fs/autofs4/expire.c | 2 +- fs/autofs4/root.c | 11 ++++---- fs/cifs/cifs_dfs_ref.c | 5 +--- fs/namei.c | 72 ++++++++++++++++++++++++++++++++++++++++++++++++-- fs/namespace.c | 14 +++++----- fs/nfs/namespace.c | 5 +--- fs/nfsd/vfs.c | 5 ++-- 10 files changed, 91 insertions(+), 43 deletions(-) (limited to 'fs') diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index e83c0336e7b5..f3e891d57a2c 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -273,10 +273,7 @@ static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd) break; case -EBUSY: /* someone else made a mount here whilst we were busy */ - while (d_mountpoint(nd->path.dentry) && - follow_down(&nd->path)) - ; - err = 0; + err = follow_down(&nd->path, false); default: mntput(newmnt); break; diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 0fffe1c24cec..eb67953452bb 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -229,19 +229,6 @@ int autofs4_wait(struct autofs_sb_info *,struct dentry *, enum autofs_notify); int autofs4_wait_release(struct autofs_sb_info *,autofs_wqt_t,int); void autofs4_catatonic_mode(struct autofs_sb_info *); -static inline int autofs4_follow_mount(struct path *path) -{ - int res = 0; - - while (d_mountpoint(path->dentry)) { - int followed = follow_down(path); - if (!followed) - break; - res = 1; - } - return res; -} - static inline u32 autofs4_get_dev(struct autofs_sb_info *sbi) { return new_encode_dev(sbi->sb->s_dev); diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index eff9a419469a..1442da4860e5 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -551,7 +551,7 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp, err = have_submounts(path.dentry); - if (follow_down(&path)) + if (follow_down_one(&path)) magic = path.mnt->mnt_sb->s_magic; } diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index cc1d01365905..6a930b90d389 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -56,7 +56,7 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry) path_get(&path); - if (!follow_down(&path)) + if (!follow_down_one(&path)) goto done; if (is_autofs4_dentry(path.dentry)) { diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 
651e4ef563b1..20225636a4e9 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -234,7 +234,7 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) nd->flags); /* * For an expire of a covered direct or offset mount we need - * to break out of follow_down() at the autofs mount trigger + * to break out of follow_down_one() at the autofs mount trigger * (d_mounted--), so we can see the expiring flag, and manage * the blocking and following here until the expire is completed. */ @@ -243,7 +243,7 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) if (ino->flags & AUTOFS_INF_EXPIRING) { spin_unlock(&sbi->fs_lock); /* Follow down to our covering mount. */ - if (!follow_down(&nd->path)) + if (!follow_down_one(&nd->path)) goto done; goto follow; } @@ -292,11 +292,10 @@ follow: * multi-mount with no root offset so we don't need * to follow it. */ - if (d_mountpoint(dentry)) { - if (!autofs4_follow_mount(&nd->path)) { - status = -ENOENT; + if (d_managed(dentry)) { + status = follow_down(&nd->path, false); + if (status < 0) goto out_error; - } } done: diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index c68a056f27fd..83479cf63f96 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -273,10 +273,7 @@ static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd, break; case -EBUSY: /* someone else made a mount here whilst we were busy */ - while (d_mountpoint(nd->path.dentry) && - follow_down(&nd->path)) - ; - err = 0; + err = follow_down(&nd->path, false); default: mntput(newmnt); break; diff --git a/fs/namei.c b/fs/namei.c index 16109da68bbf..9d3033dc22e9 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -960,6 +960,7 @@ static int follow_automount(struct path *path, unsigned flags, /* * Handle a dentry that is managed in some way. + * - Flagged for transit management (autofs) * - Flagged as mountpoint * - Flagged as automount point * @@ -979,6 +980,16 @@ static int follow_managed(struct path *path, unsigned flags) while (managed = ACCESS_ONCE(path->dentry->d_flags), managed &= DCACHE_MANAGED_DENTRY, unlikely(managed != 0)) { + /* Allow the filesystem to manage the transit without i_mutex + * being held. */ + if (managed & DCACHE_MANAGE_TRANSIT) { + BUG_ON(!path->dentry->d_op); + BUG_ON(!path->dentry->d_op->d_manage); + ret = path->dentry->d_op->d_manage(path->dentry, false); + if (ret < 0) + return ret == -EISDIR ? 0 : ret; + } + /* Transit to a mounted filesystem. */ if (managed & DCACHE_MOUNTED) { struct vfsmount *mounted = lookup_mnt(path); @@ -1012,7 +1023,7 @@ static int follow_managed(struct path *path, unsigned flags) return 0; } -int follow_down(struct path *path) +int follow_down_one(struct path *path) { struct vfsmount *mounted; @@ -1029,14 +1040,19 @@ int follow_down(struct path *path) /* * Skip to top of mountpoint pile in rcuwalk mode. We abort the rcu-walk if we - * meet an automount point and we're not walking to "..". True is returned to + * meet a managed dentry and we're not walking to "..". True is returned to * continue, false to abort. */ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, struct inode **inode, bool reverse_transit) { + unsigned abort_mask = + reverse_transit ? 
0 : DCACHE_MANAGE_TRANSIT; + while (d_mountpoint(path->dentry)) { struct vfsmount *mounted; + if (path->dentry->d_flags & abort_mask) + return true; mounted = __lookup_mnt(path->mnt, path->dentry, 1); if (!mounted) break; @@ -1086,6 +1102,57 @@ static int follow_dotdot_rcu(struct nameidata *nd) return 0; } +/* + * Follow down to the covering mount currently visible to userspace. At each + * point, the filesystem owning that dentry may be queried as to whether the + * caller is permitted to proceed or not. + * + * Care must be taken as namespace_sem may be held (indicated by mounting_here + * being true). + */ +int follow_down(struct path *path, bool mounting_here) +{ + unsigned managed; + int ret; + + while (managed = ACCESS_ONCE(path->dentry->d_flags), + unlikely(managed & DCACHE_MANAGED_DENTRY)) { + /* Allow the filesystem to manage the transit without i_mutex + * being held. + * + * We indicate to the filesystem if someone is trying to mount + * something here. This gives autofs the chance to deny anyone + * other than its daemon the right to mount on its + * superstructure. + * + * The filesystem may sleep at this point. + */ + if (managed & DCACHE_MANAGE_TRANSIT) { + BUG_ON(!path->dentry->d_op); + BUG_ON(!path->dentry->d_op->d_manage); + ret = path->dentry->d_op->d_manage(path->dentry, mounting_here); + if (ret < 0) + return ret == -EISDIR ? 0 : ret; + } + + /* Transit to a mounted filesystem. */ + if (managed & DCACHE_MOUNTED) { + struct vfsmount *mounted = lookup_mnt(path); + if (!mounted) + break; + dput(path->dentry); + mntput(path->mnt); + path->mnt = mounted; + path->dentry = dget(mounted->mnt_root); + continue; + } + + /* Don't handle automount points here */ + break; + } + return 0; +} + /* * Skip to top of mountpoint pile in refwalk mode for follow_dotdot() */ @@ -3530,6 +3597,7 @@ const struct inode_operations page_symlink_inode_operations = { }; EXPORT_SYMBOL(user_path_at); +EXPORT_SYMBOL(follow_down_one); EXPORT_SYMBOL(follow_down); EXPORT_SYMBOL(follow_up); EXPORT_SYMBOL(get_write_access); /* binfmt_aout */ diff --git a/fs/namespace.c b/fs/namespace.c index 3ddfd9046c44..d94ccd6ddafd 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1844,9 +1844,10 @@ static int do_move_mount(struct path *path, char *old_name) return err; down_write(&namespace_sem); - while (d_mountpoint(path->dentry) && - follow_down(path)) - ; + err = follow_down(path, true); + if (err < 0) + goto out; + err = -EINVAL; if (!check_mnt(path->mnt) || !check_mnt(old_path.mnt)) goto out; @@ -1940,9 +1941,10 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path, down_write(&namespace_sem); /* Something was mounted here while we slept */ - while (d_mountpoint(path->dentry) && - follow_down(path)) - ; + err = follow_down(path, true); + if (err < 0) + goto unlock; + err = -EINVAL; if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(path->mnt)) goto unlock; diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 74aaf3963c10..bfcb933e5755 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -176,10 +176,7 @@ out_err: path_put(&nd->path); goto out; out_follow: - while (d_mountpoint(nd->path.dentry) && - follow_down(&nd->path)) - ; - err = 0; + err = follow_down(&nd->path, false); goto out; } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 230b79fbf005..0f79e33a65d7 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -88,8 +88,9 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, .dentry = dget(dentry)}; int err = 0; - while (d_mountpoint(path.dentry) && follow_down(&path)) - ; + err = 
follow_down(&path, false); + if (err < 0) + goto out; exp2 = rqst_exp_get_by_name(rqstp, &path); if (IS_ERR(exp2)) { -- cgit v1.2.2 From 6f45b65672c8017d5e210e338bb5858a938ef445 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Jan 2011 18:45:31 +0000 Subject: Add an AT_NO_AUTOMOUNT flag to suppress terminal automount Add an AT_NO_AUTOMOUNT flag to suppress terminal automounting of automount point directories. This can be used by fstatat() users to permit the gathering of attributes on an automount point and also prevent mass-automounting of a directory of automount points by ls. Signed-off-by: David Howells Acked-by: Ian Kent Signed-off-by: Al Viro --- fs/namei.c | 6 ++++++ fs/stat.c | 4 +++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 9d3033dc22e9..dc50bfb2f5d6 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -908,6 +908,12 @@ static int follow_automount(struct path *path, unsigned flags, if (!path->dentry->d_op || !path->dentry->d_op->d_automount) return -EREMOTE; + /* We don't want to mount if someone supplied AT_NO_AUTOMOUNT + * and this is the terminal part of the path. + */ + if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_CONTINUE)) + return -EISDIR; /* we actually want to stop here */ + /* We want to mount if someone is trying to open/create a file of any * type under the mountpoint, wants to traverse through the mountpoint * or wants to open the mounted directory. diff --git a/fs/stat.c b/fs/stat.c index 12e90e213900..d5c61cf2b703 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -75,11 +75,13 @@ int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat, int error = -EINVAL; int lookup_flags = 0; - if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0) + if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT)) != 0) goto out; if (!(flag & AT_SYMLINK_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; + if (flag & AT_NO_AUTOMOUNT) + lookup_flags |= LOOKUP_NO_AUTOMOUNT; error = user_path_at(dfd, filename, lookup_flags, &path); if (error) -- cgit v1.2.2 From d18610b0ce9eb48c60649d8fcbf68374c84349d3 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Jan 2011 19:04:05 +0000 Subject: AFS: Use d_automount() rather than abusing follow_link() Make AFS use the new d_automount() dentry operation rather than abusing follow_link() on directories. 
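The conversion pattern here, and in the NFS and CIFS patches that follow, is the same: the filesystem marks the inode S_AUTOMOUNT, supplies a ->d_automount() dentry operation, builds the vfsmount for the covered point and hands it back to pathwalk rather than splicing nd->path itself. A minimal sketch of such an operation, with myfs_build_mount() and myfs_automount_list as hypothetical stand-ins for the filesystem-specific parts (they are not taken from these patches), might look like this:

static struct vfsmount *myfs_d_automount(struct path *path)
{
	struct vfsmount *mnt;
	int err;

	/* Construct the vfsmount that should cover this automount point
	 * (myfs_build_mount() is a hypothetical helper). */
	mnt = myfs_build_mount(path->dentry);
	if (IS_ERR(mnt))
		return mnt;

	mntget(mnt);
	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE,
			   &myfs_automount_list);
	switch (err) {
	case 0:
		/* A real filesystem would also arm its expiry timer here. */
		return mnt;		/* pathwalk steps into the new mount */
	case -EBUSY:
		/* Someone else made a mount here whilst we were busy. */
		mntput(mnt);
		return NULL;		/* just follow whatever is there now */
	default:
		mntput(mnt);
		return ERR_PTR(err);
	}
}

On the caller side, the AT_NO_AUTOMOUNT flag added earlier in this series is simply passed to fstatat(), e.g. fstatat(dfd, name, &st, AT_NO_AUTOMOUNT), so stat-style tools can examine a trigger directory without mounting it.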
Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/afs/dir.c | 1 + fs/afs/inode.c | 3 ++- fs/afs/internal.h | 1 + fs/afs/mntpt.c | 44 +++++++++++++++----------------------------- 4 files changed, 19 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/afs/dir.c b/fs/afs/dir.c index e6a4ab980e31..20c106f24927 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -66,6 +66,7 @@ const struct dentry_operations afs_fs_dentry_operations = { .d_revalidate = afs_d_revalidate, .d_delete = afs_d_delete, .d_release = afs_d_release, + .d_automount = afs_d_automount, }; #define AFS_DIR_HASHTBL_SIZE 128 diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 0747339011c3..db66c5201474 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -184,7 +184,8 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name, inode->i_generation = 0; set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags); - inode->i_flags |= S_NOATIME; + set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); + inode->i_flags |= S_AUTOMOUNT | S_NOATIME; unlock_new_inode(inode); _leave(" = %p", inode); return inode; diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 58c633b80246..5a9b6843bac1 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -592,6 +592,7 @@ extern const struct inode_operations afs_mntpt_inode_operations; extern const struct inode_operations afs_autocell_inode_operations; extern const struct file_operations afs_mntpt_file_operations; +extern struct vfsmount *afs_d_automount(struct path *); extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *); extern void afs_mntpt_kill_timer(void); diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index f3e891d57a2c..d23b2e344a78 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -24,7 +24,6 @@ static struct dentry *afs_mntpt_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd); static int afs_mntpt_open(struct inode *inode, struct file *file); -static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd); static void afs_mntpt_expiry_timed_out(struct work_struct *work); const struct file_operations afs_mntpt_file_operations = { @@ -34,13 +33,11 @@ const struct file_operations afs_mntpt_file_operations = { const struct inode_operations afs_mntpt_inode_operations = { .lookup = afs_mntpt_lookup, - .follow_link = afs_mntpt_follow_link, .readlink = page_readlink, .getattr = afs_getattr, }; const struct inode_operations afs_autocell_inode_operations = { - .follow_link = afs_mntpt_follow_link, .getattr = afs_getattr, }; @@ -88,6 +85,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key) _debug("symlink is a mountpoint"); spin_lock(&vnode->lock); set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); + vnode->vfs_inode.i_flags |= S_AUTOMOUNT; spin_unlock(&vnode->lock); } @@ -238,49 +236,37 @@ error_no_devname: } /* - * follow a link from a mountpoint directory, thus causing it to be mounted + * handle an automount point */ -static void *afs_mntpt_follow_link(struct dentry *dentry, struct nameidata *nd) +struct vfsmount *afs_d_automount(struct path *path) { struct vfsmount *newmnt; int err; - _enter("%p{%s},{%s:%p{%s},}", - dentry, - dentry->d_name.name, - nd->path.mnt->mnt_devname, - dentry, - nd->path.dentry->d_name.name); - - dput(nd->path.dentry); - nd->path.dentry = dget(dentry); + _enter("{%s,%s}", path->mnt->mnt_devname, path->dentry->d_name.name); - newmnt = afs_mntpt_do_automount(nd->path.dentry); - if (IS_ERR(newmnt)) { - path_put(&nd->path); - return (void *)newmnt; - } + newmnt = 
afs_mntpt_do_automount(path->dentry); + if (IS_ERR(newmnt)) + return newmnt; mntget(newmnt); - err = do_add_mount(newmnt, &nd->path, MNT_SHRINKABLE, &afs_vfsmounts); + err = do_add_mount(newmnt, path, MNT_SHRINKABLE, &afs_vfsmounts); switch (err) { case 0: - path_put(&nd->path); - nd->path.mnt = newmnt; - nd->path.dentry = dget(newmnt->mnt_root); queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer, afs_mntpt_expiry_timeout * HZ); - break; + _leave(" = %p {%s}", newmnt, newmnt->mnt_devname); + return newmnt; case -EBUSY: /* someone else made a mount here whilst we were busy */ - err = follow_down(&nd->path, false); + mntput(newmnt); + _leave(" = NULL [EBUSY]"); + return NULL; default: mntput(newmnt); - break; + _leave(" = %d", err); + return ERR_PTR(err); } - - _leave(" = %d", err); - return ERR_PTR(err); } /* -- cgit v1.2.2 From 36d43a43761b004ad1879ac21471d8fc5f3157ec Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Jan 2011 18:45:42 +0000 Subject: NFS: Use d_automount() rather than abusing follow_link() Make NFS use the new d_automount() dentry operation rather than abusing follow_link() on directories. Signed-off-by: David Howells Acked-by: Trond Myklebust Acked-by: Ian Kent Signed-off-by: Al Viro --- fs/nfs/dir.c | 4 ++- fs/nfs/inode.c | 4 +-- fs/nfs/internal.h | 1 + fs/nfs/namespace.c | 84 ++++++++++++++++++++++++++---------------------------- 4 files changed, 46 insertions(+), 47 deletions(-) (limited to 'fs') diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index df8c03a02161..2c3eb33b904d 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -970,7 +970,7 @@ int nfs_lookup_verify_inode(struct inode *inode, struct nameidata *nd) { struct nfs_server *server = NFS_SERVER(inode); - if (test_bit(NFS_INO_MOUNTPOINT, &NFS_I(inode)->flags)) + if (IS_AUTOMOUNT(inode)) return 0; if (nd != NULL) { /* VFS wants an on-the-wire revalidation */ @@ -1173,6 +1173,7 @@ const struct dentry_operations nfs_dentry_operations = { .d_revalidate = nfs_lookup_revalidate, .d_delete = nfs_dentry_delete, .d_iput = nfs_dentry_iput, + .d_automount = nfs_d_automount, }; static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd) @@ -1246,6 +1247,7 @@ const struct dentry_operations nfs4_dentry_operations = { .d_revalidate = nfs_open_revalidate, .d_delete = nfs_dentry_delete, .d_iput = nfs_dentry_iput, + .d_automount = nfs_d_automount, }; /* diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index ce00b704452c..d8512423ba72 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -300,7 +300,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr) else inode->i_op = &nfs_mountpoint_inode_operations; inode->i_fop = NULL; - set_bit(NFS_INO_MOUNTPOINT, &nfsi->flags); + inode->i_flags |= S_AUTOMOUNT; } } else if (S_ISLNK(inode->i_mode)) inode->i_op = &nfs_symlink_inode_operations; @@ -1208,7 +1208,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) /* Update the fsid? 
*/ if (S_ISDIR(inode->i_mode) && (fattr->valid & NFS_ATTR_FATTR_FSID) && !nfs_fsid_equal(&server->fsid, &fattr->fsid) && - !test_bit(NFS_INO_MOUNTPOINT, &nfsi->flags)) + !IS_AUTOMOUNT(inode)) server->fsid = fattr->fsid; /* diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index bfa3a34af801..4644f04b4b46 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -252,6 +252,7 @@ extern char *nfs_path(const char *base, const struct dentry *droot, const struct dentry *dentry, char *buffer, ssize_t buflen); +extern struct vfsmount *nfs_d_automount(struct path *path); /* getroot.c */ extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *); diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index bfcb933e5755..f3fbb1bf3f18 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -97,9 +97,8 @@ Elong: } /* - * nfs_follow_mountpoint - handle crossing a mountpoint on the server - * @dentry - dentry of mountpoint - * @nd - nameidata info + * nfs_d_automount - Handle crossing a mountpoint on the server + * @path - The mountpoint * * When we encounter a mountpoint on the server, we want to set up * a mountpoint on the client too, to prevent inode numbers from @@ -109,84 +108,81 @@ Elong: * situation, and that different filesystems may want to use * different security flavours. */ -static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd) +struct vfsmount *nfs_d_automount(struct path *path) { struct vfsmount *mnt; - struct nfs_server *server = NFS_SERVER(dentry->d_inode); + struct nfs_server *server = NFS_SERVER(path->dentry->d_inode); struct dentry *parent; struct nfs_fh *fh = NULL; struct nfs_fattr *fattr = NULL; int err; - dprintk("--> nfs_follow_mountpoint()\n"); + dprintk("--> nfs_d_automount()\n"); - err = -ESTALE; - if (IS_ROOT(dentry)) - goto out_err; + mnt = ERR_PTR(-ESTALE); + if (IS_ROOT(path->dentry)) + goto out_nofree; - err = -ENOMEM; + mnt = ERR_PTR(-ENOMEM); fh = nfs_alloc_fhandle(); fattr = nfs_alloc_fattr(); if (fh == NULL || fattr == NULL) - goto out_err; + goto out; dprintk("%s: enter\n", __func__); - dput(nd->path.dentry); - nd->path.dentry = dget(dentry); - /* Look it up again */ - parent = dget_parent(nd->path.dentry); + /* Look it up again to get its attributes */ + parent = dget_parent(path->dentry); err = server->nfs_client->rpc_ops->lookup(parent->d_inode, - &nd->path.dentry->d_name, + &path->dentry->d_name, fh, fattr); dput(parent); - if (err != 0) - goto out_err; + if (err != 0) { + mnt = ERR_PTR(err); + goto out; + } if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) - mnt = nfs_do_refmount(nd->path.mnt, nd->path.dentry); + mnt = nfs_do_refmount(path->mnt, path->dentry); else - mnt = nfs_do_submount(nd->path.mnt, nd->path.dentry, fh, - fattr); - err = PTR_ERR(mnt); + mnt = nfs_do_submount(path->mnt, path->dentry, fh, fattr); if (IS_ERR(mnt)) - goto out_err; + goto out; mntget(mnt); - err = do_add_mount(mnt, &nd->path, nd->path.mnt->mnt_flags|MNT_SHRINKABLE, + err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE, &nfs_automount_list); - if (err < 0) { + switch (err) { + case 0: + dprintk("%s: done, success\n", __func__); + schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout); + break; + case -EBUSY: + /* someone else made a mount here whilst we were busy */ mntput(mnt); - if (err == -EBUSY) - goto out_follow; - goto out_err; + dprintk("%s: done, collision\n", __func__); + mnt = NULL; + break; + default: + mntput(mnt); + dprintk("%s: done, error %d\n", __func__, err); + mnt = ERR_PTR(err); + 
break; } - path_put(&nd->path); - nd->path.mnt = mnt; - nd->path.dentry = dget(mnt->mnt_root); - schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout); + out: nfs_free_fattr(fattr); nfs_free_fhandle(fh); - dprintk("%s: done, returned %d\n", __func__, err); - - dprintk("<-- nfs_follow_mountpoint() = %d\n", err); - return ERR_PTR(err); -out_err: - path_put(&nd->path); - goto out; -out_follow: - err = follow_down(&nd->path, false); - goto out; +out_nofree: + dprintk("<-- nfs_follow_mountpoint() = %p\n", mnt); + return mnt; } const struct inode_operations nfs_mountpoint_inode_operations = { - .follow_link = nfs_follow_mountpoint, .getattr = nfs_getattr, }; const struct inode_operations nfs_referral_inode_operations = { - .follow_link = nfs_follow_mountpoint, }; static void nfs_expire_automounts(struct work_struct *work) -- cgit v1.2.2 From 01c64feac45cea1317263eabc4f7ee1b240f297f Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Jan 2011 18:45:47 +0000 Subject: CIFS: Use d_automount() rather than abusing follow_link() Make CIFS use the new d_automount() dentry operation rather than abusing follow_link() on directories. [NOTE: THIS IS UNTESTED!] Signed-off-by: David Howells Cc: Steve French Signed-off-by: Al Viro --- fs/cifs/cifs_dfs_ref.c | 131 +++++++++++++++++++++++++------------------------ fs/cifs/cifsfs.h | 6 +++ fs/cifs/dir.c | 2 + fs/cifs/inode.c | 8 +-- 4 files changed, 80 insertions(+), 67 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 83479cf63f96..0fc163808de3 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -255,32 +255,6 @@ static struct vfsmount *cifs_dfs_do_refmount(struct cifs_sb_info *cifs_sb, } -static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd, - struct list_head *mntlist) -{ - /* stolen from afs code */ - int err; - - mntget(newmnt); - err = do_add_mount(newmnt, &nd->path, nd->path.mnt->mnt_flags | MNT_SHRINKABLE, mntlist); - switch (err) { - case 0: - path_put(&nd->path); - nd->path.mnt = newmnt; - nd->path.dentry = dget(newmnt->mnt_root); - schedule_delayed_work(&cifs_dfs_automount_task, - cifs_dfs_mountpoint_expiry_timeout); - break; - case -EBUSY: - /* someone else made a mount here whilst we were busy */ - err = follow_down(&nd->path, false); - default: - mntput(newmnt); - break; - } - return err; -} - static void dump_referral(const struct dfs_info3_param *ref) { cFYI(1, "DFS: ref path: %s", ref->path_name); @@ -290,45 +264,43 @@ static void dump_referral(const struct dfs_info3_param *ref) ref->path_consumed); } - -static void* -cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd) +/* + * Create a vfsmount that we can automount + */ +static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) { struct dfs_info3_param *referrals = NULL; unsigned int num_referrals = 0; struct cifs_sb_info *cifs_sb; struct cifsSesInfo *ses; - char *full_path = NULL; + char *full_path; int xid, i; - int rc = 0; - struct vfsmount *mnt = ERR_PTR(-ENOENT); + int rc; + struct vfsmount *mnt; struct tcon_link *tlink; cFYI(1, "in %s", __func__); - BUG_ON(IS_ROOT(dentry)); + BUG_ON(IS_ROOT(mntpt)); xid = GetXid(); - dput(nd->path.dentry); - nd->path.dentry = dget(dentry); - /* * The MSDFS spec states that paths in DFS referral requests and * responses must be prefixed by a single '\' character instead of * the double backslashes usually used in the UNC. This function * gives us the latter, so we must adjust the result. 
*/ - full_path = build_path_from_dentry(dentry); - if (full_path == NULL) { - rc = -ENOMEM; - goto out_err; - } + mnt = ERR_PTR(-ENOMEM); + full_path = build_path_from_dentry(mntpt); + if (full_path == NULL) + goto free_xid; - cifs_sb = CIFS_SB(dentry->d_inode->i_sb); + cifs_sb = CIFS_SB(mntpt->d_inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); + mnt = ERR_PTR(-EINVAL); if (IS_ERR(tlink)) { - rc = PTR_ERR(tlink); - goto out_err; + mnt = ERR_CAST(tlink); + goto free_full_path; } ses = tlink_tcon(tlink)->ses; @@ -338,46 +310,77 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd) cifs_put_tlink(tlink); + mnt = ERR_PTR(-ENOENT); for (i = 0; i < num_referrals; i++) { int len; - dump_referral(referrals+i); + dump_referral(referrals + i); /* connect to a node */ len = strlen(referrals[i].node_name); if (len < 2) { cERROR(1, "%s: Net Address path too short: %s", __func__, referrals[i].node_name); - rc = -EINVAL; - goto out_err; + mnt = ERR_PTR(-EINVAL); + break; } mnt = cifs_dfs_do_refmount(cifs_sb, full_path, referrals + i); cFYI(1, "%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__, referrals[i].node_name, mnt); - - /* complete mount procedure if we accured submount */ if (!IS_ERR(mnt)) - break; + goto success; } - /* we need it cause for() above could exit without valid submount */ - rc = PTR_ERR(mnt); - if (IS_ERR(mnt)) - goto out_err; - - rc = add_mount_helper(mnt, nd, &cifs_dfs_automount_list); + /* no valid submounts were found; return error from get_dfs_path() by + * preference */ + if (rc != 0) + mnt = ERR_PTR(rc); -out: - FreeXid(xid); +success: free_dfs_info_array(referrals, num_referrals); +free_full_path: kfree(full_path); +free_xid: + FreeXid(xid); cFYI(1, "leaving %s" , __func__); - return ERR_PTR(rc); -out_err: - path_put(&nd->path); - goto out; + return mnt; +} + +/* + * Attempt to automount the referral + */ +struct vfsmount *cifs_dfs_d_automount(struct path *path) +{ + struct vfsmount *newmnt; + int err; + + cFYI(1, "in %s", __func__); + + newmnt = cifs_dfs_do_automount(path->dentry); + if (IS_ERR(newmnt)) { + cFYI(1, "leaving %s [automount failed]" , __func__); + return newmnt; + } + + mntget(newmnt); + err = do_add_mount(newmnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE, + &cifs_dfs_automount_list); + switch (err) { + case 0: + schedule_delayed_work(&cifs_dfs_automount_task, + cifs_dfs_mountpoint_expiry_timeout); + cFYI(1, "leaving %s [ok]" , __func__); + return newmnt; + case -EBUSY: + /* someone else made a mount here whilst we were busy */ + mntput(newmnt); + cFYI(1, "leaving %s [EBUSY]" , __func__); + return NULL; + default: + mntput(newmnt); + cFYI(1, "leaving %s [error %d]" , __func__, err); + return ERR_PTR(err); + } } const struct inode_operations cifs_dfs_referral_inode_operations = { - .follow_link = cifs_dfs_follow_mountpoint, }; - diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 897b2b2b28b5..851030f74939 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -93,6 +93,12 @@ extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir); extern const struct dentry_operations cifs_dentry_ops; extern const struct dentry_operations cifs_ci_dentry_ops; +#ifdef CONFIG_CIFS_DFS_UPCALL +extern struct vfsmount *cifs_dfs_d_automount(struct path *path); +#else +#define cifs_dfs_d_automount NULL +#endif + /* Functions related to symlinks */ extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd); extern void cifs_put_link(struct dentry *direntry, diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 
1e95dd635632..dd5f22918c33 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -675,6 +675,7 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) const struct dentry_operations cifs_dentry_ops = { .d_revalidate = cifs_d_revalidate, + .d_automount = cifs_dfs_d_automount, /* d_delete: cifs_d_delete, */ /* not needed except for debugging */ }; @@ -711,4 +712,5 @@ const struct dentry_operations cifs_ci_dentry_ops = { .d_revalidate = cifs_d_revalidate, .d_hash = cifs_ci_hash, .d_compare = cifs_ci_compare, + .d_automount = cifs_dfs_d_automount, }; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index b06b60620240..6c9ee8014ff0 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -32,7 +32,7 @@ #include "fscache.h" -static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral) +static void cifs_set_ops(struct inode *inode) { struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); @@ -60,7 +60,7 @@ static void cifs_set_ops(struct inode *inode, const bool is_dfs_referral) break; case S_IFDIR: #ifdef CONFIG_CIFS_DFS_UPCALL - if (is_dfs_referral) { + if (IS_AUTOMOUNT(inode)) { inode->i_op = &cifs_dfs_referral_inode_operations; } else { #else /* NO DFS support, treat as a directory */ @@ -167,7 +167,9 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) } spin_unlock(&inode->i_lock); - cifs_set_ops(inode, fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL); + if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL) + inode->i_flags |= S_AUTOMOUNT; + cifs_set_ops(inode); } void -- cgit v1.2.2 From db3729153e82ba3ada89681f26c4f1b6d6807a80 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Jan 2011 18:45:53 +0000 Subject: Remove the automount through follow_link() kludge code from pathwalk Remove the automount through follow_link() kludge code from pathwalk in favour of using d_automount(). Signed-off-by: David Howells Acked-by: Ian Kent Signed-off-by: Al Viro --- fs/namei.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index dc50bfb2f5d6..61995fba4e21 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1341,17 +1341,6 @@ fail: return PTR_ERR(dentry); } -/* - * This is a temporary kludge to deal with "automount" symlinks; proper - * solution is to trigger them on follow_mount(), so that do_lookup() - * would DTRT. To be killed before 2.6.34-final. - */ -static inline int follow_on_final(struct inode *inode, unsigned lookup_flags) -{ - return inode && unlikely(inode->i_op->follow_link) && - ((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode)); -} - /* * Name resolution. 
* This is the basic name resolution function, turning a pathname into @@ -1490,7 +1479,8 @@ last_component: err = do_lookup(nd, &this, &next, &inode); if (err) break; - if (follow_on_final(inode, lookup_flags)) { + if (inode && unlikely(inode->i_op->follow_link) && + (lookup_flags & LOOKUP_FOLLOW)) { if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry)) return -ECHILD; BUG_ON(inode != next.dentry->d_inode); @@ -2543,8 +2533,7 @@ reval: struct inode *linki = link.dentry->d_inode; void *cookie; error = -ELOOP; - /* S_ISDIR part is a temporary automount kludge */ - if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(linki->i_mode)) + if (!(nd.flags & LOOKUP_FOLLOW)) goto exit_dput; if (count++ == 32) goto exit_dput; -- cgit v1.2.2 From 10584211e48036182212b598cc53331776406d60 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 14 Jan 2011 18:45:58 +0000 Subject: autofs4: Add d_automount() dentry operation Add a function to use the newly defined ->d_automount() dentry operation for triggering mounts instead of doing the user space callback in ->lookup() and ->d_revalidate(). Note, to be useful the subsequent patch to add the ->d_manage() dentry operation is also needed so the discussion of functionality is deferred to that patch. Signed-off-by: Ian Kent Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/autofs4/autofs_i.h | 30 ++++++ fs/autofs4/expire.c | 6 ++ fs/autofs4/inode.c | 4 + fs/autofs4/root.c | 261 ++++++++++++++++++++++++++++---------------------- 4 files changed, 189 insertions(+), 112 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index eb67953452bb..1ebfe53872b5 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -218,6 +218,36 @@ extern const struct inode_operations autofs4_direct_root_inode_operations; extern const struct file_operations autofs4_dir_operations; extern const struct file_operations autofs4_root_operations; +/* Operations methods */ + +struct vfsmount *autofs4_d_automount(struct path *); + +/* VFS automount flags management functions */ + +static inline void __managed_dentry_set_automount(struct dentry *dentry) +{ + dentry->d_flags |= DCACHE_NEED_AUTOMOUNT; +} + +static inline void managed_dentry_set_automount(struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + __managed_dentry_set_automount(dentry); + spin_unlock(&dentry->d_lock); +} + +static inline void __managed_dentry_clear_automount(struct dentry *dentry) +{ + dentry->d_flags &= ~DCACHE_NEED_AUTOMOUNT; +} + +static inline void managed_dentry_clear_automount(struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + __managed_dentry_clear_automount(dentry); + spin_unlock(&dentry->d_lock); +} + /* Initializing function */ int autofs4_fill_super(struct super_block *, void *, int); diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 6a930b90d389..0571ec8352b7 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -300,6 +300,7 @@ struct dentry *autofs4_expire_direct(struct super_block *sb, spin_unlock(&root->d_lock); } ino->flags |= AUTOFS_INF_EXPIRING; + managed_dentry_set_automount(root); init_completion(&ino->expire_complete); spin_unlock(&sbi->fs_lock); return root; @@ -408,6 +409,7 @@ found: expired, (int)expired->d_name.len, expired->d_name.name); ino = autofs4_dentry_ino(expired); ino->flags |= AUTOFS_INF_EXPIRING; + managed_dentry_set_automount(expired); init_completion(&ino->expire_complete); spin_unlock(&sbi->fs_lock); spin_lock(&autofs4_lock); @@ -479,6 +481,8 @@ int autofs4_expire_run(struct super_block *sb, 
spin_lock(&sbi->fs_lock); ino = autofs4_dentry_ino(dentry); ino->flags &= ~AUTOFS_INF_EXPIRING; + if (!d_unhashed(dentry)) + managed_dentry_clear_automount(dentry); complete_all(&ino->expire_complete); spin_unlock(&sbi->fs_lock); @@ -516,6 +520,8 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt, ino->flags &= ~AUTOFS_INF_MOUNTPOINT; } ino->flags &= ~AUTOFS_INF_EXPIRING; + if (ret) + managed_dentry_clear_automount(dentry); complete_all(&ino->expire_complete); spin_unlock(&sbi->fs_lock); dput(dentry); diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index a7bdb9dcac84..d0aa38cac302 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -252,6 +252,7 @@ static struct autofs_info *autofs4_mkroot(struct autofs_sb_info *sbi) } static const struct dentry_operations autofs4_sb_dentry_operations = { + .d_automount = autofs4_d_automount, .d_release = autofs4_dentry_release, }; @@ -320,6 +321,9 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) goto fail_dput; } + if (autofs_type_trigger(sbi->type)) + __managed_dentry_set_automount(root); + root_inode->i_fop = &autofs4_root_operations; root_inode->i_op = autofs_type_trigger(sbi->type) ? &autofs4_direct_root_inode_operations : diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 20225636a4e9..27dc53e111fd 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -35,7 +35,6 @@ static long autofs4_root_compat_ioctl(struct file *,unsigned int,unsigned long); #endif static int autofs4_dir_open(struct inode *inode, struct file *file); static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *); -static void *autofs4_follow_link(struct dentry *, struct nameidata *); #define TRIGGER_FLAGS (LOOKUP_CONTINUE | LOOKUP_DIRECTORY) #define TRIGGER_INTENTS (LOOKUP_OPEN | LOOKUP_CREATE) @@ -73,7 +72,6 @@ const struct inode_operations autofs4_direct_root_inode_operations = { .unlink = autofs4_dir_unlink, .mkdir = autofs4_dir_mkdir, .rmdir = autofs4_dir_rmdir, - .follow_link = autofs4_follow_link, }; const struct inode_operations autofs4_dir_inode_operations = { @@ -420,13 +418,12 @@ void autofs4_dentry_release(struct dentry *de) /* For dentries of directories in the root dir */ static const struct dentry_operations autofs4_root_dentry_operations = { - .d_revalidate = autofs4_revalidate, .d_release = autofs4_dentry_release, }; /* For other dentries */ static const struct dentry_operations autofs4_dentry_operations = { - .d_revalidate = autofs4_revalidate, + .d_automount = autofs4_d_automount, .d_release = autofs4_dentry_release, }; @@ -540,50 +537,176 @@ next: return NULL; } +static int autofs4_mount_wait(struct dentry *dentry) +{ + struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); + struct autofs_info *ino = autofs4_dentry_ino(dentry); + int status; + + if (ino->flags & AUTOFS_INF_PENDING) { + DPRINTK("waiting for mount name=%.*s", + dentry->d_name.len, dentry->d_name.name); + status = autofs4_wait(sbi, dentry, NFY_MOUNT); + DPRINTK("mount wait done status=%d", status); + ino->last_used = jiffies; + return status; + } + return 0; +} + +static int do_expire_wait(struct dentry *dentry) +{ + struct dentry *expiring; + + expiring = autofs4_lookup_expiring(dentry); + if (!expiring) + return autofs4_expire_wait(dentry); + else { + /* + * If we are racing with expire the request might not + * be quite complete, but the directory has been removed + * so it must have been successful, just wait for it. 
+ */ + autofs4_expire_wait(expiring); + autofs4_del_expiring(expiring); + dput(expiring); + } + return 0; +} + +static struct dentry *autofs4_mountpoint_changed(struct path *path) +{ + struct dentry *dentry = path->dentry; + struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); + + /* + * If this is an indirect mount the dentry could have gone away + * as a result of an expire and a new one created. + */ + if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) { + struct dentry *parent = dentry->d_parent; + struct dentry *new = d_lookup(parent, &dentry->d_name); + if (!new) + return NULL; + dput(path->dentry); + path->dentry = new; + } + return path->dentry; +} + +struct vfsmount *autofs4_d_automount(struct path *path) +{ + struct dentry *dentry = path->dentry; + struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); + struct autofs_info *ino = autofs4_dentry_ino(dentry); + int status; + + DPRINTK("dentry=%p %.*s", + dentry, dentry->d_name.len, dentry->d_name.name); + + /* The daemon never triggers a mount. */ + if (autofs4_oz_mode(sbi)) + return NULL; + + /* + * If an expire request is pending everyone must wait. + * If the expire fails we're still mounted so continue + * the follow and return. A return of -EAGAIN (which only + * happens with indirect mounts) means the expire completed + * and the directory was removed, so just go ahead and try + * the mount. + */ + status = do_expire_wait(dentry); + if (status && status != -EAGAIN) + return NULL; + + /* Callback to the daemon to perform the mount or wait */ + spin_lock(&sbi->fs_lock); + if (ino->flags & AUTOFS_INF_PENDING) { + spin_unlock(&sbi->fs_lock); + status = autofs4_mount_wait(dentry); + if (status) + return ERR_PTR(status); + spin_lock(&sbi->fs_lock); + goto done; + } + + /* + * If the dentry is a symlink it's equivalent to a directory + * having d_mounted() true, so there's no need to call back + * to the daemon. + */ + if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) + goto done; + spin_lock(&dentry->d_lock); + if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { + ino->flags |= AUTOFS_INF_PENDING; + spin_unlock(&dentry->d_lock); + spin_unlock(&sbi->fs_lock); + status = autofs4_mount_wait(dentry); + if (status) + return ERR_PTR(status); + spin_lock(&sbi->fs_lock); + ino->flags &= ~AUTOFS_INF_PENDING; + goto done; + } + spin_unlock(&dentry->d_lock); +done: + /* + * Any needed mounting has been completed and the path updated + * so turn this into a normal dentry so we don't continually + * call ->d_automount(). 
+ */ + managed_dentry_clear_automount(dentry); + spin_unlock(&sbi->fs_lock); + + /* Mount succeeded, check if we ended up with a new dentry */ + dentry = autofs4_mountpoint_changed(path); + if (!dentry) + return ERR_PTR(-ENOENT); + + return NULL; +} + /* Lookups in the root directory */ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct autofs_sb_info *sbi; struct autofs_info *ino; - struct dentry *expiring, *active; - int oz_mode; + struct dentry *active; - DPRINTK("name = %.*s", - dentry->d_name.len, dentry->d_name.name); + DPRINTK("name = %.*s", dentry->d_name.len, dentry->d_name.name); /* File name too long to exist */ if (dentry->d_name.len > NAME_MAX) return ERR_PTR(-ENAMETOOLONG); sbi = autofs4_sbi(dir->i_sb); - oz_mode = autofs4_oz_mode(sbi); DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d", - current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode); + current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode); active = autofs4_lookup_active(dentry); if (active) { - dentry = active; - ino = autofs4_dentry_ino(dentry); + return active; } else { - /* - * Mark the dentry incomplete but don't hash it. We do this - * to serialize our inode creation operations (symlink and - * mkdir) which prevents deadlock during the callback to - * the daemon. Subsequent user space lookups for the same - * dentry are placed on the wait queue while the daemon - * itself is allowed passage unresticted so the create - * operation itself can then hash the dentry. Finally, - * we check for the hashed dentry and return the newly - * hashed dentry. - */ d_set_d_op(dentry, &autofs4_root_dentry_operations); /* - * And we need to ensure that the same dentry is used for - * all following lookup calls until it is hashed so that - * the dentry flags are persistent throughout the request. + * A dentry that is not within the root can never trigger a + * mount operation, unless the directory already exists, so we + * can return fail immediately. The daemon however does need + * to create directories within the file system. */ + if (!autofs4_oz_mode(sbi) && !IS_ROOT(dentry->d_parent)) + return ERR_PTR(-ENOENT); + + /* Mark entries in the root as mount triggers */ + if (autofs_type_indirect(sbi->type) && IS_ROOT(dentry->d_parent)) { + d_set_d_op(dentry, &autofs4_dentry_operations); + managed_dentry_set_automount(dentry); + } + ino = autofs4_init_ino(NULL, sbi, 0555); if (!ino) return ERR_PTR(-ENOMEM); @@ -595,82 +718,6 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s d_instantiate(dentry, NULL); } - - if (!oz_mode) { - mutex_unlock(&dir->i_mutex); - expiring = autofs4_lookup_expiring(dentry); - if (expiring) { - /* - * If we are racing with expire the request might not - * be quite complete but the directory has been removed - * so it must have been successful, so just wait for it. - */ - autofs4_expire_wait(expiring); - autofs4_del_expiring(expiring); - dput(expiring); - } - - spin_lock(&sbi->fs_lock); - ino->flags |= AUTOFS_INF_PENDING; - spin_unlock(&sbi->fs_lock); - if (dentry->d_op && dentry->d_op->d_revalidate) - (dentry->d_op->d_revalidate)(dentry, nd); - mutex_lock(&dir->i_mutex); - } - - /* - * If we are still pending, check if we had to handle - * a signal. If so we can force a restart.. 
- */ - if (ino->flags & AUTOFS_INF_PENDING) { - /* See if we were interrupted */ - if (signal_pending(current)) { - sigset_t *sigset = ¤t->pending.signal; - if (sigismember (sigset, SIGKILL) || - sigismember (sigset, SIGQUIT) || - sigismember (sigset, SIGINT)) { - if (active) - dput(active); - return ERR_PTR(-ERESTARTNOINTR); - } - } - if (!oz_mode) { - spin_lock(&sbi->fs_lock); - ino->flags &= ~AUTOFS_INF_PENDING; - spin_unlock(&sbi->fs_lock); - } - } - - /* - * If this dentry is unhashed, then we shouldn't honour this - * lookup. Returning ENOENT here doesn't do the right thing - * for all system calls, but it should be OK for the operations - * we permit from an autofs. - */ - if (!oz_mode && d_unhashed(dentry)) { - /* - * A user space application can (and has done in the past) - * remove and re-create this directory during the callback. - * This can leave us with an unhashed dentry, but a - * successful mount! So we need to perform another - * cached lookup in case the dentry now exists. - */ - struct dentry *parent = dentry->d_parent; - struct dentry *new = d_lookup(parent, &dentry->d_name); - if (new != NULL) - dentry = new; - else - dentry = ERR_PTR(-ENOENT); - - if (active) - dput(active); - - return dentry; - } - - if (active) - return active; - return NULL; } @@ -715,11 +762,6 @@ static int autofs4_dir_symlink(struct inode *dir, } d_add(dentry, inode); - if (dir == dir->i_sb->s_root->d_inode) - d_set_d_op(dentry, &autofs4_root_dentry_operations); - else - d_set_d_op(dentry, &autofs4_dentry_operations); - dentry->d_fsdata = ino; ino->dentry = dget(dentry); atomic_inc(&ino->count); @@ -850,11 +892,6 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode) } d_add(dentry, inode); - if (dir == dir->i_sb->s_root->d_inode) - d_set_d_op(dentry, &autofs4_root_dentry_operations); - else - d_set_d_op(dentry, &autofs4_dentry_operations); - dentry->d_fsdata = ino; ino->dentry = dget(dentry); atomic_inc(&ino->count); -- cgit v1.2.2 From b5b801779d59165c4ecf1009009109545bd1f642 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 14 Jan 2011 18:46:03 +0000 Subject: autofs4: Add d_manage() dentry operation This patch required a previous patch to add the ->d_automount() dentry operation. Add a function to use the newly defined ->d_manage() dentry operation for blocking during mount and expire. Whether the VFS calls the dentry operations d_automount() and d_manage() is controled by the DMANAGED_AUTOMOUNT and DMANAGED_TRANSIT flags. autofs uses the d_automount() operation to callback to user space to request mount operations and the d_manage() operation to block walks into mounts that are under construction or destruction. In order to prevent these functions from being called unnecessarily the DMANAGED_* flags are cleared for cases which would cause this. In the common case the DMANAGED_AUTOMOUNT and DMANAGED_TRANSIT flags are both set for dentrys waiting to be mounted. The DMANAGED_TRANSIT flag is cleared upon successful mount request completion and set during expire runs, both during the dentry expire check, and if selected for expire, is left set until a subsequent successful mount request completes. The exception to this is the so-called rootless multi-mount which has no actual mount at its base. In this case the DMANAGED_AUTOMOUNT flag is cleared upon successful mount request completion as well and set again after a successful expire. 
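In VFS terms the new operation is int (*d_manage)(struct dentry *, bool mounting_here): return 0 to let the walker transit the dentry, a negative error to fail the walk, or -EISDIR to stop at this dentry without an error (follow_managed() and follow_down() above translate -EISDIR into success). A rough sketch of the shape autofs gives it, with myfs_wait_for_expire() and myfs_wait_for_mount() as hypothetical stand-ins for the expire/mount synchronisation (autofs additionally short-circuits for its own daemon):

static int myfs_d_manage(struct dentry *dentry, bool mounting_here)
{
	/*
	 * Someone is mounting directly on top of us (namespace_sem is
	 * held in that case): never block, and if nothing is mounted
	 * here yet stop the walk at this dentry.
	 */
	if (mounting_here)
		return d_mountpoint(dentry) ? 0 : -EISDIR;

	/*
	 * An ordinary walker: wait for any expire of this point to
	 * finish, then for any mount still under construction, and only
	 * then let the walk continue into whatever now covers the dentry.
	 * Both helpers below are hypothetical placeholders.
	 */
	myfs_wait_for_expire(dentry);
	return myfs_wait_for_mount(dentry);	/* 0, or error to the walker */
}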
Signed-off-by: Ian Kent Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/autofs4/autofs_i.h | 50 ++++++++++++++++++++++++++- fs/autofs4/expire.c | 51 +++++++++++++-------------- fs/autofs4/inode.c | 3 +- fs/autofs4/root.c | 95 ++++++++++++++++++++++++++++++++++++++++++++------- 4 files changed, 159 insertions(+), 40 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 1ebfe53872b5..f0c95e0460cf 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -99,7 +99,6 @@ struct autofs_info { }; #define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */ -#define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */ #define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */ struct autofs_wait_queue { @@ -221,6 +220,7 @@ extern const struct file_operations autofs4_root_operations; /* Operations methods */ struct vfsmount *autofs4_d_automount(struct path *); +int autofs4_d_manage(struct dentry *, bool); /* VFS automount flags management functions */ @@ -248,6 +248,54 @@ static inline void managed_dentry_clear_automount(struct dentry *dentry) spin_unlock(&dentry->d_lock); } +static inline void __managed_dentry_set_transit(struct dentry *dentry) +{ + dentry->d_flags |= DCACHE_MANAGE_TRANSIT; +} + +static inline void managed_dentry_set_transit(struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + __managed_dentry_set_transit(dentry); + spin_unlock(&dentry->d_lock); +} + +static inline void __managed_dentry_clear_transit(struct dentry *dentry) +{ + dentry->d_flags &= ~DCACHE_MANAGE_TRANSIT; +} + +static inline void managed_dentry_clear_transit(struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + __managed_dentry_clear_transit(dentry); + spin_unlock(&dentry->d_lock); +} + +static inline void __managed_dentry_set_managed(struct dentry *dentry) +{ + dentry->d_flags |= (DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT); +} + +static inline void managed_dentry_set_managed(struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + __managed_dentry_set_managed(dentry); + spin_unlock(&dentry->d_lock); +} + +static inline void __managed_dentry_clear_managed(struct dentry *dentry) +{ + dentry->d_flags &= ~(DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT); +} + +static inline void managed_dentry_clear_managed(struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + __managed_dentry_clear_managed(dentry); + spin_unlock(&dentry->d_lock); +} + /* Initializing function */ int autofs4_fill_super(struct super_block *, void *, int); diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 0571ec8352b7..3ed79d76c233 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -26,10 +26,6 @@ static inline int autofs4_can_expire(struct dentry *dentry, if (ino == NULL) return 0; - /* No point expiring a pending mount */ - if (ino->flags & AUTOFS_INF_PENDING) - return 0; - if (!do_now) { /* Too young to die */ if (!timeout || time_after(ino->last_used + timeout, now)) @@ -283,6 +279,7 @@ struct dentry *autofs4_expire_direct(struct super_block *sb, unsigned long timeout; struct dentry *root = dget(sb->s_root); int do_now = how & AUTOFS_EXP_IMMEDIATE; + struct autofs_info *ino; if (!root) return NULL; @@ -291,20 +288,21 @@ struct dentry *autofs4_expire_direct(struct super_block *sb, timeout = sbi->exp_timeout; spin_lock(&sbi->fs_lock); + ino = autofs4_dentry_ino(root); + /* No point expiring a pending mount */ + if (ino->flags & AUTOFS_INF_PENDING) { + spin_unlock(&sbi->fs_lock); + return NULL; + } + 
managed_dentry_set_transit(root); if (!autofs4_direct_busy(mnt, root, timeout, do_now)) { struct autofs_info *ino = autofs4_dentry_ino(root); - if (d_mountpoint(root)) { - ino->flags |= AUTOFS_INF_MOUNTPOINT; - spin_lock(&root->d_lock); - root->d_flags &= ~DCACHE_MOUNTED; - spin_unlock(&root->d_lock); - } ino->flags |= AUTOFS_INF_EXPIRING; - managed_dentry_set_automount(root); init_completion(&ino->expire_complete); spin_unlock(&sbi->fs_lock); return root; } + managed_dentry_clear_transit(root); spin_unlock(&sbi->fs_lock); dput(root); @@ -341,6 +339,10 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, while ((dentry = get_next_positive_dentry(dentry, root))) { spin_lock(&sbi->fs_lock); ino = autofs4_dentry_ino(dentry); + /* No point expiring a pending mount */ + if (ino->flags & AUTOFS_INF_PENDING) + goto cont; + managed_dentry_set_transit(dentry); /* * Case 1: (i) indirect mount or top level pseudo direct mount @@ -400,6 +402,8 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, } } next: + managed_dentry_clear_transit(dentry); +cont: spin_unlock(&sbi->fs_lock); } return NULL; @@ -409,7 +413,6 @@ found: expired, (int)expired->d_name.len, expired->d_name.name); ino = autofs4_dentry_ino(expired); ino->flags |= AUTOFS_INF_EXPIRING; - managed_dentry_set_automount(expired); init_completion(&ino->expire_complete); spin_unlock(&sbi->fs_lock); spin_lock(&autofs4_lock); @@ -482,7 +485,7 @@ int autofs4_expire_run(struct super_block *sb, ino = autofs4_dentry_ino(dentry); ino->flags &= ~AUTOFS_INF_EXPIRING; if (!d_unhashed(dentry)) - managed_dentry_clear_automount(dentry); + managed_dentry_clear_transit(dentry); complete_all(&ino->expire_complete); spin_unlock(&sbi->fs_lock); @@ -508,20 +511,18 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt, ret = autofs4_wait(sbi, dentry, NFY_EXPIRE); spin_lock(&sbi->fs_lock); - if (ino->flags & AUTOFS_INF_MOUNTPOINT) { - spin_lock(&sb->s_root->d_lock); - /* - * If we haven't been expired away, then reset - * mounted status. - */ - if (mnt->mnt_parent != mnt) - sb->s_root->d_flags |= DCACHE_MOUNTED; - spin_unlock(&sb->s_root->d_lock); - ino->flags &= ~AUTOFS_INF_MOUNTPOINT; - } ino->flags &= ~AUTOFS_INF_EXPIRING; + spin_lock(&dentry->d_lock); if (ret) - managed_dentry_clear_automount(dentry); + __managed_dentry_clear_transit(dentry); + else { + if ((IS_ROOT(dentry) || + (autofs_type_indirect(sbi->type) && + IS_ROOT(dentry->d_parent))) && + !(dentry->d_flags & DCACHE_NEED_AUTOMOUNT)) + __managed_dentry_set_automount(dentry); + } + spin_unlock(&dentry->d_lock); complete_all(&ino->expire_complete); spin_unlock(&sbi->fs_lock); dput(dentry); diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index d0aa38cac302..75c1ed8e2fb9 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -253,6 +253,7 @@ static struct autofs_info *autofs4_mkroot(struct autofs_sb_info *sbi) static const struct dentry_operations autofs4_sb_dentry_operations = { .d_automount = autofs4_d_automount, + .d_manage = autofs4_d_manage, .d_release = autofs4_dentry_release, }; @@ -322,7 +323,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) } if (autofs_type_trigger(sbi->type)) - __managed_dentry_set_automount(root); + __managed_dentry_set_managed(root); root_inode->i_fop = &autofs4_root_operations; root_inode->i_op = autofs_type_trigger(sbi->type) ? 
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 27dc53e111fd..f1076b91a0fa 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -424,6 +424,7 @@ static const struct dentry_operations autofs4_root_dentry_operations = { /* For other dentries */ static const struct dentry_operations autofs4_dentry_operations = { .d_automount = autofs4_d_automount, + .d_manage = autofs4_d_manage, .d_release = autofs4_dentry_release, }; @@ -604,6 +605,18 @@ struct vfsmount *autofs4_d_automount(struct path *path) DPRINTK("dentry=%p %.*s", dentry, dentry->d_name.len, dentry->d_name.name); + /* + * Someone may have manually umounted this or it was a submount + * that has gone away. + */ + spin_lock(&dentry->d_lock); + if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { + if (!(dentry->d_flags & DCACHE_MANAGE_TRANSIT) && + (dentry->d_flags & DCACHE_NEED_AUTOMOUNT)) + __managed_dentry_set_transit(path->dentry); + } + spin_unlock(&dentry->d_lock); + /* The daemon never triggers a mount. */ if (autofs4_oz_mode(sbi)) return NULL; @@ -633,31 +646,63 @@ struct vfsmount *autofs4_d_automount(struct path *path) /* * If the dentry is a symlink it's equivalent to a directory - * having d_mounted() true, so there's no need to call back + * having d_mountpoint() true, so there's no need to call back * to the daemon. */ if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) goto done; - spin_lock(&dentry->d_lock); - if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { + if (!d_mountpoint(dentry)) { + /* + * It's possible that user space hasn't removed directories + * after umounting a rootless multi-mount, although it + * should. For v5 have_submounts() is sufficient to handle + * this because the leaves of the directory tree under the + * mount never trigger mounts themselves (they have an autofs + * trigger mount mounted on them). But v4 pseudo direct mounts + * do need the leaves to to trigger mounts. In this case we + * have no choice but to use the list_empty() check and + * require user space behave. + */ + if (sbi->version > 4) { + if (have_submounts(dentry)) + goto done; + } else { + spin_lock(&dentry->d_lock); + if (!list_empty(&dentry->d_subdirs)) { + spin_unlock(&dentry->d_lock); + goto done; + } + spin_unlock(&dentry->d_lock); + } ino->flags |= AUTOFS_INF_PENDING; - spin_unlock(&dentry->d_lock); spin_unlock(&sbi->fs_lock); status = autofs4_mount_wait(dentry); if (status) return ERR_PTR(status); spin_lock(&sbi->fs_lock); ino->flags &= ~AUTOFS_INF_PENDING; - goto done; } - spin_unlock(&dentry->d_lock); done: - /* - * Any needed mounting has been completed and the path updated - * so turn this into a normal dentry so we don't continually - * call ->d_automount(). - */ - managed_dentry_clear_automount(dentry); + if (!(ino->flags & AUTOFS_INF_EXPIRING)) { + /* + * Any needed mounting has been completed and the path updated + * so turn this into a normal dentry so we don't continually + * call ->d_automount() and ->d_manage(). + */ + spin_lock(&dentry->d_lock); + __managed_dentry_clear_transit(dentry); + /* + * Only clear DMANAGED_AUTOMOUNT for rootless multi-mounts and + * symlinks as in all other cases the dentry will be covered by + * an actual mount so ->d_automount() won't be called during + * the follow. 
+ */ + if ((!d_mountpoint(dentry) && + !list_empty(&dentry->d_subdirs)) || + (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode))) + __managed_dentry_clear_automount(dentry); + spin_unlock(&dentry->d_lock); + } spin_unlock(&sbi->fs_lock); /* Mount succeeded, check if we ended up with a new dentry */ @@ -668,6 +713,30 @@ done: return NULL; } +int autofs4_d_manage(struct dentry *dentry, bool mounting_here) +{ + struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); + + DPRINTK("dentry=%p %.*s", + dentry, dentry->d_name.len, dentry->d_name.name); + + /* The daemon never waits. */ + if (autofs4_oz_mode(sbi) || mounting_here) { + if (!d_mountpoint(dentry)) + return -EISDIR; + return 0; + } + + /* Wait for pending expires */ + do_expire_wait(dentry); + + /* + * This dentry may be under construction so wait on mount + * completion. + */ + return autofs4_mount_wait(dentry); +} + /* Lookups in the root directory */ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { @@ -704,7 +773,7 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s /* Mark entries in the root as mount triggers */ if (autofs_type_indirect(sbi->type) && IS_ROOT(dentry->d_parent)) { d_set_d_op(dentry, &autofs4_dentry_operations); - managed_dentry_set_automount(dentry); + __managed_dentry_set_managed(dentry); } ino = autofs4_init_ino(NULL, sbi, 0555); -- cgit v1.2.2 From 8c13a676d5a56495c350f3141824a5ef6c6b4606 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 14 Jan 2011 18:46:08 +0000 Subject: autofs4: Remove unused code Remove code that is not used due to the use of ->d_automount() and ->d_manage(). Signed-off-by: Ian Kent Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/autofs4/autofs_i.h | 7 -- fs/autofs4/root.c | 243 -------------------------------------------------- 2 files changed, 250 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index f0c95e0460cf..1ee3b9afbe9e 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -175,13 +175,6 @@ static inline int autofs4_ispending(struct dentry *dentry) return 0; } -static inline void autofs4_copy_atime(struct file *src, struct file *dst) -{ - dst->f_path.dentry->d_inode->i_atime = - src->f_path.dentry->d_inode->i_atime; - return; -} - struct inode *autofs4_get_inode(struct super_block *, struct autofs_info *); void autofs4_free_ino(struct autofs_info *); diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index f1076b91a0fa..b2498c8cb0a7 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -36,9 +36,6 @@ static long autofs4_root_compat_ioctl(struct file *,unsigned int,unsigned long); static int autofs4_dir_open(struct inode *inode, struct file *file); static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *); -#define TRIGGER_FLAGS (LOOKUP_CONTINUE | LOOKUP_DIRECTORY) -#define TRIGGER_INTENTS (LOOKUP_OPEN | LOOKUP_CREATE) - const struct file_operations autofs4_root_operations = { .open = dcache_dir_open, .release = dcache_dir_close, @@ -114,14 +111,6 @@ static void autofs4_del_active(struct dentry *dentry) return; } -static unsigned int autofs4_need_mount(unsigned int flags) -{ - unsigned int res = 0; - if (flags & (TRIGGER_FLAGS | TRIGGER_INTENTS)) - res = 1; - return res; -} - static int autofs4_dir_open(struct inode *inode, struct file *file) { struct dentry *dentry = file->f_path.dentry; @@ -156,238 +145,6 @@ out: return dcache_dir_open(inode, file); } -static int try_to_fill_dentry(struct dentry 
*dentry, int flags) -{ - struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); - struct autofs_info *ino = autofs4_dentry_ino(dentry); - int status; - - DPRINTK("dentry=%p %.*s ino=%p", - dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); - - /* - * Wait for a pending mount, triggering one if there - * isn't one already - */ - if (dentry->d_inode == NULL) { - DPRINTK("waiting for mount name=%.*s", - dentry->d_name.len, dentry->d_name.name); - - status = autofs4_wait(sbi, dentry, NFY_MOUNT); - - DPRINTK("mount done status=%d", status); - - /* Turn this into a real negative dentry? */ - if (status == -ENOENT) { - spin_lock(&sbi->fs_lock); - ino->flags &= ~AUTOFS_INF_PENDING; - spin_unlock(&sbi->fs_lock); - return status; - } else if (status) { - /* Return a negative dentry, but leave it "pending" */ - return status; - } - /* Trigger mount for path component or follow link */ - } else if (ino->flags & AUTOFS_INF_PENDING || - autofs4_need_mount(flags)) { - DPRINTK("waiting for mount name=%.*s", - dentry->d_name.len, dentry->d_name.name); - - spin_lock(&sbi->fs_lock); - ino->flags |= AUTOFS_INF_PENDING; - spin_unlock(&sbi->fs_lock); - status = autofs4_wait(sbi, dentry, NFY_MOUNT); - - DPRINTK("mount done status=%d", status); - - if (status) { - spin_lock(&sbi->fs_lock); - ino->flags &= ~AUTOFS_INF_PENDING; - spin_unlock(&sbi->fs_lock); - return status; - } - } - - /* Initialize expiry counter after successful mount */ - ino->last_used = jiffies; - - spin_lock(&sbi->fs_lock); - ino->flags &= ~AUTOFS_INF_PENDING; - spin_unlock(&sbi->fs_lock); - - return 0; -} - -/* For autofs direct mounts the follow link triggers the mount */ -static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) -{ - struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); - struct autofs_info *ino = autofs4_dentry_ino(dentry); - int oz_mode = autofs4_oz_mode(sbi); - unsigned int lookup_type; - int status; - - DPRINTK("dentry=%p %.*s oz_mode=%d nd->flags=%d", - dentry, dentry->d_name.len, dentry->d_name.name, oz_mode, - nd->flags); - /* - * For an expire of a covered direct or offset mount we need - * to break out of follow_down_one() at the autofs mount trigger - * (d_mounted--), so we can see the expiring flag, and manage - * the blocking and following here until the expire is completed. - */ - if (oz_mode) { - spin_lock(&sbi->fs_lock); - if (ino->flags & AUTOFS_INF_EXPIRING) { - spin_unlock(&sbi->fs_lock); - /* Follow down to our covering mount. */ - if (!follow_down_one(&nd->path)) - goto done; - goto follow; - } - spin_unlock(&sbi->fs_lock); - goto done; - } - - /* If an expire request is pending everyone must wait. */ - autofs4_expire_wait(dentry); - - /* We trigger a mount for almost all flags */ - lookup_type = autofs4_need_mount(nd->flags); - spin_lock(&sbi->fs_lock); - spin_lock(&autofs4_lock); - spin_lock(&dentry->d_lock); - if (!(lookup_type || ino->flags & AUTOFS_INF_PENDING)) { - spin_unlock(&dentry->d_lock); - spin_unlock(&autofs4_lock); - spin_unlock(&sbi->fs_lock); - goto follow; - } - - /* - * If the dentry contains directories then it is an autofs - * multi-mount with no root mount offset. So don't try to - * mount it again. 
- */ - if (ino->flags & AUTOFS_INF_PENDING || - (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) { - spin_unlock(&dentry->d_lock); - spin_unlock(&autofs4_lock); - spin_unlock(&sbi->fs_lock); - - status = try_to_fill_dentry(dentry, nd->flags); - if (status) - goto out_error; - - goto follow; - } - spin_unlock(&dentry->d_lock); - spin_unlock(&autofs4_lock); - spin_unlock(&sbi->fs_lock); -follow: - /* - * If there is no root mount it must be an autofs - * multi-mount with no root offset so we don't need - * to follow it. - */ - if (d_managed(dentry)) { - status = follow_down(&nd->path, false); - if (status < 0) - goto out_error; - } - -done: - return NULL; - -out_error: - path_put(&nd->path); - return ERR_PTR(status); -} - -/* - * Revalidate is called on every cache lookup. Some of those - * cache lookups may actually happen while the dentry is not - * yet completely filled in, and revalidate has to delay such - * lookups.. - */ -static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd) -{ - struct inode *dir; - struct autofs_sb_info *sbi; - int oz_mode; - int flags = nd ? nd->flags : 0; - int status = 1; - - if (flags & LOOKUP_RCU) - return -ECHILD; - - dir = dentry->d_parent->d_inode; - sbi = autofs4_sbi(dir->i_sb); - oz_mode = autofs4_oz_mode(sbi); - - /* Pending dentry */ - spin_lock(&sbi->fs_lock); - if (autofs4_ispending(dentry)) { - /* The daemon never causes a mount to trigger */ - spin_unlock(&sbi->fs_lock); - - if (oz_mode) - return 1; - - /* - * If the directory has gone away due to an expire - * we have been called as ->d_revalidate() and so - * we need to return false and proceed to ->lookup(). - */ - if (autofs4_expire_wait(dentry) == -EAGAIN) - return 0; - - /* - * A zero status is success otherwise we have a - * negative error code. - */ - status = try_to_fill_dentry(dentry, flags); - if (status == 0) - return 1; - - return status; - } - spin_unlock(&sbi->fs_lock); - - /* Negative dentry.. invalidate if "old" */ - if (dentry->d_inode == NULL) - return 0; - - /* Check for a non-mountpoint directory with no contents */ - spin_lock(&autofs4_lock); - spin_lock(&dentry->d_lock); - if (S_ISDIR(dentry->d_inode->i_mode) && - !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { - DPRINTK("dentry=%p %.*s, emptydir", - dentry, dentry->d_name.len, dentry->d_name.name); - spin_unlock(&dentry->d_lock); - spin_unlock(&autofs4_lock); - - /* The daemon never causes a mount to trigger */ - if (oz_mode) - return 1; - - /* - * A zero status is success otherwise we have a - * negative error code. - */ - status = try_to_fill_dentry(dentry, flags); - if (status == 0) - return 1; - - return status; - } - spin_unlock(&dentry->d_lock); - spin_unlock(&autofs4_lock); - - return 1; -} - void autofs4_dentry_release(struct dentry *de) { struct autofs_info *inf; -- cgit v1.2.2 From e61da20a50d21725ff27571a6dff9468e4fb7146 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 14 Jan 2011 18:46:14 +0000 Subject: autofs4: Clean up inode operations Since the use of ->follow_link() has been eliminated there is no need to separate the indirect and direct inode operations. 
Signed-off-by: Ian Kent Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/autofs4/autofs_i.h | 3 --- fs/autofs4/inode.c | 4 +--- fs/autofs4/root.c | 15 --------------- 3 files changed, 1 insertion(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 1ee3b9afbe9e..f4b4030cf406 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -204,9 +204,6 @@ void autofs_dev_ioctl_exit(void); extern const struct inode_operations autofs4_symlink_inode_operations; extern const struct inode_operations autofs4_dir_inode_operations; -extern const struct inode_operations autofs4_root_inode_operations; -extern const struct inode_operations autofs4_indirect_root_inode_operations; -extern const struct inode_operations autofs4_direct_root_inode_operations; extern const struct file_operations autofs4_dir_operations; extern const struct file_operations autofs4_root_operations; diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 75c1ed8e2fb9..dac3dc79ccb4 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -326,9 +326,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) __managed_dentry_set_managed(root); root_inode->i_fop = &autofs4_root_operations; - root_inode->i_op = autofs_type_trigger(sbi->type) ? - &autofs4_direct_root_inode_operations : - &autofs4_indirect_root_inode_operations; + root_inode->i_op = &autofs4_dir_inode_operations; /* Couldn't this be tested earlier? */ if (sbi->max_proto < AUTOFS_MIN_PROTO_VERSION || diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index b2498c8cb0a7..52af410a831d 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -56,21 +56,6 @@ const struct file_operations autofs4_dir_operations = { .llseek = dcache_dir_lseek, }; -const struct inode_operations autofs4_indirect_root_inode_operations = { - .lookup = autofs4_lookup, - .unlink = autofs4_dir_unlink, - .symlink = autofs4_dir_symlink, - .mkdir = autofs4_dir_mkdir, - .rmdir = autofs4_dir_rmdir, -}; - -const struct inode_operations autofs4_direct_root_inode_operations = { - .lookup = autofs4_lookup, - .unlink = autofs4_dir_unlink, - .mkdir = autofs4_dir_mkdir, - .rmdir = autofs4_dir_rmdir, -}; - const struct inode_operations autofs4_dir_inode_operations = { .lookup = autofs4_lookup, .unlink = autofs4_dir_unlink, -- cgit v1.2.2 From 71e469db242c2eeb00faf9caf7d9e00150c00a6e Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 14 Jan 2011 18:46:19 +0000 Subject: autofs4: Clean up dentry operations There are now two distinct dentry operations uses. One for dentrys that trigger mounts and one for dentrys that do not. Rationalize the use of these dentry operations and rename them to reflect their function. 
Signed-off-by: Ian Kent Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/autofs4/autofs_i.h | 7 ++----- fs/autofs4/inode.c | 12 ++++-------- fs/autofs4/root.c | 36 ++++++++++++++++++++---------------- 3 files changed, 26 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index f4b4030cf406..c28085cb82a5 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -206,11 +206,8 @@ extern const struct inode_operations autofs4_symlink_inode_operations; extern const struct inode_operations autofs4_dir_inode_operations; extern const struct file_operations autofs4_dir_operations; extern const struct file_operations autofs4_root_operations; - -/* Operations methods */ - -struct vfsmount *autofs4_d_automount(struct path *); -int autofs4_d_manage(struct dentry *, bool); +extern const struct dentry_operations autofs4_dentry_operations; +extern const struct dentry_operations autofs4_mount_dentry_operations; /* VFS automount flags management functions */ diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index dac3dc79ccb4..427c35746340 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -251,12 +251,6 @@ static struct autofs_info *autofs4_mkroot(struct autofs_sb_info *sbi) return ino; } -static const struct dentry_operations autofs4_sb_dentry_operations = { - .d_automount = autofs4_d_automount, - .d_manage = autofs4_d_manage, - .d_release = autofs4_dentry_release, -}; - int autofs4_fill_super(struct super_block *s, void *data, int silent) { struct inode * root_inode; @@ -311,7 +305,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) goto fail_iput; pipe = NULL; - d_set_d_op(root, &autofs4_sb_dentry_operations); + d_set_d_op(root, &autofs4_dentry_operations); root->d_fsdata = ino; /* Can this call block? 
*/ @@ -322,8 +316,10 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) goto fail_dput; } - if (autofs_type_trigger(sbi->type)) + if (autofs_type_trigger(sbi->type)) { + d_set_d_op(root, &autofs4_mount_dentry_operations); __managed_dentry_set_managed(root); + } root_inode->i_fop = &autofs4_root_operations; root_inode->i_op = &autofs4_dir_inode_operations; diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 52af410a831d..62c1229a4d31 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -35,6 +35,8 @@ static long autofs4_root_compat_ioctl(struct file *,unsigned int,unsigned long); #endif static int autofs4_dir_open(struct inode *inode, struct file *file); static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *); +static struct vfsmount *autofs4_d_automount(struct path *); +static int autofs4_d_manage(struct dentry *, bool); const struct file_operations autofs4_root_operations = { .open = dcache_dir_open, @@ -64,6 +66,18 @@ const struct inode_operations autofs4_dir_inode_operations = { .rmdir = autofs4_dir_rmdir, }; +/* For dentries that don't initiate mounting */ +const struct dentry_operations autofs4_dentry_operations = { + .d_release = autofs4_dentry_release, +}; + +/* For dentries that do initiate mounting */ +const struct dentry_operations autofs4_mount_dentry_operations = { + .d_automount = autofs4_d_automount, + .d_manage = autofs4_d_manage, + .d_release = autofs4_dentry_release, +}; + static void autofs4_add_active(struct dentry *dentry) { struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); @@ -158,18 +172,6 @@ void autofs4_dentry_release(struct dentry *de) } } -/* For dentries of directories in the root dir */ -static const struct dentry_operations autofs4_root_dentry_operations = { - .d_release = autofs4_dentry_release, -}; - -/* For other dentries */ -static const struct dentry_operations autofs4_dentry_operations = { - .d_automount = autofs4_d_automount, - .d_manage = autofs4_d_manage, - .d_release = autofs4_dentry_release, -}; - static struct dentry *autofs4_lookup_active(struct dentry *dentry) { struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); @@ -337,7 +339,7 @@ static struct dentry *autofs4_mountpoint_changed(struct path *path) return path->dentry; } -struct vfsmount *autofs4_d_automount(struct path *path) +static struct vfsmount *autofs4_d_automount(struct path *path) { struct dentry *dentry = path->dentry; struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); @@ -501,7 +503,7 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s if (active) { return active; } else { - d_set_d_op(dentry, &autofs4_root_dentry_operations); + d_set_d_op(dentry, &autofs4_dentry_operations); /* * A dentry that is not within the root can never trigger a @@ -514,7 +516,7 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s /* Mark entries in the root as mount triggers */ if (autofs_type_indirect(sbi->type) && IS_ROOT(dentry->d_parent)) { - d_set_d_op(dentry, &autofs4_dentry_operations); + d_set_d_op(dentry, &autofs4_mount_dentry_operations); __managed_dentry_set_managed(dentry); } @@ -573,6 +575,8 @@ static int autofs4_dir_symlink(struct inode *dir, } d_add(dentry, inode); + d_set_d_op(dentry, &autofs4_dentry_operations); + dentry->d_fsdata = ino; ino->dentry = dget(dentry); atomic_inc(&ino->count); @@ -791,7 +795,7 @@ static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p) int is_autofs4_dentry(struct dentry *dentry) { return dentry && 
dentry->d_inode && - (dentry->d_op == &autofs4_root_dentry_operations || + (dentry->d_op == &autofs4_mount_dentry_operations || dentry->d_op == &autofs4_dentry_operations) && dentry->d_fsdata != NULL; } -- cgit v1.2.2 From 6651149371b842715906311b4631b8489cebf7e8 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 14 Jan 2011 18:46:24 +0000 Subject: autofs4: Clean up autofs4_free_ino() When this function is called the local reference count does't need to be updated since the dentry is going away and dput definitely must not be called here. Also the autofs info struct field inode isn't used so remove it. Signed-off-by: Ian Kent Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/autofs4/inode.c | 13 ------------- fs/autofs4/root.c | 9 --------- 2 files changed, 22 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 427c35746340..3ecd2e2bcdbd 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -45,7 +45,6 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino, if (!reinit) { ino->flags = 0; - ino->inode = NULL; ino->dentry = NULL; ino->size = 0; INIT_LIST_HEAD(&ino->active); @@ -76,19 +75,8 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino, void autofs4_free_ino(struct autofs_info *ino) { - struct autofs_info *p_ino; - if (ino->dentry) { ino->dentry->d_fsdata = NULL; - if (ino->dentry->d_inode) { - struct dentry *parent = ino->dentry->d_parent; - if (atomic_dec_and_test(&ino->count)) { - p_ino = autofs4_dentry_ino(parent); - if (p_ino && parent != ino->dentry) - atomic_dec(&p_ino->count); - } - dput(ino->dentry); - } ino->dentry = NULL; } if (ino->free) @@ -390,7 +378,6 @@ struct inode *autofs4_get_inode(struct super_block *sb, if (inode == NULL) return NULL; - inf->inode = inode; inode->i_mode = inf->mode; if (sb->s_root) { inode->i_uid = sb->s_root->d_inode->i_uid; diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 62c1229a4d31..8315565ed7d4 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -151,11 +151,8 @@ void autofs4_dentry_release(struct dentry *de) DPRINTK("releasing %p", de); inf = autofs4_dentry_ino(de); - de->d_fsdata = NULL; - if (inf) { struct autofs_sb_info *sbi = autofs4_sbi(de->d_sb); - if (sbi) { spin_lock(&sbi->lookup_lock); if (!list_empty(&inf->active)) @@ -164,10 +161,6 @@ void autofs4_dentry_release(struct dentry *de) list_del(&inf->expiring); spin_unlock(&sbi->lookup_lock); } - - inf->dentry = NULL; - inf->inode = NULL; - autofs4_free_ino(inf); } } @@ -583,7 +576,6 @@ static int autofs4_dir_symlink(struct inode *dir, p_ino = autofs4_dentry_ino(dentry->d_parent); if (p_ino && dentry->d_parent != dentry) atomic_inc(&p_ino->count); - ino->inode = inode; ino->u.symlink = cp; dir->i_mtime = CURRENT_TIME; @@ -713,7 +705,6 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode) p_ino = autofs4_dentry_ino(dentry->d_parent); if (p_ino && dentry->d_parent != dentry) atomic_inc(&p_ino->count); - ino->inode = inode; inc_nlink(dir); dir->i_mtime = CURRENT_TIME; -- cgit v1.2.2 From 9e3fea16ba386fa549a0b2de8a203e5d412997a0 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 14 Jan 2011 18:46:30 +0000 Subject: autofs4: Fix wait validation It is possible for the check in wait.c:validate_request() to return an incorrect result if the dentry that was mounted upon has changed during the callback. 
Signed-off-by: Ian Kent Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/autofs4/waitq.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index c5f8459c905e..56010056b2e6 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -309,6 +309,9 @@ static int validate_request(struct autofs_wait_queue **wait, * completed while we waited on the mutex ... */ if (notify == NFY_MOUNT) { + struct dentry *new = NULL; + int valid = 1; + /* * If the dentry was successfully mounted while we slept * on the wait queue mutex we can return success. If it @@ -316,8 +319,20 @@ static int validate_request(struct autofs_wait_queue **wait, * a multi-mount with no mount at it's base) we can * continue on and create a new request. */ + if (!IS_ROOT(dentry)) { + if (dentry->d_inode && d_unhashed(dentry)) { + struct dentry *parent = dentry->d_parent; + new = d_lookup(parent, &dentry->d_name); + if (new) + dentry = new; + } + } if (have_submounts(dentry)) - return 0; + valid = 0; + + if (new) + dput(new); + return valid; } return 1; -- cgit v1.2.2 From dd89f90d2deb9aa5bc8e1b15d726ff5c0bb2b623 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 14 Jan 2011 18:46:35 +0000 Subject: autofs4: Add v4 pseudo direct mount support Version 4 of autofs provides a pseudo direct mount implementation that relies on directories at the leaves of a directory tree under an indirect mount to trigger mounts. This patch adds support for that functionality. Signed-off-by: Ian Kent Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/autofs4/root.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) (limited to 'fs') diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 8315565ed7d4..9194e274f849 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -630,6 +630,58 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry) return 0; } +/* + * Version 4 of autofs provides a pseudo direct mount implementation + * that relies on directories at the leaves of a directory tree under + * an indirect mount to trigger mounts. To allow for this we need to + * set the DMANAGED_AUTOMOUNT and DMANAGED_TRANSIT flags on the leaves + * of the directory tree. There is no need to clear the automount flag + * following a mount or restore it after an expire because these mounts + * are always covered. However, it is neccessary to ensure that these + * flags are clear on non-empty directories to avoid unnecessary calls + * during path walks. 
+ */ +static void autofs_set_leaf_automount_flags(struct dentry *dentry) +{ + struct dentry *parent; + + /* root and dentrys in the root are already handled */ + if (IS_ROOT(dentry->d_parent)) + return; + + managed_dentry_set_managed(dentry); + + parent = dentry->d_parent; + /* only consider parents below dentrys in the root */ + if (IS_ROOT(parent->d_parent)) + return; + managed_dentry_clear_managed(parent); + return; +} + +static void autofs_clear_leaf_automount_flags(struct dentry *dentry) +{ + struct list_head *d_child; + struct dentry *parent; + + /* flags for dentrys in the root are handled elsewhere */ + if (IS_ROOT(dentry->d_parent)) + return; + + managed_dentry_clear_managed(dentry); + + parent = dentry->d_parent; + /* only consider parents below dentrys in the root */ + if (IS_ROOT(parent->d_parent)) + return; + d_child = &dentry->d_u.d_child; + /* Set parent managed if it's becoming empty */ + if (d_child->next == &parent->d_subdirs && + d_child->prev == &parent->d_subdirs) + managed_dentry_set_managed(parent); + return; +} + static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry) { struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); @@ -657,6 +709,9 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry) spin_unlock(&dentry->d_lock); spin_unlock(&autofs4_lock); + if (sbi->version < 5) + autofs_clear_leaf_automount_flags(dentry); + if (atomic_dec_and_test(&ino->count)) { p_ino = autofs4_dentry_ino(dentry->d_parent); if (p_ino && dentry->d_parent != dentry) @@ -699,6 +754,9 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode) } d_add(dentry, inode); + if (sbi->version < 5) + autofs_set_leaf_automount_flags(dentry); + dentry->d_fsdata = ino; ino->dentry = dget(dentry); atomic_inc(&ino->count); -- cgit v1.2.2 From 87556ef19926e97464e0163a7840140527ae6615 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Jan 2011 18:46:46 +0000 Subject: Remove a further kludge from __do_follow_link() Remove a further kludge from __do_follow_link() as it's no longer required with the automount code. This reverts the non-helper-function parts of 051d381259eb57d6074d02a6ba6e90e744f1a29f, which breaks union mounts. Reported-by: vaurora@redhat.com Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/namei.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 61995fba4e21..373852012713 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -800,12 +800,8 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p) touch_atime(link->mnt, dentry); nd_set_link(nd, NULL); - if (link->mnt != nd->path.mnt) { - path_to_nameidata(link, nd); - nd->inode = nd->path.dentry->d_inode; - dget(dentry); - } - mntget(link->mnt); + if (link->mnt == nd->path.mnt) + mntget(link->mnt); nd->last_type = LAST_BIND; *p = dentry->d_inode->i_op->follow_link(dentry, nd); -- cgit v1.2.2 From ab90911ff90cdab59b31c045c3f0ae480d14f29d Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Jan 2011 18:46:51 +0000 Subject: Allow d_manage() to be used in RCU-walk mode Allow d_manage() to be called from pathwalk when it is in RCU-walk mode as well as when it is in Ref-walk mode. This permits __follow_mount_rcu() to call d_manage() directly. d_manage() needs a parameter to indicate that it is in RCU-walk mode as it isn't allowed to sleep if in that mode (but should return -ECHILD instead). 
autofs4_d_manage() can then be set to retain RCU-walk mode if the daemon accesses it and otherwise request dropping back to ref-walk mode. Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/autofs4/root.c | 8 ++++++-- fs/namei.c | 15 ++++++++------- 2 files changed, 14 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 9194e274f849..dbd95512808c 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -36,7 +36,7 @@ static long autofs4_root_compat_ioctl(struct file *,unsigned int,unsigned long); static int autofs4_dir_open(struct inode *inode, struct file *file); static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *); static struct vfsmount *autofs4_d_automount(struct path *); -static int autofs4_d_manage(struct dentry *, bool); +static int autofs4_d_manage(struct dentry *, bool, bool); const struct file_operations autofs4_root_operations = { .open = dcache_dir_open, @@ -450,7 +450,7 @@ done: return NULL; } -int autofs4_d_manage(struct dentry *dentry, bool mounting_here) +int autofs4_d_manage(struct dentry *dentry, bool mounting_here, bool rcu_walk) { struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); @@ -464,6 +464,10 @@ int autofs4_d_manage(struct dentry *dentry, bool mounting_here) return 0; } + /* We need to sleep, so we need pathwalk to be in ref-mode */ + if (rcu_walk) + return -ECHILD; + /* Wait for pending expires */ do_expire_wait(dentry); diff --git a/fs/namei.c b/fs/namei.c index 373852012713..5c89695ae1e4 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -987,7 +987,8 @@ static int follow_managed(struct path *path, unsigned flags) if (managed & DCACHE_MANAGE_TRANSIT) { BUG_ON(!path->dentry->d_op); BUG_ON(!path->dentry->d_op->d_manage); - ret = path->dentry->d_op->d_manage(path->dentry, false); + ret = path->dentry->d_op->d_manage(path->dentry, + false, false); if (ret < 0) return ret == -EISDIR ? 0 : ret; } @@ -1048,13 +1049,12 @@ int follow_down_one(struct path *path) static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, struct inode **inode, bool reverse_transit) { - unsigned abort_mask = - reverse_transit ? 0 : DCACHE_MANAGE_TRANSIT; - while (d_mountpoint(path->dentry)) { struct vfsmount *mounted; - if (path->dentry->d_flags & abort_mask) - return true; + if (unlikely(path->dentry->d_flags & DCACHE_MANAGE_TRANSIT) && + !reverse_transit && + path->dentry->d_op->d_manage(path->dentry, false, true) < 0) + return false; mounted = __lookup_mnt(path->mnt, path->dentry, 1); if (!mounted) break; @@ -1132,7 +1132,8 @@ int follow_down(struct path *path, bool mounting_here) if (managed & DCACHE_MANAGE_TRANSIT) { BUG_ON(!path->dentry->d_op); BUG_ON(!path->dentry->d_op->d_manage); - ret = path->dentry->d_op->d_manage(path->dentry, mounting_here); + ret = path->dentry->d_op->d_manage( + path->dentry, mounting_here, false); if (ret < 0) return ret == -EISDIR ? 0 : ret; } -- cgit v1.2.2 From ea5b778a8b98c85a87d66bf844904f9c3802b869 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Jan 2011 19:10:03 +0000 Subject: Unexport do_add_mount() and add in follow_automount(), not ->d_automount() Unexport do_add_mount() and make ->d_automount() return the vfsmount to be added rather than calling do_add_mount() itself. follow_automount() will then do the addition. This slightly complicates things as ->d_automount() normally wants to add the new vfsmount to an expiration list and start an expiration timer. 
The problem with that is that the vfsmount will be deleted if it has a refcount of 1 and the timer will not repeat if the expiration list is empty. To this end, we require the vfsmount to be returned from d_automount() with a refcount of (at least) 2. One of these refs will be dropped unconditionally. In addition, follow_automount() must get a 3rd ref around the call to do_add_mount() lest it eat a ref and return an error, leaving the mount we have open to being expired as we would otherwise have only 1 ref on it. d_automount() should also add the the vfsmount to the expiration list (by calling mnt_set_expiry()) and start the expiration timer before returning, if this mechanism is to be used. The vfsmount will be unlinked from the expiration list by follow_automount() if do_add_mount() fails. This patch also fixes the call to do_add_mount() for AFS to propagate the mount flags from the parent vfsmount. Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/afs/mntpt.c | 25 ++++++------------------- fs/cifs/cifs_dfs_ref.c | 26 ++++++-------------------- fs/internal.h | 2 ++ fs/namei.c | 42 +++++++++++++++++++++++++++++++++++------- fs/namespace.c | 41 +++++++++++++++++++++++++++++++++-------- fs/nfs/namespace.c | 24 ++++-------------------- 6 files changed, 86 insertions(+), 74 deletions(-) (limited to 'fs') diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index d23b2e344a78..aa59184151d0 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -241,7 +241,6 @@ error_no_devname: struct vfsmount *afs_d_automount(struct path *path) { struct vfsmount *newmnt; - int err; _enter("{%s,%s}", path->mnt->mnt_devname, path->dentry->d_name.name); @@ -249,24 +248,12 @@ struct vfsmount *afs_d_automount(struct path *path) if (IS_ERR(newmnt)) return newmnt; - mntget(newmnt); - err = do_add_mount(newmnt, path, MNT_SHRINKABLE, &afs_vfsmounts); - switch (err) { - case 0: - queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer, - afs_mntpt_expiry_timeout * HZ); - _leave(" = %p {%s}", newmnt, newmnt->mnt_devname); - return newmnt; - case -EBUSY: - /* someone else made a mount here whilst we were busy */ - mntput(newmnt); - _leave(" = NULL [EBUSY]"); - return NULL; - default: - mntput(newmnt); - _leave(" = %d", err); - return ERR_PTR(err); - } + mntget(newmnt); /* prevent immediate expiration */ + mnt_set_expiry(newmnt, &afs_vfsmounts); + queue_delayed_work(afs_wq, &afs_mntpt_expiry_timer, + afs_mntpt_expiry_timeout * HZ); + _leave(" = %p {%s}", newmnt, newmnt->mnt_devname); + return newmnt; } /* diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 0fc163808de3..7ed36536e754 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -351,7 +351,6 @@ free_xid: struct vfsmount *cifs_dfs_d_automount(struct path *path) { struct vfsmount *newmnt; - int err; cFYI(1, "in %s", __func__); @@ -361,25 +360,12 @@ struct vfsmount *cifs_dfs_d_automount(struct path *path) return newmnt; } - mntget(newmnt); - err = do_add_mount(newmnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE, - &cifs_dfs_automount_list); - switch (err) { - case 0: - schedule_delayed_work(&cifs_dfs_automount_task, - cifs_dfs_mountpoint_expiry_timeout); - cFYI(1, "leaving %s [ok]" , __func__); - return newmnt; - case -EBUSY: - /* someone else made a mount here whilst we were busy */ - mntput(newmnt); - cFYI(1, "leaving %s [EBUSY]" , __func__); - return NULL; - default: - mntput(newmnt); - cFYI(1, "leaving %s [error %d]" , __func__, err); - return ERR_PTR(err); - } + mntget(newmnt); /* prevent immediate expiration */ + mnt_set_expiry(newmnt, 
&cifs_dfs_automount_list); + schedule_delayed_work(&cifs_dfs_automount_task, + cifs_dfs_mountpoint_expiry_timeout); + cFYI(1, "leaving %s [ok]" , __func__); + return newmnt; } const struct inode_operations cifs_dfs_referral_inode_operations = { diff --git a/fs/internal.h b/fs/internal.h index 9687c2ee2735..4931060fd089 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -70,6 +70,8 @@ extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *, extern void release_mounts(struct list_head *); extern void umount_tree(struct vfsmount *, int, struct list_head *); extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int); +extern int do_add_mount(struct vfsmount *, struct path *, int); +extern void mnt_clear_expiry(struct vfsmount *); extern void __init mnt_init(void); diff --git a/fs/namei.c b/fs/namei.c index 5c89695ae1e4..c2e37727e3ab 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -900,6 +900,7 @@ static int follow_automount(struct path *path, unsigned flags, bool *need_mntput) { struct vfsmount *mnt; + int err; if (!path->dentry->d_op || !path->dentry->d_op->d_automount) return -EREMOTE; @@ -942,22 +943,49 @@ static int follow_automount(struct path *path, unsigned flags, return -EREMOTE; return PTR_ERR(mnt); } + if (!mnt) /* mount collision */ return 0; + /* The new mount record should have at least 2 refs to prevent it being + * expired before we get a chance to add it + */ + BUG_ON(mnt_get_count(mnt) < 2); + if (mnt->mnt_sb == path->mnt->mnt_sb && mnt->mnt_root == path->dentry) { + mnt_clear_expiry(mnt); + mntput(mnt); mntput(mnt); return -ELOOP; } - dput(path->dentry); - if (*need_mntput) - mntput(path->mnt); - path->mnt = mnt; - path->dentry = dget(mnt->mnt_root); - *need_mntput = true; - return 0; + /* We need to add the mountpoint to the parent. The filesystem may + * have placed it on an expiry list, and so we need to make sure it + * won't be expired under us if do_add_mount() fails (do_add_mount() + * will eat a reference unconditionally). + */ + mntget(mnt); + err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE); + switch (err) { + case -EBUSY: + /* Someone else made a mount here whilst we were busy */ + err = 0; + default: + mnt_clear_expiry(mnt); + mntput(mnt); + mntput(mnt); + return err; + case 0: + mntput(mnt); + dput(path->dentry); + if (*need_mntput) + mntput(path->mnt); + path->mnt = mnt; + path->dentry = dget(mnt->mnt_root); + *need_mntput = true; + return 0; + } } /* diff --git a/fs/namespace.c b/fs/namespace.c index d94ccd6ddafd..bfcb701f9490 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1925,15 +1925,14 @@ static int do_new_mount(struct path *path, char *type, int flags, if (IS_ERR(mnt)) return PTR_ERR(mnt); - return do_add_mount(mnt, path, mnt_flags, NULL); + return do_add_mount(mnt, path, mnt_flags); } /* * add a mount into a namespace's mount tree - * - provide the option of adding the new mount to an expiration list + * - this unconditionally eats one of the caller's references to newmnt. 
*/ -int do_add_mount(struct vfsmount *newmnt, struct path *path, - int mnt_flags, struct list_head *fslist) +int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags) { int err; @@ -1963,9 +1962,6 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path, if ((err = graft_tree(newmnt, path))) goto unlock; - if (fslist) /* add to the specified expiration list */ - list_add_tail(&newmnt->mnt_expire, fslist); - up_write(&namespace_sem); return 0; @@ -1975,7 +1971,36 @@ unlock: return err; } -EXPORT_SYMBOL_GPL(do_add_mount); +/** + * mnt_set_expiry - Put a mount on an expiration list + * @mnt: The mount to list. + * @expiry_list: The list to add the mount to. + */ +void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) +{ + down_write(&namespace_sem); + br_write_lock(vfsmount_lock); + + list_add_tail(&mnt->mnt_expire, expiry_list); + + br_write_unlock(vfsmount_lock); + up_write(&namespace_sem); +} +EXPORT_SYMBOL(mnt_set_expiry); + +/* + * Remove a vfsmount from any expiration list it may be on + */ +void mnt_clear_expiry(struct vfsmount *mnt) +{ + if (!list_empty(&mnt->mnt_expire)) { + down_write(&namespace_sem); + br_write_lock(vfsmount_lock); + list_del_init(&mnt->mnt_expire); + br_write_unlock(vfsmount_lock); + up_write(&namespace_sem); + } +} /* * process a list of expirable mountpoints with the intent of discarding any diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index f3fbb1bf3f18..f32b8603dca8 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -149,26 +149,10 @@ struct vfsmount *nfs_d_automount(struct path *path) if (IS_ERR(mnt)) goto out; - mntget(mnt); - err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE, - &nfs_automount_list); - switch (err) { - case 0: - dprintk("%s: done, success\n", __func__); - schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout); - break; - case -EBUSY: - /* someone else made a mount here whilst we were busy */ - mntput(mnt); - dprintk("%s: done, collision\n", __func__); - mnt = NULL; - break; - default: - mntput(mnt); - dprintk("%s: done, error %d\n", __func__, err); - mnt = ERR_PTR(err); - break; - } + dprintk("%s: done, success\n", __func__); + mntget(mnt); /* prevent immediate expiration */ + mnt_set_expiry(mnt, &nfs_automount_list); + schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout); out: nfs_free_fattr(fattr); -- cgit v1.2.2 From b650c858c26bd9ba29ebc82d30f09355845a294a Mon Sep 17 00:00:00 2001 From: David Howells Date: Sat, 15 Jan 2011 10:51:57 +0000 Subject: autofs4: Merge the remaining dentry ops tables Merge the remaining autofs4 dentry ops tables. It doesn't matter if d_automount and d_manage are present on something that's not mountable or holdable as these ops are only used if the appropriate flags are set in dentry->d_flags. [AV] switch to ->s_d_op, since now _everything_ on autofs4 is using the same dentry_operations. 
Signed-off-by: David Howells Signed-off-by: Al Viro --- fs/autofs4/autofs_i.h | 1 - fs/autofs4/inode.c | 6 ++---- fs/autofs4/root.c | 17 ++--------------- 3 files changed, 4 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index c28085cb82a5..1f016bfb42d5 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -207,7 +207,6 @@ extern const struct inode_operations autofs4_dir_inode_operations; extern const struct file_operations autofs4_dir_operations; extern const struct file_operations autofs4_root_operations; extern const struct dentry_operations autofs4_dentry_operations; -extern const struct dentry_operations autofs4_mount_dentry_operations; /* VFS automount flags management functions */ diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 3ecd2e2bcdbd..9e1a9dad23e1 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -276,6 +276,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) s->s_blocksize_bits = 10; s->s_magic = AUTOFS_SUPER_MAGIC; s->s_op = &autofs4_sops; + s->s_d_op = &autofs4_dentry_operations; s->s_time_gran = 1; /* @@ -293,7 +294,6 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) goto fail_iput; pipe = NULL; - d_set_d_op(root, &autofs4_dentry_operations); root->d_fsdata = ino; /* Can this call block? */ @@ -304,10 +304,8 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) goto fail_dput; } - if (autofs_type_trigger(sbi->type)) { - d_set_d_op(root, &autofs4_mount_dentry_operations); + if (autofs_type_trigger(sbi->type)) __managed_dentry_set_managed(root); - } root_inode->i_fop = &autofs4_root_operations; root_inode->i_op = &autofs4_dir_inode_operations; diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index dbd95512808c..1dba035fc376 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -66,13 +66,7 @@ const struct inode_operations autofs4_dir_inode_operations = { .rmdir = autofs4_dir_rmdir, }; -/* For dentries that don't initiate mounting */ const struct dentry_operations autofs4_dentry_operations = { - .d_release = autofs4_dentry_release, -}; - -/* For dentries that do initiate mounting */ -const struct dentry_operations autofs4_mount_dentry_operations = { .d_automount = autofs4_d_automount, .d_manage = autofs4_d_manage, .d_release = autofs4_dentry_release, @@ -500,8 +494,6 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s if (active) { return active; } else { - d_set_d_op(dentry, &autofs4_dentry_operations); - /* * A dentry that is not within the root can never trigger a * mount operation, unless the directory already exists, so we @@ -512,10 +504,8 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s return ERR_PTR(-ENOENT); /* Mark entries in the root as mount triggers */ - if (autofs_type_indirect(sbi->type) && IS_ROOT(dentry->d_parent)) { - d_set_d_op(dentry, &autofs4_mount_dentry_operations); + if (autofs_type_indirect(sbi->type) && IS_ROOT(dentry->d_parent)) __managed_dentry_set_managed(dentry); - } ino = autofs4_init_ino(NULL, sbi, 0555); if (!ino) @@ -572,8 +562,6 @@ static int autofs4_dir_symlink(struct inode *dir, } d_add(dentry, inode); - d_set_d_op(dentry, &autofs4_dentry_operations); - dentry->d_fsdata = ino; ino->dentry = dget(dentry); atomic_inc(&ino->count); @@ -848,8 +836,7 @@ static inline int autofs4_ask_umount(struct vfsmount *mnt, int __user *p) int is_autofs4_dentry(struct dentry *dentry) { return dentry && dentry->d_inode && - 
(dentry->d_op == &autofs4_mount_dentry_operations || - dentry->d_op == &autofs4_dentry_operations) && + dentry->d_op == &autofs4_dentry_operations && dentry->d_fsdata != NULL; } -- cgit v1.2.2 From f580eb0931fbcb6dc3916f094f471671facd1daa Mon Sep 17 00:00:00 2001 From: Stefan Schmidt Date: Wed, 12 Jan 2011 09:30:42 +0000 Subject: fs/btrfs: Fix build of ctree CC [M] fs/btrfs/ctree.o In file included from fs/btrfs/ctree.c:21:0: fs/btrfs/ctree.h:1003:17: error: field <91>super_kobj<92> has incomplete type fs/btrfs/ctree.h:1074:17: error: field <91>root_kobj<92> has incomplete type make[2]: *** [fs/btrfs/ctree.o] Error 1 make[1]: *** [fs/btrfs] Error 2 make: *** [fs] Error 2 We need to include kobject.h here. Reported-by: Jeff Garzik Fix-suggested-by: Li Zefan Signed-off-by: Stefan Schmidt Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 4acd4c611efa..0cb322cc4fc0 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include "extent_io.h" #include "extent_map.h" -- cgit v1.2.2 From 299a08b1c34f9397797946a0fa215c5fd145c5cf Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 5 Jan 2011 10:07:15 +0000 Subject: btrfs: fix wrong data space statistics Josef has implemented mixed data/metadata chunks, we must add those chunks' space just like data chunks. Signed-off-by: Miao Xie Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/super.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index a1a76b2a61f9..caa5bcc62f16 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -790,11 +790,10 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) rcu_read_lock(); list_for_each_entry_rcu(found, head, list) { - if (found->flags & (BTRFS_BLOCK_GROUP_METADATA | - BTRFS_BLOCK_GROUP_SYSTEM)) - total_used_data += found->disk_total; - else + if (found->flags & BTRFS_BLOCK_GROUP_DATA) total_used_data += found->disk_used; + else + total_used_data += found->disk_total; total_used += found->disk_used; } rcu_read_unlock(); -- cgit v1.2.2 From d52a5b5f1fa40804f681cf9868d4a8f90661bdf3 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 5 Jan 2011 10:07:18 +0000 Subject: btrfs: try to reclaim some space when chunk allocation fails We cannot write data into files when when there is tiny space in the filesystem. Reproduce steps: # mkfs.btrfs /dev/sda1 # mount /dev/sda1 /mnt # dd if=/dev/zero of=/mnt/tmpfile0 bs=4K count=1 # dd if=/dev/zero of=/mnt/tmpfile1 bs=4K count=99999999999999 (fill the filesystem) # umount /mnt # mount /dev/sda1 /mnt # rm -f /mnt/tmpfile0 # dd if=/dev/zero of=/mnt/tmpfile0 bs=4K count=1 (failed with nospec) But if we do the last step again, we can write data successfully. The reason of the problem is that btrfs didn't try to commit the current transaction and reclaim some space when chunk allocation failed. This patch fixes it by committing the current transaction to reclaim some space when chunk allocation fails. 
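The shape of that fix is a small "commit once, then retry" loop around the space reservation. A rough illustration in plain C follows (this is not btrfs code; reserve_space() and reclaim_space() are invented stand-ins for the reservation and the transaction-commit path):

#include <errno.h>
#include <stdio.h>

/* Invented model: the first reservation fails for lack of space, and one
 * reclaim pass (committing the transaction, in btrfs terms) frees some. */
static int space_left = 0;

static int reserve_space(int bytes)
{
	if (bytes > space_left)
		return -ENOSPC;
	space_left -= bytes;
	return 0;
}

static void reclaim_space(void)
{
	space_left += 4096;	/* e.g. space released by deleted files */
}

static int reserve_with_retry(int bytes)
{
	int committed = 0;
	int ret;

	for (;;) {
		ret = reserve_space(bytes);
		if (ret != -ENOSPC || committed)
			return ret;
		/* first ENOSPC: reclaim once, then try again */
		reclaim_space();
		committed = 1;
	}
}

int main(void)
{
	printf("reserve 4096: %d\n", reserve_with_retry(4096));
	return 0;
}

A second -ENOSPC after the single reclaim pass is passed back to the caller, which mirrors the committed flag and commit_trans label used in the patch below.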
Signed-off-by: Miao Xie Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b180efdc8b68..3c71d95111fe 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3162,8 +3162,12 @@ alloc: bytes + 2 * 1024 * 1024, alloc_target, 0); btrfs_end_transaction(trans, root); - if (ret < 0) - return ret; + if (ret < 0) { + if (ret != -ENOSPC) + return ret; + else + goto commit_trans; + } if (!data_sinfo) { btrfs_set_inode_space_info(root, inode); @@ -3174,6 +3178,7 @@ alloc: spin_unlock(&data_sinfo->lock); /* commit the current transaction and try again */ +commit_trans: if (!committed && !root->fs_info->open_ioctl_trans) { committed = 1; trans = btrfs_join_transaction(root, 1); -- cgit v1.2.2 From 1974a3b42d8cf7a9c74f1e0310c593023617037a Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 5 Jan 2011 10:07:24 +0000 Subject: btrfs: fix wrong calculation of stripe size There are two tiny problem: - One is When we check the chunk size is greater than the max chunk size or not, we should take mirrors into account, but the original code didn't. - The other is btrfs shouldn't use the size of the residual free space as the length of of a dup chunk when doing chunk allocation. It is because the device space that a dup chunk needs is twice as large as the chunk size, if we use the size of the residual free space as the length of a dup chunk, we can not get enough free space. Fix it. Signed-off-by: Miao Xie Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 177b73179590..c50a85e0d08f 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -2177,6 +2177,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, int num_stripes = 1; int min_stripes = 1; int sub_stripes = 0; + int ncopies = 1; int looped = 0; int ret; int index; @@ -2197,12 +2198,14 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, if (type & (BTRFS_BLOCK_GROUP_DUP)) { num_stripes = 2; min_stripes = 2; + ncopies = 2; } if (type & (BTRFS_BLOCK_GROUP_RAID1)) { if (fs_devices->rw_devices < 2) return -ENOSPC; num_stripes = 2; min_stripes = 2; + ncopies = 2; } if (type & (BTRFS_BLOCK_GROUP_RAID10)) { num_stripes = fs_devices->rw_devices; @@ -2210,6 +2213,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, return -ENOSPC; num_stripes &= ~(u32)1; sub_stripes = 2; + ncopies = 2; min_stripes = 4; } @@ -2239,8 +2243,8 @@ again: map->num_stripes = num_stripes; } - if (calc_size * num_stripes > max_chunk_size) { - calc_size = max_chunk_size; + if (calc_size * num_stripes > max_chunk_size * ncopies) { + calc_size = max_chunk_size * ncopies; do_div(calc_size, num_stripes); do_div(calc_size, stripe_len); calc_size *= stripe_len; @@ -2321,6 +2325,8 @@ again: if (!looped && max_avail > 0) { looped = 1; calc_size = max_avail; + if (type & BTRFS_BLOCK_GROUP_DUP) + do_div(calc_size, 2); goto again; } kfree(map); -- cgit v1.2.2 From 7bfc837df935d850fe996dfe92ef48975cd4170a Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 5 Jan 2011 10:07:26 +0000 Subject: btrfs: restructure find_free_dev_extent() - make it return the start position and length of the max free space when it can not find a suitable free space. 
- make it more readability Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 4 +- fs/btrfs/volumes.c | 155 ++++++++++++++++++++++++++++--------------------- 2 files changed, 91 insertions(+), 68 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 3c71d95111fe..1e1c9a177626 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8099,7 +8099,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) mutex_lock(&root->fs_info->chunk_mutex); list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { u64 min_free = btrfs_block_group_used(&block_group->item); - u64 dev_offset, max_avail; + u64 dev_offset; /* * check to make sure we can actually find a chunk with enough @@ -8107,7 +8107,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) */ if (device->total_bytes > device->bytes_used + min_free) { ret = find_free_dev_extent(NULL, device, min_free, - &dev_offset, &max_avail); + &dev_offset, NULL); if (!ret) break; ret = -1; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c50a85e0d08f..4838bd395e49 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -729,58 +729,82 @@ error: } /* + * find_free_dev_extent - find free space in the specified device + * @trans: transaction handler + * @device: the device which we search the free space in + * @num_bytes: the size of the free space that we need + * @start: store the start of the free space. + * @len: the size of the free space. that we find, or the size of the max + * free space if we don't find suitable free space + * * this uses a pretty simple search, the expectation is that it is * called very infrequently and that a given device has a small number * of extents + * + * @start is used to store the start of the free space if we find. But if we + * don't find suitable free space, it will be used to store the start position + * of the max free space. + * + * @len is used to store the size of the free space that we find. + * But if we don't find suitable free space, it is used to store the size of + * the max free space. 
*/ int find_free_dev_extent(struct btrfs_trans_handle *trans, struct btrfs_device *device, u64 num_bytes, - u64 *start, u64 *max_avail) + u64 *start, u64 *len) { struct btrfs_key key; struct btrfs_root *root = device->dev_root; - struct btrfs_dev_extent *dev_extent = NULL; + struct btrfs_dev_extent *dev_extent; struct btrfs_path *path; - u64 hole_size = 0; - u64 last_byte = 0; - u64 search_start = 0; + u64 hole_size; + u64 max_hole_start; + u64 max_hole_size; + u64 extent_end; + u64 search_start; u64 search_end = device->total_bytes; int ret; - int slot = 0; - int start_found; + int slot; struct extent_buffer *l; - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - path->reada = 2; - start_found = 0; - /* FIXME use last free of some kind */ /* we don't want to overwrite the superblock on the drive, * so we make sure to start at an offset of at least 1MB */ - search_start = max((u64)1024 * 1024, search_start); + search_start = 1024 * 1024; - if (root->fs_info->alloc_start + num_bytes <= device->total_bytes) + if (root->fs_info->alloc_start + num_bytes <= search_end) search_start = max(root->fs_info->alloc_start, search_start); + max_hole_start = search_start; + max_hole_size = 0; + + if (search_start >= search_end) { + ret = -ENOSPC; + goto error; + } + + path = btrfs_alloc_path(); + if (!path) { + ret = -ENOMEM; + goto error; + } + path->reada = 2; + key.objectid = device->devid; key.offset = search_start; key.type = BTRFS_DEV_EXTENT_KEY; + ret = btrfs_search_slot(trans, root, &key, path, 0, 0); if (ret < 0) - goto error; + goto out; if (ret > 0) { ret = btrfs_previous_item(root, path, key.objectid, key.type); if (ret < 0) - goto error; - if (ret > 0) - start_found = 1; + goto out; } - l = path->nodes[0]; - btrfs_item_key_to_cpu(l, &key, path->slots[0]); + while (1) { l = path->nodes[0]; slot = path->slots[0]; @@ -789,24 +813,9 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans, if (ret == 0) continue; if (ret < 0) - goto error; -no_more_items: - if (!start_found) { - if (search_start >= search_end) { - ret = -ENOSPC; - goto error; - } - *start = search_start; - start_found = 1; - goto check_pending; - } - *start = last_byte > search_start ? - last_byte : search_start; - if (search_end <= *start) { - ret = -ENOSPC; - goto error; - } - goto check_pending; + goto out; + + break; } btrfs_item_key_to_cpu(l, &key, slot); @@ -814,48 +823,62 @@ no_more_items: goto next; if (key.objectid > device->devid) - goto no_more_items; + break; - if (key.offset >= search_start && key.offset > last_byte && - start_found) { - if (last_byte < search_start) - last_byte = search_start; - hole_size = key.offset - last_byte; + if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) + goto next; - if (hole_size > *max_avail) - *max_avail = hole_size; + if (key.offset > search_start) { + hole_size = key.offset - search_start; - if (key.offset > last_byte && - hole_size >= num_bytes) { - *start = last_byte; - goto check_pending; + if (hole_size > max_hole_size) { + max_hole_start = search_start; + max_hole_size = hole_size; + } + + /* + * If this free space is greater than which we need, + * it must be the max free space that we have found + * until now, so max_hole_start must point to the start + * of this free space and the length of this free space + * is stored in max_hole_size. Thus, we return + * max_hole_start and max_hole_size and go back to the + * caller. 
+ */ + if (hole_size >= num_bytes) { + ret = 0; + goto out; } } - if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) - goto next; - start_found = 1; dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); - last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent); + extent_end = key.offset + btrfs_dev_extent_length(l, + dev_extent); + if (extent_end > search_start) + search_start = extent_end; next: path->slots[0]++; cond_resched(); } -check_pending: - /* we have to make sure we didn't find an extent that has already - * been allocated by the map tree or the original allocation - */ - BUG_ON(*start < search_start); - if (*start + num_bytes > search_end) { - ret = -ENOSPC; - goto error; + hole_size = search_end- search_start; + if (hole_size > max_hole_size) { + max_hole_start = search_start; + max_hole_size = hole_size; } - /* check for pending inserts here */ - ret = 0; -error: + /* See above. */ + if (hole_size < num_bytes) + ret = -ENOSPC; + else + ret = 0; + +out: btrfs_free_path(path); +error: + *start = max_hole_start; + if (len && max_hole_size > *len) + *len = max_hole_size; return ret; } -- cgit v1.2.2 From b2117a39fa96cf4814e7cab8c11494149ba6f29d Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 5 Jan 2011 10:07:28 +0000 Subject: btrfs: make the chunk allocator utilize the devices better With this patch, we change the handling method when we can not get enough free extents with default size. Implementation: 1. Look up the suitable free extent on each device and keep the search result. If not find a suitable free extent, keep the max free extent 2. If we get enough suitable free extents with default size, chunk allocation succeeds. 3. If we can not get enough free extents, but the number of the extent with default size is >= min_stripes, we just change the mapping information (reduce the number of stripes in the extent map), and chunk allocation succeeds. 4. If the number of the extent with default size is < min_stripes, sort the devices by its max free extent's size descending 5. Use the size of the max free extent on the (num_stripes - 1)th device as the stripe size to allocate the device space By this way, the chunk allocator can allocate chunks as large as possible when the devices' space is not enough and make full use of the devices. 
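A rough userspace sketch of the stripe-size fallback in steps 1-5 above (this is not the kernel implementation; the free-extent sizes, the 1GB default stripe and the RAID parameters are made-up example values):

#include <stdio.h>
#include <stdlib.h>

/* sort largest-first so avail[0] is the biggest free extent */
static int cmp_desc(const void *a, const void *b)
{
	const unsigned long long *x = a, *y = b;
	return (*y > *x) - (*y < *x);
}

int main(void)
{
	/* largest free extent on each device (made-up example values) */
	unsigned long long avail[] = { 5ULL << 30, 600ULL << 20, 300ULL << 20 };
	int ndev = 3;
	int num_stripes = ndev;			/* e.g. RAID0 wants every device */
	int min_stripes = 2;
	unsigned long long def_size = 1ULL << 30;	/* default 1GB stripe */
	unsigned long long stripe;
	int i, fit = 0;

	qsort(avail, ndev, sizeof(avail[0]), cmp_desc);
	for (i = 0; i < ndev; i++)
		if (avail[i] >= def_size)
			fit++;		/* devices that can hold a default stripe */

	if (fit >= num_stripes) {
		stripe = def_size;		/* step 2: default size fits everywhere */
	} else if (fit >= min_stripes) {
		num_stripes = fit;		/* step 3: keep the size, use fewer stripes */
		stripe = def_size;
	} else {
		/* steps 4-5: fall back to min_stripes and shrink the stripe to
		 * the smallest of the min_stripes largest free extents
		 * (a real implementation would still fail if even this is 0) */
		num_stripes = min_stripes;
		stripe = avail[num_stripes - 1];
	}

	printf("num_stripes=%d stripe_size=%llu bytes\n", num_stripes, stripe);
	return 0;
}

With these example values only one device can hold the default stripe, so the sketch drops to min_stripes and shrinks the stripe to the second-largest free extent instead of failing with ENOSPC, which is the behaviour the patch is after.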
Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 379 ++++++++++++++++++++++++++++++++++++++--------------- fs/btrfs/volumes.h | 24 ++++ 2 files changed, 300 insertions(+), 103 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 4838bd395e49..c22784b989b7 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -877,7 +877,7 @@ out: btrfs_free_path(path); error: *start = max_hole_start; - if (len && max_hole_size > *len) + if (len) *len = max_hole_size; return ret; } @@ -2176,70 +2176,67 @@ static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size, return calc_size * num_stripes; } -static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, - struct btrfs_root *extent_root, - struct map_lookup **map_ret, - u64 *num_bytes, u64 *stripe_size, - u64 start, u64 type) +/* Used to sort the devices by max_avail(descending sort) */ +int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2) { - struct btrfs_fs_info *info = extent_root->fs_info; - struct btrfs_device *device = NULL; - struct btrfs_fs_devices *fs_devices = info->fs_devices; - struct list_head *cur; - struct map_lookup *map = NULL; - struct extent_map_tree *em_tree; - struct extent_map *em; - struct list_head private_devs; - int min_stripe_size = 1 * 1024 * 1024; - u64 calc_size = 1024 * 1024 * 1024; - u64 max_chunk_size = calc_size; - u64 min_free; - u64 avail; - u64 max_avail = 0; - u64 dev_offset; - int num_stripes = 1; - int min_stripes = 1; - int sub_stripes = 0; - int ncopies = 1; - int looped = 0; - int ret; - int index; - int stripe_len = 64 * 1024; + if (((struct btrfs_device_info *)dev_info1)->max_avail > + ((struct btrfs_device_info *)dev_info2)->max_avail) + return -1; + else if (((struct btrfs_device_info *)dev_info1)->max_avail < + ((struct btrfs_device_info *)dev_info2)->max_avail) + return 1; + else + return 0; +} - if ((type & BTRFS_BLOCK_GROUP_RAID1) && - (type & BTRFS_BLOCK_GROUP_DUP)) { - WARN_ON(1); - type &= ~BTRFS_BLOCK_GROUP_DUP; - } - if (list_empty(&fs_devices->alloc_list)) - return -ENOSPC; +static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type, + int *num_stripes, int *min_stripes, + int *sub_stripes) +{ + *num_stripes = 1; + *min_stripes = 1; + *sub_stripes = 0; if (type & (BTRFS_BLOCK_GROUP_RAID0)) { - num_stripes = fs_devices->rw_devices; - min_stripes = 2; + *num_stripes = fs_devices->rw_devices; + *min_stripes = 2; } if (type & (BTRFS_BLOCK_GROUP_DUP)) { - num_stripes = 2; - min_stripes = 2; - ncopies = 2; + *num_stripes = 2; + *min_stripes = 2; } if (type & (BTRFS_BLOCK_GROUP_RAID1)) { if (fs_devices->rw_devices < 2) return -ENOSPC; - num_stripes = 2; - min_stripes = 2; - ncopies = 2; + *num_stripes = 2; + *min_stripes = 2; } if (type & (BTRFS_BLOCK_GROUP_RAID10)) { - num_stripes = fs_devices->rw_devices; - if (num_stripes < 4) + *num_stripes = fs_devices->rw_devices; + if (*num_stripes < 4) return -ENOSPC; - num_stripes &= ~(u32)1; - sub_stripes = 2; - ncopies = 2; - min_stripes = 4; + *num_stripes &= ~(u32)1; + *sub_stripes = 2; + *min_stripes = 4; } + return 0; +} + +static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices, + u64 proposed_size, u64 type, + int num_stripes, int small_stripe) +{ + int min_stripe_size = 1 * 1024 * 1024; + u64 calc_size = proposed_size; + u64 max_chunk_size = calc_size; + int ncopies = 1; + + if (type & (BTRFS_BLOCK_GROUP_RAID1 | + BTRFS_BLOCK_GROUP_DUP | + BTRFS_BLOCK_GROUP_RAID10)) + ncopies = 2; + if (type & 
BTRFS_BLOCK_GROUP_DATA) { max_chunk_size = 10 * calc_size; min_stripe_size = 64 * 1024 * 1024; @@ -2256,51 +2253,209 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), max_chunk_size); -again: - max_avail = 0; - if (!map || map->num_stripes != num_stripes) { - kfree(map); - map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); - if (!map) - return -ENOMEM; - map->num_stripes = num_stripes; - } - if (calc_size * num_stripes > max_chunk_size * ncopies) { calc_size = max_chunk_size * ncopies; do_div(calc_size, num_stripes); - do_div(calc_size, stripe_len); - calc_size *= stripe_len; + do_div(calc_size, BTRFS_STRIPE_LEN); + calc_size *= BTRFS_STRIPE_LEN; } /* we don't want tiny stripes */ - if (!looped) + if (!small_stripe) calc_size = max_t(u64, min_stripe_size, calc_size); /* - * we're about to do_div by the stripe_len so lets make sure + * we're about to do_div by the BTRFS_STRIPE_LEN so lets make sure * we end up with something bigger than a stripe */ - calc_size = max_t(u64, calc_size, stripe_len * 4); + calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN); + + do_div(calc_size, BTRFS_STRIPE_LEN); + calc_size *= BTRFS_STRIPE_LEN; + + return calc_size; +} + +static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map, + int num_stripes) +{ + struct map_lookup *new; + size_t len = map_lookup_size(num_stripes); + + BUG_ON(map->num_stripes < num_stripes); + + if (map->num_stripes == num_stripes) + return map; + + new = kmalloc(len, GFP_NOFS); + if (!new) { + /* just change map->num_stripes */ + map->num_stripes = num_stripes; + return map; + } + + memcpy(new, map, len); + new->num_stripes = num_stripes; + kfree(map); + return new; +} + +/* + * helper to allocate device space from btrfs_device_info, in which we stored + * max free space information of every device. It is used when we can not + * allocate chunks by default size. + * + * By this helper, we can allocate a new chunk as larger as possible. + */ +static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans, + struct btrfs_fs_devices *fs_devices, + struct btrfs_device_info *devices, + int nr_device, u64 type, + struct map_lookup **map_lookup, + int min_stripes, u64 *stripe_size) +{ + int i, index, sort_again = 0; + int min_devices = min_stripes; + u64 max_avail, min_free; + struct map_lookup *map = *map_lookup; + int ret; + + if (nr_device < min_stripes) + return -ENOSPC; + + btrfs_descending_sort_devices(devices, nr_device); + + max_avail = devices[0].max_avail; + if (!max_avail) + return -ENOSPC; + + for (i = 0; i < nr_device; i++) { + /* + * if dev_offset = 0, it means the free space of this device + * is less than what we need, and we didn't search max avail + * extent on this device, so do it now. 
+ */ + if (!devices[i].dev_offset) { + ret = find_free_dev_extent(trans, devices[i].dev, + max_avail, + &devices[i].dev_offset, + &devices[i].max_avail); + if (ret != 0 && ret != -ENOSPC) + return ret; + sort_again = 1; + } + } + + /* we update the max avail free extent of each devices, sort again */ + if (sort_again) + btrfs_descending_sort_devices(devices, nr_device); + + if (type & BTRFS_BLOCK_GROUP_DUP) + min_devices = 1; + + if (!devices[min_devices - 1].max_avail) + return -ENOSPC; + + max_avail = devices[min_devices - 1].max_avail; + if (type & BTRFS_BLOCK_GROUP_DUP) + do_div(max_avail, 2); - do_div(calc_size, stripe_len); - calc_size *= stripe_len; + max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type, + min_stripes, 1); + if (type & BTRFS_BLOCK_GROUP_DUP) + min_free = max_avail * 2; + else + min_free = max_avail; + + if (min_free > devices[min_devices - 1].max_avail) + return -ENOSPC; + + map = __shrink_map_lookup_stripes(map, min_stripes); + *stripe_size = max_avail; + + index = 0; + for (i = 0; i < min_stripes; i++) { + map->stripes[i].dev = devices[index].dev; + map->stripes[i].physical = devices[index].dev_offset; + if (type & BTRFS_BLOCK_GROUP_DUP) { + i++; + map->stripes[i].dev = devices[index].dev; + map->stripes[i].physical = devices[index].dev_offset + + max_avail; + } + index++; + } + *map_lookup = map; + + return 0; +} + +static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, + struct btrfs_root *extent_root, + struct map_lookup **map_ret, + u64 *num_bytes, u64 *stripe_size, + u64 start, u64 type) +{ + struct btrfs_fs_info *info = extent_root->fs_info; + struct btrfs_device *device = NULL; + struct btrfs_fs_devices *fs_devices = info->fs_devices; + struct list_head *cur; + struct map_lookup *map; + struct extent_map_tree *em_tree; + struct extent_map *em; + struct btrfs_device_info *devices_info; + struct list_head private_devs; + u64 calc_size = 1024 * 1024 * 1024; + u64 min_free; + u64 avail; + u64 dev_offset; + int num_stripes; + int min_stripes; + int sub_stripes; + int min_devices; /* the min number of devices we need */ + int i; + int ret; + int index; + + if ((type & BTRFS_BLOCK_GROUP_RAID1) && + (type & BTRFS_BLOCK_GROUP_DUP)) { + WARN_ON(1); + type &= ~BTRFS_BLOCK_GROUP_DUP; + } + if (list_empty(&fs_devices->alloc_list)) + return -ENOSPC; + + ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes, + &min_stripes, &sub_stripes); + if (ret) + return ret; + + devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, + GFP_NOFS); + if (!devices_info) + return -ENOMEM; + + map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); + if (!map) { + ret = -ENOMEM; + goto error; + } + map->num_stripes = num_stripes; cur = fs_devices->alloc_list.next; index = 0; + i = 0; - if (type & BTRFS_BLOCK_GROUP_DUP) + calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type, + num_stripes, 0); + + if (type & BTRFS_BLOCK_GROUP_DUP) { min_free = calc_size * 2; - else + min_devices = 1; + } else { min_free = calc_size; - - /* - * we add 1MB because we never use the first 1MB of the device, unless - * we've looped, then we are likely allocating the maximum amount of - * space left already - */ - if (!looped) - min_free += 1024 * 1024; + min_devices = min_stripes; + } INIT_LIST_HEAD(&private_devs); while (index < num_stripes) { @@ -2313,27 +2468,39 @@ again: cur = cur->next; if (device->in_fs_metadata && avail >= min_free) { - ret = find_free_dev_extent(trans, device, - min_free, &dev_offset, - &max_avail); + ret = find_free_dev_extent(trans, 
device, min_free, + &devices_info[i].dev_offset, + &devices_info[i].max_avail); if (ret == 0) { list_move_tail(&device->dev_alloc_list, &private_devs); map->stripes[index].dev = device; - map->stripes[index].physical = dev_offset; + map->stripes[index].physical = + devices_info[i].dev_offset; index++; if (type & BTRFS_BLOCK_GROUP_DUP) { map->stripes[index].dev = device; map->stripes[index].physical = - dev_offset + calc_size; + devices_info[i].dev_offset + + calc_size; index++; } - } - } else if (device->in_fs_metadata && avail > max_avail) - max_avail = avail; + } else if (ret != -ENOSPC) + goto error; + + devices_info[i].dev = device; + i++; + } else if (device->in_fs_metadata && + avail >= BTRFS_STRIPE_LEN) { + devices_info[i].dev = device; + devices_info[i].max_avail = avail; + i++; + } + if (cur == &fs_devices->alloc_list) break; } + list_splice(&private_devs, &fs_devices->alloc_list); if (index < num_stripes) { if (index >= min_stripes) { @@ -2342,36 +2509,36 @@ again: num_stripes /= sub_stripes; num_stripes *= sub_stripes; } - looped = 1; - goto again; - } - if (!looped && max_avail > 0) { - looped = 1; - calc_size = max_avail; - if (type & BTRFS_BLOCK_GROUP_DUP) - do_div(calc_size, 2); - goto again; + + map = __shrink_map_lookup_stripes(map, num_stripes); + } else if (i >= min_devices) { + ret = __btrfs_alloc_tiny_space(trans, fs_devices, + devices_info, i, type, + &map, min_stripes, + &calc_size); + if (ret) + goto error; + } else { + ret = -ENOSPC; + goto error; } - kfree(map); - return -ENOSPC; } map->sector_size = extent_root->sectorsize; - map->stripe_len = stripe_len; - map->io_align = stripe_len; - map->io_width = stripe_len; + map->stripe_len = BTRFS_STRIPE_LEN; + map->io_align = BTRFS_STRIPE_LEN; + map->io_width = BTRFS_STRIPE_LEN; map->type = type; - map->num_stripes = num_stripes; map->sub_stripes = sub_stripes; *map_ret = map; *stripe_size = calc_size; *num_bytes = chunk_bytes_by_type(type, calc_size, - num_stripes, sub_stripes); + map->num_stripes, sub_stripes); em = alloc_extent_map(GFP_NOFS); if (!em) { - kfree(map); - return -ENOMEM; + ret = -ENOMEM; + goto error; } em->bdev = (struct block_device *)map; em->start = start; @@ -2404,7 +2571,13 @@ again: index++; } + kfree(devices_info); return 0; + +error: + kfree(map); + kfree(devices_info); + return ret; } static int __finish_chunk_alloc(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index a668c0116982..a5cfedf393f9 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -20,8 +20,11 @@ #define __BTRFS_VOLUMES_ #include +#include #include "async-thread.h" +#define BTRFS_STRIPE_LEN (64 * 1024) + struct buffer_head; struct btrfs_pending_bios { struct bio *head; @@ -137,6 +140,27 @@ struct btrfs_multi_bio { struct btrfs_bio_stripe stripes[]; }; +struct btrfs_device_info { + struct btrfs_device *dev; + u64 dev_offset; + u64 max_avail; +}; + +/* Used to sort the devices by max_avail(descending sort) */ +int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2); + +/* + * sort the devices by max_avail, in which max free extent size of each device + * is stored.(Descending Sort) + */ +static inline void btrfs_descending_sort_devices( + struct btrfs_device_info *devices, + size_t nr_devices) +{ + sort(devices, nr_devices, sizeof(struct btrfs_device_info), + btrfs_cmp_device_free_bytes, NULL); +} + #define btrfs_multi_bio_size(n) (sizeof(struct btrfs_multi_bio) + \ (sizeof(struct btrfs_bio_stripe) * (n))) -- cgit v1.2.2 From 
6d07bcec969af335d4e35b3921131b7929bd634e Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 5 Jan 2011 10:07:31 +0000 Subject: btrfs: fix wrong free space information of btrfs

When we store data with a raid profile in btrfs across two or more disks of different sizes, the df command shows free space in the filesystem even though no more data can actually be written; df reports the wrong free space for btrfs.

 # mkfs.btrfs -d raid1 /dev/sda9 /dev/sda10
 # btrfs-show
 Label: none  uuid: a95cd49e-6e33-45b8-8741-a36153ce4b64
  Total devices 2 FS bytes used 28.00KB
  devid    1 size 5.01GB used 2.03GB path /dev/sda9
  devid    2 size 10.00GB used 2.01GB path /dev/sda10
 # btrfs device scan /dev/sda9 /dev/sda10
 # mount /dev/sda9 /mnt
 # dd if=/dev/zero of=tmpfile0 bs=4K count=9999999999 (fill the filesystem)
 # sync
 # df -TH
 Filesystem   Type   Size  Used Avail Use% Mounted on
 /dev/sda9    btrfs   17G  8.6G  5.4G  62% /mnt
 # btrfs-show
 Label: none  uuid: a95cd49e-6e33-45b8-8741-a36153ce4b64
  Total devices 2 FS bytes used 3.99GB
  devid    1 size 5.01GB used 5.01GB path /dev/sda9
  devid    2 size 10.00GB used 4.99GB path /dev/sda10

The reason is that btrfs cannot allocate chunks once one of the paired disks has no space left; the free space remaining on the other disk can then never be used and should be subtracted from the total space, but btrfs does not subtract it. This is confusing to the user.

This patch fixes it by calculating the free space that can actually be used to allocate chunks.

Implementation:
1. Get the free space of every device and align it to the stripe length.
2. Sort the devices by free space.
3. Check the free space of each device:
   3.1. If it is not zero, check how many devices have more free space than this one. If that number reaches the minimum stripe count, the free space can be used and is added to the total free space; if it is below the minimum stripe count, the space cannot be used and the check ends.
   3.2. If the free space is zero, check the next device and go to 3.1.

This implementation essentially mimics a fake chunk allocation.
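To make the walk in step 3 concrete, here is the same accounting rendered as a small stand-alone user-space program. It assumes a simplified device array whose free space is already stripe-aligned and a fixed minimum stripe count; the loop mirrors the one this patch adds to btrfs_calc_avail_data_space(), it is not the kernel code itself.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Simplified stand-in for struct btrfs_device_info: only the free space matters here. */
struct dev_info {
    uint64_t max_avail;   /* free bytes, already aligned to the stripe length */
};

/* Sort callback: descending by max_avail. */
static int cmp_desc(const void *a, const void *b)
{
    const struct dev_info *d1 = a, *d2 = b;

    if (d1->max_avail > d2->max_avail)
        return -1;
    if (d1->max_avail < d2->max_avail)
        return 1;
    return 0;
}

/*
 * Walk the devices from the smallest free space upwards.  Whenever the
 * current device still has room for a stripe, min_stripes devices (this
 * one and the larger ones sorted before it) can each contribute that much
 * space, so account it and subtract it from those devices.
 */
static uint64_t account_free_space(struct dev_info *devs, int nr,
                                   int min_stripes, uint64_t min_stripe_size)
{
    uint64_t total = 0;
    int i = nr - 1;

    qsort(devs, nr, sizeof(*devs), cmp_desc);

    while (nr >= min_stripes) {
        if (devs[i].max_avail >= min_stripe_size) {
            uint64_t alloc = devs[i].max_avail;

            total += alloc * min_stripes;
            for (int j = i + 1 - min_stripes; j <= i; j++)
                devs[j].max_avail -= alloc;
        }
        i--;
        nr--;
    }
    return total;
}

int main(void)
{
    /* Two devices of different size, raid1-like profile (min_stripes = 2). */
    struct dev_info devs[] = { { 5ULL << 30 }, { 10ULL << 30 } };
    uint64_t free_bytes = account_free_space(devs, 2, 2, 64 * 1024);

    printf("usable free space: %llu bytes\n", (unsigned long long)free_bytes);
    return 0;
}

With a 5GB and a 10GB device and min_stripes = 2, this reports 10GB of raw free space: the larger device's extra 5GB can never be paired with anything, so it is no longer counted.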
After appling this patch, df can show correct space information: # df -TH Filesystem Type Size Used Avail Use% Mounted on /dev/sda9 btrfs 17G 8.6G 0 100% /mnt Signed-off-by: Miao Xie Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 + fs/btrfs/extent-tree.c | 58 +++++++++++++++++++- fs/btrfs/super.c | 146 +++++++++++++++++++++++++++++++++++++++++++++++-- fs/btrfs/volumes.c | 84 ++++++++++++++++++++++++++++ fs/btrfs/volumes.h | 3 + 5 files changed, 286 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0cb322cc4fc0..0995f4f68d7a 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2158,6 +2158,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, int btrfs_remove_block_group(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 group_start); u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags); +u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data); void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *ionde); void btrfs_clear_space_info_full(struct btrfs_fs_info *info); int btrfs_check_data_free_space(struct inode *inode, u64 bytes); @@ -2201,6 +2202,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, int btrfs_set_block_group_rw(struct btrfs_root *root, struct btrfs_block_group_cache *cache); void btrfs_put_block_group_cache(struct btrfs_fs_info *info); +u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1e1c9a177626..04bfc3a2bd9f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3090,7 +3090,7 @@ static u64 get_alloc_profile(struct btrfs_root *root, u64 flags) return btrfs_reduce_alloc_profile(root, flags); } -static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data) +u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data) { u64 flags; @@ -8019,6 +8019,62 @@ out: return ret; } +/* + * helper to account the unused space of all the readonly block group in the + * list. takes mirrors into account. + */ +static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list) +{ + struct btrfs_block_group_cache *block_group; + u64 free_bytes = 0; + int factor; + + list_for_each_entry(block_group, groups_list, list) { + spin_lock(&block_group->lock); + + if (!block_group->ro) { + spin_unlock(&block_group->lock); + continue; + } + + if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 | + BTRFS_BLOCK_GROUP_RAID10 | + BTRFS_BLOCK_GROUP_DUP)) + factor = 2; + else + factor = 1; + + free_bytes += (block_group->key.offset - + btrfs_block_group_used(&block_group->item)) * + factor; + + spin_unlock(&block_group->lock); + } + + return free_bytes; +} + +/* + * helper to account the unused space of all the readonly block group in the + * space_info. takes mirrors into account. 
+ */ +u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) +{ + int i; + u64 free_bytes = 0; + + spin_lock(&sinfo->lock); + + for(i = 0; i < BTRFS_NR_RAID_TYPES; i++) + if (!list_empty(&sinfo->block_groups[i])) + free_bytes += __btrfs_get_ro_block_group_free_space( + &sinfo->block_groups[i]); + + spin_unlock(&sinfo->lock); + + return free_bytes; +} + int btrfs_set_block_group_rw(struct btrfs_root *root, struct btrfs_block_group_cache *cache) { diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index caa5bcc62f16..2963376e77f4 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -777,6 +777,127 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) return 0; } +/* + * The helper to calc the free space on the devices that can be used to store + * file data. + */ +static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes) +{ + struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_device_info *devices_info; + struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; + struct btrfs_device *device; + u64 skip_space; + u64 type; + u64 avail_space; + u64 used_space; + u64 min_stripe_size; + int min_stripes = 1; + int i = 0, nr_devices; + int ret; + + nr_devices = fs_info->fs_devices->rw_devices; + BUG_ON(!nr_devices); + + devices_info = kmalloc(sizeof(*devices_info) * nr_devices, + GFP_NOFS); + if (!devices_info) + return -ENOMEM; + + /* calc min stripe number for data space alloction */ + type = btrfs_get_alloc_profile(root, 1); + if (type & BTRFS_BLOCK_GROUP_RAID0) + min_stripes = 2; + else if (type & BTRFS_BLOCK_GROUP_RAID1) + min_stripes = 2; + else if (type & BTRFS_BLOCK_GROUP_RAID10) + min_stripes = 4; + + if (type & BTRFS_BLOCK_GROUP_DUP) + min_stripe_size = 2 * BTRFS_STRIPE_LEN; + else + min_stripe_size = BTRFS_STRIPE_LEN; + + list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) { + if (!device->in_fs_metadata) + continue; + + avail_space = device->total_bytes - device->bytes_used; + + /* align with stripe_len */ + do_div(avail_space, BTRFS_STRIPE_LEN); + avail_space *= BTRFS_STRIPE_LEN; + + /* + * In order to avoid overwritting the superblock on the drive, + * btrfs starts at an offset of at least 1MB when doing chunk + * allocation. + */ + skip_space = 1024 * 1024; + + /* user can set the offset in fs_info->alloc_start. */ + if (fs_info->alloc_start + BTRFS_STRIPE_LEN <= + device->total_bytes) + skip_space = max(fs_info->alloc_start, skip_space); + + /* + * btrfs can not use the free space in [0, skip_space - 1], + * we must subtract it from the total. In order to implement + * it, we account the used space in this range first. + */ + ret = btrfs_account_dev_extents_size(device, 0, skip_space - 1, + &used_space); + if (ret) { + kfree(devices_info); + return ret; + } + + /* calc the free space in [0, skip_space - 1] */ + skip_space -= used_space; + + /* + * we can use the free space in [0, skip_space - 1], subtract + * it from the total. 
+ */ + if (avail_space && avail_space >= skip_space) + avail_space -= skip_space; + else + avail_space = 0; + + if (avail_space < min_stripe_size) + continue; + + devices_info[i].dev = device; + devices_info[i].max_avail = avail_space; + + i++; + } + + nr_devices = i; + + btrfs_descending_sort_devices(devices_info, nr_devices); + + i = nr_devices - 1; + avail_space = 0; + while (nr_devices >= min_stripes) { + if (devices_info[i].max_avail >= min_stripe_size) { + int j; + u64 alloc_size; + + avail_space += devices_info[i].max_avail * min_stripes; + alloc_size = devices_info[i].max_avail; + for (j = i + 1 - min_stripes; j <= i; j++) + devices_info[j].max_avail -= alloc_size; + } + i--; + nr_devices--; + } + + kfree(devices_info); + *free_bytes = avail_space; + return 0; +} + static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct btrfs_root *root = btrfs_sb(dentry->d_sb); @@ -784,16 +905,21 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) struct list_head *head = &root->fs_info->space_info; struct btrfs_space_info *found; u64 total_used = 0; - u64 total_used_data = 0; + u64 total_free_data = 0; int bits = dentry->d_sb->s_blocksize_bits; __be32 *fsid = (__be32 *)root->fs_info->fsid; + int ret; + /* holding chunk_muext to avoid allocating new chunks */ + mutex_lock(&root->fs_info->chunk_mutex); rcu_read_lock(); list_for_each_entry_rcu(found, head, list) { - if (found->flags & BTRFS_BLOCK_GROUP_DATA) - total_used_data += found->disk_used; - else - total_used_data += found->disk_total; + if (found->flags & BTRFS_BLOCK_GROUP_DATA) { + total_free_data += found->disk_total - found->disk_used; + total_free_data -= + btrfs_account_ro_block_groups_free_space(found); + } + total_used += found->disk_used; } rcu_read_unlock(); @@ -801,9 +927,17 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_namelen = BTRFS_NAME_LEN; buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits; buf->f_bfree = buf->f_blocks - (total_used >> bits); - buf->f_bavail = buf->f_blocks - (total_used_data >> bits); buf->f_bsize = dentry->d_sb->s_blocksize; buf->f_type = BTRFS_SUPER_MAGIC; + buf->f_bavail = total_free_data; + ret = btrfs_calc_avail_data_space(root, &total_free_data); + if (ret) { + mutex_unlock(&root->fs_info->chunk_mutex); + return ret; + } + buf->f_bavail += total_free_data; + buf->f_bavail = buf->f_bavail >> bits; + mutex_unlock(&root->fs_info->chunk_mutex); /* We treat it as constant endianness (it doesn't matter _which_) because we want the fsid to come out the same whether mounted diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c22784b989b7..0c7f478cf645 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -728,6 +728,90 @@ error: return ret; } +/* helper to account the used device space in the range */ +int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, + u64 end, u64 *length) +{ + struct btrfs_key key; + struct btrfs_root *root = device->dev_root; + struct btrfs_dev_extent *dev_extent; + struct btrfs_path *path; + u64 extent_end; + int ret; + int slot; + struct extent_buffer *l; + + *length = 0; + + if (start >= device->total_bytes) + return 0; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + path->reada = 2; + + key.objectid = device->devid; + key.offset = start; + key.type = BTRFS_DEV_EXTENT_KEY; + + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + if (ret > 0) { + ret = btrfs_previous_item(root, path, key.objectid, key.type); + if (ret < 0) + 
goto out; + } + + while (1) { + l = path->nodes[0]; + slot = path->slots[0]; + if (slot >= btrfs_header_nritems(l)) { + ret = btrfs_next_leaf(root, path); + if (ret == 0) + continue; + if (ret < 0) + goto out; + + break; + } + btrfs_item_key_to_cpu(l, &key, slot); + + if (key.objectid < device->devid) + goto next; + + if (key.objectid > device->devid) + break; + + if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) + goto next; + + dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); + extent_end = key.offset + btrfs_dev_extent_length(l, + dev_extent); + if (key.offset <= start && extent_end > end) { + *length = end - start + 1; + break; + } else if (key.offset <= start && extent_end > start) + *length += extent_end - start; + else if (key.offset > start && extent_end <= end) + *length += extent_end - key.offset; + else if (key.offset > start && key.offset <= end) { + *length += end - key.offset + 1; + break; + } else if (key.offset > end) + break; + +next: + path->slots[0]++; + } + ret = 0; +out: + btrfs_free_path(path); + return ret; +} + /* * find_free_dev_extent - find free space in the specified device * @trans: transaction handler diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index a5cfedf393f9..7af6144a7954 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -161,6 +161,9 @@ static inline void btrfs_descending_sort_devices( btrfs_cmp_device_free_bytes, NULL); } +int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, + u64 end, u64 *length); + #define btrfs_multi_bio_size(n) (sizeof(struct btrfs_multi_bio) + \ (sizeof(struct btrfs_bio_stripe) * (n))) -- cgit v1.2.2 From 42838bb265b9cff3de9587fcacc398b5112dc2d9 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Thu, 6 Jan 2011 21:45:21 +0000 Subject: btrfs: Mem leak in btrfs_get_acl() It seems to me that we leak the memory allocated to 'value' in btrfs_get_acl() if the call to posix_acl_from_xattr() fails. Here's a patch that attempts to correct that problem. Signed-off-by: Jesper Juhl Signed-off-by: Chris Mason --- fs/btrfs/acl.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 2222d161c7b6..6d1410e392d3 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -60,8 +60,10 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) size = __btrfs_getxattr(inode, name, value, size); if (size > 0) { acl = posix_acl_from_xattr(value, size); - if (IS_ERR(acl)) + if (IS_ERR(acl)) { + kfree(value); return acl; + } set_cached_acl(inode, type, acl); } kfree(value); -- cgit v1.2.2 From 20b450773d17e325190c158e10bfdb25dc21d2d6 Mon Sep 17 00:00:00 2001 From: Dave Young Date: Sat, 8 Jan 2011 10:09:13 +0000 Subject: btrfs: mount failure return value fix I happened to pass swap partition as root partition in cmdline, then kernel panic and tell me about "Cannot open root device". It is not correct, in fact it is a fs type mismatch instead of 'no device'. Eventually I found btrfs mounting failed with -EIO, it should be -EINVAL. 
The logic in init/do_mounts.c: for (p = fs_names; *p; p += strlen(p)+1) { int err = do_mount_root(name, p, flags, root_mount_data); switch (err) { case 0: goto out; case -EACCES: flags |= MS_RDONLY; goto retry; case -EINVAL: continue; } print "Cannot open root device" panic } SO fs type after btrfs will have no chance to mount Here fix the return value as -EINVAL Signed-off-by: Dave Young Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 4 +++- fs/btrfs/volumes.c | 8 +++++--- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f88eb2ce7919..f9efb68fc2e3 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1713,8 +1713,10 @@ struct btrfs_root *open_ctree(struct super_block *sb, fs_info, BTRFS_ROOT_TREE_OBJECTID); bh = btrfs_read_dev_super(fs_devices->latest_bdev); - if (!bh) + if (!bh) { + err = -EINVAL; goto fail_iput; + } memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy)); memcpy(&fs_info->super_for_commit, &fs_info->super_copy, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0c7f478cf645..e8be478178aa 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -600,8 +600,10 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, set_blocksize(bdev, 4096); bh = btrfs_read_dev_super(bdev); - if (!bh) + if (!bh) { + ret = -EINVAL; goto error_close; + } disk_super = (struct btrfs_super_block *)bh->b_data; devid = btrfs_stack_device_id(&disk_super->dev_item); @@ -702,7 +704,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, goto error_close; bh = btrfs_read_dev_super(bdev); if (!bh) { - ret = -EIO; + ret = -EINVAL; goto error_close; } disk_super = (struct btrfs_super_block *)bh->b_data; @@ -1302,7 +1304,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) set_blocksize(bdev, 4096); bh = btrfs_read_dev_super(bdev); if (!bh) { - ret = -EIO; + ret = -EINVAL; goto error_close; } disk_super = (struct btrfs_super_block *)bh->b_data; -- cgit v1.2.2 From ff175d57f057f77d2d3031d674c2af9167a4af02 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sat, 25 Dec 2010 21:22:30 +0000 Subject: btrfs: Don't pass NULL ptr to func that may deref it. Hi, In fs/btrfs/inode.c::fixup_tree_root_location() we have this code: ... if (!path) { err = -ENOMEM; goto out; } ... out: btrfs_free_path(path); return err; btrfs_free_path() passes its argument on to other functions and some of them end up dereferencing the pointer. In the code above that pointer is clearly NULL, so btrfs_free_path() will eventually cause a NULL dereference. There are many ways to cut this cake (fix the bug). The one I chose was to make btrfs_free_path() deal gracefully with NULL pointers. If you disagree, feel free to come up with an alternative patch. 
Signed-off-by: Jesper Juhl Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 9ac171599258..99599f1c1554 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -105,6 +105,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, /* this also releases the path */ void btrfs_free_path(struct btrfs_path *p) { + if (!p) + return; btrfs_release_path(NULL, p); kmem_cache_free(btrfs_path_cachep, p); } -- cgit v1.2.2 From 91ca338d776e0cefb255bf2979b6448febd880f5 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Wed, 5 Jan 2011 02:32:22 +0000 Subject: btrfs: check NULL or not Should check if functions returns NULL or not. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/ctree.c | 6 ++++++ fs/btrfs/disk-io.c | 8 ++++++++ fs/btrfs/extent_io.c | 2 ++ 3 files changed, 16 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 99599f1c1554..b5baff0dccfe 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2516,6 +2516,9 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root btrfs_assert_tree_locked(path->nodes[1]); right = read_node_slot(root, upper, slot + 1); + if (right == NULL) + return 1; + btrfs_tree_lock(right); btrfs_set_lock_blocking(right); @@ -2766,6 +2769,9 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root btrfs_assert_tree_locked(path->nodes[1]); left = read_node_slot(root, path->nodes[1], slot - 1); + if (left == NULL) + return 1; + btrfs_tree_lock(left); btrfs_set_lock_blocking(left); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f9efb68fc2e3..a0c37b2ee9ed 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -353,6 +353,10 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) WARN_ON(len == 0); eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); + if (eb == NULL) { + WARN_ON(1); + goto out; + } ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE, btrfs_header_generation(eb)); BUG_ON(ret); @@ -427,6 +431,10 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, WARN_ON(len == 0); eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); + if (eb == NULL) { + ret = -EIO; + goto out; + } found_start = btrfs_header_bytenr(eb); if (found_start != start) { diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f1d198128959..8b8d3d99ae68 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3075,6 +3075,8 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree, #endif eb = kmem_cache_zalloc(extent_buffer_cache, mask); + if (eb == NULL) + return NULL; eb->start = start; eb->len = len; spin_lock_init(&eb->lock); -- cgit v1.2.2 From 5e540f7715b8cd83b8e60beaaa525b125cc122de Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Mon, 27 Dec 2010 06:53:10 +0000 Subject: btrfs: Fix memory leak in btrfs_read_fs_root_no_radix() In btrfs_read_fs_root_no_radix(), 'root' is not freed if btrfs_search_slot() returns error. 
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a0c37b2ee9ed..9b1dd4138072 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1153,6 +1153,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, } btrfs_free_path(path); if (ret) { + kfree(root); if (ret > 0) ret = -ENOENT; return ERR_PTR(ret); -- cgit v1.2.2 From f690efb1aa2a961dd6655529c1797fcac60ad6d9 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 12 Jan 2011 21:04:22 +0000 Subject: Btrfs: don't warn if we get ENOSPC in btrfs_block_rsv_check If we run low on space we could get a bunch of warnings out of btrfs_block_rsv_check, but this is mostly just called via the transaction code to see if we need to end the transaction, it expects to see failures, so let's not WARN and freak everybody out for no reason. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 04bfc3a2bd9f..055b837eab19 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3727,11 +3727,6 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans, return 0; } - WARN_ON(1); - printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n", - block_rsv->size, block_rsv->reserved, - block_rsv->freed[0], block_rsv->freed[1]); - return -ENOSPC; } -- cgit v1.2.2 From 6f88a4403def422bd8e276ddf6863d6ac71435d2 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Wed, 29 Dec 2010 14:55:03 +0000 Subject: btrfs: Require CAP_SYS_ADMIN for filesystem rebalance Filesystem rebalancing (BTRFS_IOC_BALANCE) affects the entire filesystem and may run uninterruptibly for a long time. This does not seem to be something that an unprivileged user should be able to do. Reported-by: Aron Xu Signed-off-by: Ben Hutchings Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e8be478178aa..f2d2f4ccc738 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include "compat.h" #include "ctree.h" @@ -2024,6 +2025,9 @@ int btrfs_balance(struct btrfs_root *dev_root) if (dev_root->fs_info->sb->s_flags & MS_RDONLY) return -EROFS; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + mutex_lock(&dev_root->fs_info->volume_mutex); dev_root = dev_root->fs_info->dev_root; -- cgit v1.2.2 From 7b8a53fd815deb39542085897743fa0063f9fe06 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 15 Jan 2011 20:08:44 -0500 Subject: fix old umount_tree() breakage Expiry-related code calls umount_tree() several times with the same list to collect vfsmounts to. Which is fine, except that umount_tree() implicitly assumed that the list would be empty on each call - it moves the victims over there and then iterates through the list kicking them out. It's *almost* idempotent, so everything nearly worked. However, mnt->ghosts handling (and thus expirability checks) had been broken - that part was not idempotent... The fix is trivial - use local temporary list, splice it to the the collector list when we are through. 
Signed-off-by: Al Viro --- fs/namespace.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/namespace.c b/fs/namespace.c index bfcb701f9490..d7fc05fac753 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1226,15 +1226,16 @@ void release_mounts(struct list_head *head) */ void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill) { + LIST_HEAD(tmp_list); struct vfsmount *p; for (p = mnt; p; p = next_mnt(p, mnt)) - list_move(&p->mnt_hash, kill); + list_move(&p->mnt_hash, &tmp_list); if (propagate) - propagate_umount(kill); + propagate_umount(&tmp_list); - list_for_each_entry(p, kill, mnt_hash) { + list_for_each_entry(p, &tmp_list, mnt_hash) { list_del_init(&p->mnt_expire); list_del_init(&p->mnt_list); __touch_mnt_namespace(p->mnt_ns); @@ -1246,6 +1247,7 @@ void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill) } change_mnt_propagation(p, MS_PRIVATE); } + list_splice(&tmp_list, kill); } static void shrink_submounts(struct vfsmount *mnt, struct list_head *umounts); -- cgit v1.2.2 From f03c65993b98eeb909a4012ce7833c5857d74755 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 14 Jan 2011 22:30:21 -0500 Subject: sanitize vfsmount refcounting changes Instead of splitting refcount between (per-cpu) mnt_count and (SMP-only) mnt_longrefs, make all references contribute to mnt_count again and keep track of how many are longterm ones. Accounting rules for longterm count: * 1 for each fs_struct.root.mnt * 1 for each fs_struct.pwd.mnt * 1 for having non-NULL ->mnt_ns * decrement to 0 happens only under vfsmount lock exclusive That allows nice common case for mntput() - since we can't drop the final reference until after mnt_longterm has reached 0 due to the rules above, mntput() can grab vfsmount lock shared and check mnt_longterm. If it turns out to be non-zero (which is the common case), we know that this is not the final mntput() and can just blindly decrement percpu mnt_count. Otherwise we grab vfsmount lock exclusive and do usual decrement-and-check of percpu mnt_count. For fs_struct.c we have mnt_make_longterm() and mnt_make_shortterm(); namespace.c uses the latter in places where we don't already hold vfsmount lock exclusive and opencodes a few remaining spots where we need to manipulate mnt_longterm. Note that we mostly revert the code outside of fs/namespace.c back to what we used to have; in particular, normal code doesn't need to care about two kinds of references, etc. And we get to keep the optimization Nick's variant had bought us... 
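As a rough illustration of the put path described above, here is a deliberately simplified single-counter model, with C11 atomics and a pthread rwlock standing in for the per-cpu counters and the vfsmount brlock; it is a sketch of the idea, not the kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for struct vfsmount: one shared counter instead of per-cpu ones. */
struct mount {
    atomic_int count;        /* every reference, short- or long-term */
    atomic_int longterm;     /* how many of those are long-term (root, pwd, mnt_ns) */
    pthread_rwlock_t lock;   /* stands in for vfsmount_lock */
};

static void mget(struct mount *m)
{
    atomic_fetch_add(&m->count, 1);
}

static void mput(struct mount *m)
{
    /* Fast path: a long-term reference pins the mount, so the final drop
     * cannot happen here; decrement under the shared lock only. */
    pthread_rwlock_rdlock(&m->lock);
    if (atomic_load(&m->longterm)) {
        atomic_fetch_sub(&m->count, 1);
        pthread_rwlock_unlock(&m->lock);
        return;
    }
    pthread_rwlock_unlock(&m->lock);

    /* Slow path: no long-term users are left, so this may be the last
     * reference; take the lock exclusively and do the real check. */
    pthread_rwlock_wrlock(&m->lock);
    if (atomic_fetch_sub(&m->count, 1) == 1)
        printf("last reference gone, mount can be freed\n");
    pthread_rwlock_unlock(&m->lock);
}

int main(void)
{
    struct mount m = { 1, 1, PTHREAD_RWLOCK_INITIALIZER };

    mget(&m);                      /* short-term reference, e.g. a path lookup */
    mput(&m);                      /* fast path: longterm is still non-zero */

    atomic_store(&m.longterm, 0);  /* e.g. the mount leaves its namespace */
    mput(&m);                      /* slow path: this drops the final reference */
    return 0;
}

The point of the design shows up here: as long as a namespace, root or pwd holds a long-term reference, an ordinary mntput() never has to take the lock exclusively.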
Signed-off-by: Al Viro --- fs/anon_inodes.c | 2 +- fs/fs_struct.c | 35 +++++++++++------ fs/internal.h | 3 ++ fs/namei.c | 24 ------------ fs/namespace.c | 116 +++++++++++++++++++++---------------------------------- fs/pipe.c | 2 +- fs/super.c | 2 +- 7 files changed, 73 insertions(+), 111 deletions(-) (limited to 'fs') diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index cbe57f3c4d89..c5567cb78432 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -233,7 +233,7 @@ static int __init anon_inode_init(void) return 0; err_mntput: - mntput_long(anon_inode_mnt); + mntput(anon_inode_mnt); err_unregister_filesystem: unregister_filesystem(&anon_inode_fs_type); err_exit: diff --git a/fs/fs_struct.c b/fs/fs_struct.c index 68ca487bedb1..78b519c13536 100644 --- a/fs/fs_struct.c +++ b/fs/fs_struct.c @@ -4,6 +4,19 @@ #include #include #include +#include "internal.h" + +static inline void path_get_longterm(struct path *path) +{ + path_get(path); + mnt_make_longterm(path->mnt); +} + +static inline void path_put_longterm(struct path *path) +{ + mnt_make_shortterm(path->mnt); + path_put(path); +} /* * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values. @@ -17,11 +30,11 @@ void set_fs_root(struct fs_struct *fs, struct path *path) write_seqcount_begin(&fs->seq); old_root = fs->root; fs->root = *path; - path_get_long(path); + path_get_longterm(path); write_seqcount_end(&fs->seq); spin_unlock(&fs->lock); if (old_root.dentry) - path_put_long(&old_root); + path_put_longterm(&old_root); } /* @@ -36,12 +49,12 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path) write_seqcount_begin(&fs->seq); old_pwd = fs->pwd; fs->pwd = *path; - path_get_long(path); + path_get_longterm(path); write_seqcount_end(&fs->seq); spin_unlock(&fs->lock); if (old_pwd.dentry) - path_put_long(&old_pwd); + path_put_longterm(&old_pwd); } void chroot_fs_refs(struct path *old_root, struct path *new_root) @@ -59,13 +72,13 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root) write_seqcount_begin(&fs->seq); if (fs->root.dentry == old_root->dentry && fs->root.mnt == old_root->mnt) { - path_get_long(new_root); + path_get_longterm(new_root); fs->root = *new_root; count++; } if (fs->pwd.dentry == old_root->dentry && fs->pwd.mnt == old_root->mnt) { - path_get_long(new_root); + path_get_longterm(new_root); fs->pwd = *new_root; count++; } @@ -76,13 +89,13 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root) } while_each_thread(g, p); read_unlock(&tasklist_lock); while (count--) - path_put_long(old_root); + path_put_longterm(old_root); } void free_fs_struct(struct fs_struct *fs) { - path_put_long(&fs->root); - path_put_long(&fs->pwd); + path_put_longterm(&fs->root); + path_put_longterm(&fs->pwd); kmem_cache_free(fs_cachep, fs); } @@ -118,9 +131,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) spin_lock(&old->lock); fs->root = old->root; - path_get_long(&fs->root); + path_get_longterm(&fs->root); fs->pwd = old->pwd; - path_get_long(&fs->pwd); + path_get_longterm(&fs->pwd); spin_unlock(&old->lock); } return fs; diff --git a/fs/internal.h b/fs/internal.h index 4931060fd089..12ccb86edef7 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -73,6 +73,9 @@ extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int); extern int do_add_mount(struct vfsmount *, struct path *, int); extern void mnt_clear_expiry(struct vfsmount *); +extern void mnt_make_longterm(struct vfsmount *); +extern void mnt_make_shortterm(struct vfsmount *); + extern void __init mnt_init(void); 
DECLARE_BRLOCK(vfsmount_lock); diff --git a/fs/namei.c b/fs/namei.c index c2e37727e3ab..8f7b41a14882 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -367,18 +367,6 @@ void path_get(struct path *path) } EXPORT_SYMBOL(path_get); -/** - * path_get_long - get a long reference to a path - * @path: path to get the reference to - * - * Given a path increment the reference count to the dentry and the vfsmount. - */ -void path_get_long(struct path *path) -{ - mntget_long(path->mnt); - dget(path->dentry); -} - /** * path_put - put a reference to a path * @path: path to put the reference to @@ -392,18 +380,6 @@ void path_put(struct path *path) } EXPORT_SYMBOL(path_put); -/** - * path_put_long - put a long reference to a path - * @path: path to put the reference to - * - * Given a path decrement the reference count to the dentry and the vfsmount. - */ -void path_put_long(struct path *path) -{ - dput(path->dentry); - mntput_long(path->mnt); -} - /** * nameidata_drop_rcu - drop this nameidata out of rcu-walk * @nd: nameidata pathwalk data to drop diff --git a/fs/namespace.c b/fs/namespace.c index d7fc05fac753..48809e21f270 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -183,7 +183,7 @@ static inline void mnt_dec_count(struct vfsmount *mnt) unsigned int mnt_get_count(struct vfsmount *mnt) { #ifdef CONFIG_SMP - unsigned int count = atomic_read(&mnt->mnt_longrefs); + unsigned int count = 0; int cpu; for_each_possible_cpu(cpu) { @@ -217,7 +217,7 @@ struct vfsmount *alloc_vfsmnt(const char *name) if (!mnt->mnt_pcp) goto out_free_devname; - atomic_set(&mnt->mnt_longrefs, 1); + this_cpu_add(mnt->mnt_pcp->mnt_count, 1); #else mnt->mnt_count = 1; mnt->mnt_writers = 0; @@ -624,8 +624,11 @@ static void commit_tree(struct vfsmount *mnt) BUG_ON(parent == mnt); list_add_tail(&head, &mnt->mnt_list); - list_for_each_entry(m, &head, mnt_list) + list_for_each_entry(m, &head, mnt_list) { m->mnt_ns = n; + atomic_inc(&m->mnt_longterm); + } + list_splice(&head, n->list.prev); list_add_tail(&mnt->mnt_hash, mount_hashtable + @@ -734,51 +737,30 @@ static inline void mntfree(struct vfsmount *mnt) deactivate_super(sb); } -#ifdef CONFIG_SMP -static inline void __mntput(struct vfsmount *mnt, int longrefs) +static void mntput_no_expire(struct vfsmount *mnt) { - if (!longrefs) { put_again: - br_read_lock(vfsmount_lock); - if (likely(atomic_read(&mnt->mnt_longrefs))) { - mnt_dec_count(mnt); - br_read_unlock(vfsmount_lock); - return; - } +#ifdef CONFIG_SMP + br_read_lock(vfsmount_lock); + if (likely(atomic_read(&mnt->mnt_longterm))) { + mnt_dec_count(mnt); br_read_unlock(vfsmount_lock); - } else { - BUG_ON(!atomic_read(&mnt->mnt_longrefs)); - if (atomic_add_unless(&mnt->mnt_longrefs, -1, 1)) - return; + return; } + br_read_unlock(vfsmount_lock); br_write_lock(vfsmount_lock); - if (!longrefs) - mnt_dec_count(mnt); - else - atomic_dec(&mnt->mnt_longrefs); + mnt_dec_count(mnt); if (mnt_get_count(mnt)) { br_write_unlock(vfsmount_lock); return; } - if (unlikely(mnt->mnt_pinned)) { - mnt_add_count(mnt, mnt->mnt_pinned + 1); - mnt->mnt_pinned = 0; - br_write_unlock(vfsmount_lock); - acct_auto_close_mnt(mnt); - goto put_again; - } - br_write_unlock(vfsmount_lock); - mntfree(mnt); -} #else -static inline void __mntput(struct vfsmount *mnt, int longrefs) -{ -put_again: mnt_dec_count(mnt); if (likely(mnt_get_count(mnt))) return; br_write_lock(vfsmount_lock); +#endif if (unlikely(mnt->mnt_pinned)) { mnt_add_count(mnt, mnt->mnt_pinned + 1); mnt->mnt_pinned = 0; @@ -789,12 +771,6 @@ put_again: br_write_unlock(vfsmount_lock); mntfree(mnt); } -#endif 
- -static void mntput_no_expire(struct vfsmount *mnt) -{ - __mntput(mnt, 0); -} void mntput(struct vfsmount *mnt) { @@ -802,7 +778,7 @@ void mntput(struct vfsmount *mnt) /* avoid cacheline pingpong, hope gcc doesn't get "smart" */ if (unlikely(mnt->mnt_expiry_mark)) mnt->mnt_expiry_mark = 0; - __mntput(mnt, 0); + mntput_no_expire(mnt); } } EXPORT_SYMBOL(mntput); @@ -815,33 +791,6 @@ struct vfsmount *mntget(struct vfsmount *mnt) } EXPORT_SYMBOL(mntget); -void mntput_long(struct vfsmount *mnt) -{ -#ifdef CONFIG_SMP - if (mnt) { - /* avoid cacheline pingpong, hope gcc doesn't get "smart" */ - if (unlikely(mnt->mnt_expiry_mark)) - mnt->mnt_expiry_mark = 0; - __mntput(mnt, 1); - } -#else - mntput(mnt); -#endif -} -EXPORT_SYMBOL(mntput_long); - -struct vfsmount *mntget_long(struct vfsmount *mnt) -{ -#ifdef CONFIG_SMP - if (mnt) - atomic_inc(&mnt->mnt_longrefs); - return mnt; -#else - return mntget(mnt); -#endif -} -EXPORT_SYMBOL(mntget_long); - void mnt_pin(struct vfsmount *mnt) { br_write_lock(vfsmount_lock); @@ -1216,7 +1165,7 @@ void release_mounts(struct list_head *head) dput(dentry); mntput(m); } - mntput_long(mnt); + mntput(mnt); } } @@ -1240,6 +1189,7 @@ void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill) list_del_init(&p->mnt_list); __touch_mnt_namespace(p->mnt_ns); p->mnt_ns = NULL; + atomic_dec(&p->mnt_longterm); list_del_init(&p->mnt_child); if (p->mnt_parent != p) { p->mnt_parent->mnt_ghosts++; @@ -1969,7 +1919,7 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags) unlock: up_write(&namespace_sem); - mntput_long(newmnt); + mntput(newmnt); return err; } @@ -2291,6 +2241,20 @@ static struct mnt_namespace *alloc_mnt_ns(void) return new_ns; } +void mnt_make_longterm(struct vfsmount *mnt) +{ + atomic_inc(&mnt->mnt_longterm); +} + +void mnt_make_shortterm(struct vfsmount *mnt) +{ + if (atomic_add_unless(&mnt->mnt_longterm, -1, 1)) + return; + br_write_lock(vfsmount_lock); + atomic_dec(&mnt->mnt_longterm); + br_write_unlock(vfsmount_lock); +} + /* * Allocate a new namespace structure and populate it with contents * copied from the namespace of the passed in task structure. 
@@ -2328,14 +2292,19 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, q = new_ns->root; while (p) { q->mnt_ns = new_ns; + atomic_inc(&q->mnt_longterm); if (fs) { if (p == fs->root.mnt) { + fs->root.mnt = mntget(q); + atomic_inc(&q->mnt_longterm); + mnt_make_shortterm(p); rootmnt = p; - fs->root.mnt = mntget_long(q); } if (p == fs->pwd.mnt) { + fs->pwd.mnt = mntget(q); + atomic_inc(&q->mnt_longterm); + mnt_make_shortterm(p); pwdmnt = p; - fs->pwd.mnt = mntget_long(q); } } p = next_mnt(p, mnt_ns->root); @@ -2344,9 +2313,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, up_write(&namespace_sem); if (rootmnt) - mntput_long(rootmnt); + mntput(rootmnt); if (pwdmnt) - mntput_long(pwdmnt); + mntput(pwdmnt); return new_ns; } @@ -2379,6 +2348,7 @@ struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt) new_ns = alloc_mnt_ns(); if (!IS_ERR(new_ns)) { mnt->mnt_ns = new_ns; + atomic_inc(&mnt->mnt_longterm); new_ns->root = mnt; list_add(&new_ns->list, &new_ns->root->mnt_list); } diff --git a/fs/pipe.c b/fs/pipe.c index e2e95fb46a1e..89e9e19b1b2e 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -1292,7 +1292,7 @@ static int __init init_pipe_fs(void) static void __exit exit_pipe_fs(void) { unregister_filesystem(&pipe_fs_type); - mntput_long(pipe_mnt); + mntput(pipe_mnt); } fs_initcall(init_pipe_fs); diff --git a/fs/super.c b/fs/super.c index 4f6a3571a634..74e149efed81 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1141,7 +1141,7 @@ static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype) return mnt; err: - mntput_long(mnt); + mntput(mnt); return ERR_PTR(err); } -- cgit v1.2.2 From f8b18087fd3277e424a24e13ce0edf30abe97ce0 Mon Sep 17 00:00:00 2001 From: Stefan Schmidt Date: Wed, 12 Jan 2011 10:30:42 +0100 Subject: fs/btrfs: Fix build of ctree Fix the build failure in some configurations: CC [M] fs/btrfs/ctree.o In file included from fs/btrfs/ctree.c:21:0: fs/btrfs/ctree.h:1003:17: error: field 'super_kobj' has incomplete type fs/btrfs/ctree.h:1074:17: error: field 'root_kobj' has incomplete type make[2]: *** [fs/btrfs/ctree.o] Error 1 make[1]: *** [fs/btrfs] Error 2 make: *** [fs] Error 2 caused by commit 57cc7215b708 ("headers: kobject.h redux") We need to include kobject.h here. Reported-by: Jeff Garzik Fix-suggested-by: Li Zefan Signed-off-by: Stefan Schmidt Signed-off-by: Linus Torvalds --- fs/btrfs/ctree.h | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index a142d204b526..b875d445ea81 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include "extent_io.h" #include "extent_map.h" -- cgit v1.2.2 From e205117285d6035af135c9d6c34a30ee6b8d1f2e Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 16 Jan 2011 21:11:25 +0000 Subject: configfs: change depends -> select SYSFS This patch changes configfs to select SYSFS to fix the following: warning: (TARGET_CORE && GFS2_FS) selects CONFIGFS_FS which has unmet direct dependencies (SYSFS) Reported-by: Randy Dunlap Signed-off-by: Nicholas A. 
Bellinger Acked-by: Joel Becker --- fs/configfs/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/configfs/Kconfig b/fs/configfs/Kconfig index 13587cc97a0b..9febcdefdfdc 100644 --- a/fs/configfs/Kconfig +++ b/fs/configfs/Kconfig @@ -1,8 +1,8 @@ config CONFIGFS_FS tristate "Userspace-driven configuration filesystem" - depends on SYSFS + select SYSFS help - configfs is a ram-based filesystem that provides the converse + configfs is a RAM-based filesystem that provides the converse of sysfs's functionality. Where sysfs is a filesystem-based view of kernel objects, configfs is a filesystem-based manager of kernel objects, or config_items. -- cgit v1.2.2 From 86c747d2a4f028fe2fdf091c3a81d0e187827682 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 16 Jan 2011 21:14:52 +0000 Subject: dlm: Make DLM depend on CONFIGFS_FS This patch fixes the following kconfig error after changing CONFIGFS_FS -> select SYSFS: fs/sysfs/Kconfig:1:error: recursive dependency detected! fs/sysfs/Kconfig:1: symbol SYSFS is selected by CONFIGFS_FS fs/configfs/Kconfig:1: symbol CONFIGFS_FS is selected by DLM fs/dlm/Kconfig:1: symbol DLM depends on SYSFS Signed-off-by: Nicholas A. Bellinger Cc: Joel Becker Cc: Randy Dunlap Cc: Stephen Rothwell Cc: James Bottomley --- fs/dlm/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig index 2dbb422e8116..1897eb1b4b6a 100644 --- a/fs/dlm/Kconfig +++ b/fs/dlm/Kconfig @@ -1,8 +1,7 @@ menuconfig DLM tristate "Distributed Lock Manager (DLM)" depends on EXPERIMENTAL && INET - depends on SYSFS && (IPV6 || IPV6=n) - select CONFIGFS_FS + depends on SYSFS && CONFIGFS_FS && (IPV6 || IPV6=n) select IP_SCTP help A general purpose distributed lock manager for kernel or userspace -- cgit v1.2.2 From 7b1fff7e4fdf2805fce7afd6247912588d72d604 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 16 Jan 2011 21:16:07 +0000 Subject: ocfs2: Make OCFS2_FS depend on CONFIGFS_FS This patch fixes the following kconfig error after changing CONFIGFS_FS -> select SYSFS: fs/sysfs/Kconfig:1:error: recursive dependency detected! fs/sysfs/Kconfig:1: symbol SYSFS is selected by CONFIGFS_FS fs/configfs/Kconfig:1: symbol CONFIGFS_FS is selected by OCFS2_FS fs/ocfs2/Kconfig:1: symbol OCFS2_FS depends on SYSFS Signed-off-by: Nicholas A. 
Bellinger Cc: Joel Becker Cc: Randy Dunlap Cc: Stephen Rothwell Cc: James Bottomley --- fs/ocfs2/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig index ab152c00cd3a..77a8de5f7119 100644 --- a/fs/ocfs2/Kconfig +++ b/fs/ocfs2/Kconfig @@ -1,7 +1,6 @@ config OCFS2_FS tristate "OCFS2 file system support" - depends on NET && SYSFS - select CONFIGFS_FS + depends on NET && SYSFS && CONFIGFS_FS select JBD2 select CRC32 select QUOTA -- cgit v1.2.2 From 7e3d0eb0b028ed9e9384e6afcae2f22993bbdf25 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 16 Jan 2011 16:32:11 -0500 Subject: VFS: Fix UP compile error in fs/namespace.c mnt_longterm is there only on SMP Reported-and-tested-by: Joachim Eastwood Signed-off-by: Al Viro Signed-off-by: Linus Torvalds --- fs/namespace.c | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/namespace.c b/fs/namespace.c index 48809e21f270..9f544f35ed34 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -611,6 +611,21 @@ static void attach_mnt(struct vfsmount *mnt, struct path *path) list_add_tail(&mnt->mnt_child, &path->mnt->mnt_mounts); } +static inline void __mnt_make_longterm(struct vfsmount *mnt) +{ +#ifdef CONFIG_SMP + atomic_inc(&mnt->mnt_longterm); +#endif +} + +/* needs vfsmount lock for write */ +static inline void __mnt_make_shortterm(struct vfsmount *mnt) +{ +#ifdef CONFIG_SMP + atomic_dec(&mnt->mnt_longterm); +#endif +} + /* * vfsmount lock must be held for write */ @@ -626,7 +641,7 @@ static void commit_tree(struct vfsmount *mnt) list_add_tail(&head, &mnt->mnt_list); list_for_each_entry(m, &head, mnt_list) { m->mnt_ns = n; - atomic_inc(&m->mnt_longterm); + __mnt_make_longterm(m); } list_splice(&head, n->list.prev); @@ -1189,7 +1204,7 @@ void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill) list_del_init(&p->mnt_list); __touch_mnt_namespace(p->mnt_ns); p->mnt_ns = NULL; - atomic_dec(&p->mnt_longterm); + __mnt_make_shortterm(p); list_del_init(&p->mnt_child); if (p->mnt_parent != p) { p->mnt_parent->mnt_ghosts++; @@ -2243,16 +2258,18 @@ static struct mnt_namespace *alloc_mnt_ns(void) void mnt_make_longterm(struct vfsmount *mnt) { - atomic_inc(&mnt->mnt_longterm); + __mnt_make_longterm(mnt); } void mnt_make_shortterm(struct vfsmount *mnt) { +#ifdef CONFIG_SMP if (atomic_add_unless(&mnt->mnt_longterm, -1, 1)) return; br_write_lock(vfsmount_lock); atomic_dec(&mnt->mnt_longterm); br_write_unlock(vfsmount_lock); +#endif } /* @@ -2292,17 +2309,17 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, q = new_ns->root; while (p) { q->mnt_ns = new_ns; - atomic_inc(&q->mnt_longterm); + __mnt_make_longterm(q); if (fs) { if (p == fs->root.mnt) { fs->root.mnt = mntget(q); - atomic_inc(&q->mnt_longterm); + __mnt_make_longterm(q); mnt_make_shortterm(p); rootmnt = p; } if (p == fs->pwd.mnt) { fs->pwd.mnt = mntget(q); - atomic_inc(&q->mnt_longterm); + __mnt_make_longterm(q); mnt_make_shortterm(p); pwdmnt = p; } @@ -2348,7 +2365,7 @@ struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt) new_ns = alloc_mnt_ns(); if (!IS_ERR(new_ns)) { mnt->mnt_ns = new_ns; - atomic_inc(&mnt->mnt_longterm); + __mnt_make_longterm(mnt); new_ns->root = mnt; list_add(&new_ns->list, &new_ns->root->mnt_list); } -- cgit v1.2.2 From 19a167af7c97248ec646552ebc9140bc6aa3552a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 17 Jan 2011 01:35:23 -0500 Subject: Take the completion of automount into new helper ... 
and shift it from namei.c to namespace.c Signed-off-by: Al Viro --- fs/internal.h | 1 + fs/namei.c | 31 +++++-------------------------- fs/namespace.c | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 39 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/internal.h b/fs/internal.h index 12ccb86edef7..e8a0b245177d 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -70,6 +70,7 @@ extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *, extern void release_mounts(struct list_head *); extern void umount_tree(struct vfsmount *, int, struct list_head *); extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int); +extern int finish_automount(struct vfsmount *, struct path *); extern int do_add_mount(struct vfsmount *, struct path *, int); extern void mnt_clear_expiry(struct vfsmount *); diff --git a/fs/namei.c b/fs/namei.c index 8f7b41a14882..b753192d8c3f 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -923,37 +923,13 @@ static int follow_automount(struct path *path, unsigned flags, if (!mnt) /* mount collision */ return 0; - /* The new mount record should have at least 2 refs to prevent it being - * expired before we get a chance to add it - */ - BUG_ON(mnt_get_count(mnt) < 2); - - if (mnt->mnt_sb == path->mnt->mnt_sb && - mnt->mnt_root == path->dentry) { - mnt_clear_expiry(mnt); - mntput(mnt); - mntput(mnt); - return -ELOOP; - } + err = finish_automount(mnt, path); - /* We need to add the mountpoint to the parent. The filesystem may - * have placed it on an expiry list, and so we need to make sure it - * won't be expired under us if do_add_mount() fails (do_add_mount() - * will eat a reference unconditionally). - */ - mntget(mnt); - err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE); switch (err) { case -EBUSY: /* Someone else made a mount here whilst we were busy */ - err = 0; - default: - mnt_clear_expiry(mnt); - mntput(mnt); - mntput(mnt); - return err; + return 0; case 0: - mntput(mnt); dput(path->dentry); if (*need_mntput) mntput(path->mnt); @@ -961,7 +937,10 @@ static int follow_automount(struct path *path, unsigned flags, path->dentry = dget(mnt->mnt_root); *need_mntput = true; return 0; + default: + return err; } + } /* diff --git a/fs/namespace.c b/fs/namespace.c index 9f544f35ed34..bec51e4e0549 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1895,6 +1895,39 @@ static int do_new_mount(struct path *path, char *type, int flags, return do_add_mount(mnt, path, mnt_flags); } +int finish_automount(struct vfsmount *m, struct path *path) +{ + int err; + /* The new mount record should have at least 2 refs to prevent it being + * expired before we get a chance to add it + */ + BUG_ON(mnt_get_count(m) < 2); + + if (m->mnt_sb == path->mnt->mnt_sb && + m->mnt_root == path->dentry) { + mnt_clear_expiry(m); + mntput(m); + mntput(m); + return -ELOOP; + } + + /* We need to add the mountpoint to the parent. The filesystem may + * have placed it on an expiry list, and so we need to make sure it + * won't be expired under us if do_add_mount() fails (do_add_mount() + * will eat a reference unconditionally). + */ + mntget(m); + err = do_add_mount(m, path, path->mnt->mnt_flags | MNT_SHRINKABLE); + if (err) { + mnt_clear_expiry(m); + mntput(m); + mntput(m); + } else { + mntput(m); + } + return err; +} + /* * add a mount into a namespace's mount tree * - this unconditionally eats one of the caller's references to newmnt. 
-- cgit v1.2.2 From 15f9a3f3e199647fe0cac19302c5033cf031372d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 17 Jan 2011 01:41:58 -0500 Subject: don't drop newmnt on error in do_add_mount() That gets rid of the kludge in finish_automount() - we need to keep refcount on the vfsmount as-is until we evict it from expiry list. Signed-off-by: Al Viro --- fs/namespace.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/namespace.c b/fs/namespace.c index bec51e4e0549..31aefc8e5fa6 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1880,6 +1880,7 @@ static int do_new_mount(struct path *path, char *type, int flags, int mnt_flags, char *name, void *data) { struct vfsmount *mnt; + int err; if (!type) return -EINVAL; @@ -1892,7 +1893,10 @@ static int do_new_mount(struct path *path, char *type, int flags, if (IS_ERR(mnt)) return PTR_ERR(mnt); - return do_add_mount(mnt, path, mnt_flags); + err = do_add_mount(mnt, path, mnt_flags); + if (err) + mntput(mnt); + return err; } int finish_automount(struct vfsmount *m, struct path *path) @@ -1911,26 +1915,17 @@ int finish_automount(struct vfsmount *m, struct path *path) return -ELOOP; } - /* We need to add the mountpoint to the parent. The filesystem may - * have placed it on an expiry list, and so we need to make sure it - * won't be expired under us if do_add_mount() fails (do_add_mount() - * will eat a reference unconditionally). - */ - mntget(m); err = do_add_mount(m, path, path->mnt->mnt_flags | MNT_SHRINKABLE); if (err) { mnt_clear_expiry(m); mntput(m); mntput(m); - } else { - mntput(m); } return err; } /* * add a mount into a namespace's mount tree - * - this unconditionally eats one of the caller's references to newmnt. */ int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags) { @@ -1967,7 +1962,6 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags) unlock: up_write(&namespace_sem); - mntput(newmnt); return err; } -- cgit v1.2.2 From b1e75df45a3d8a490b8648e44632debc5eea04b1 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 17 Jan 2011 01:47:59 -0500 Subject: tidy up around finish_automount() do_add_mount() and mnt_clear_expiry() are not needed outside of namespace.c anymore, now that namei has finish_automount() to use. 
Signed-off-by: Al Viro --- fs/internal.h | 2 -- fs/namespace.c | 46 ++++++++++++++++++---------------------------- 2 files changed, 18 insertions(+), 30 deletions(-) (limited to 'fs') diff --git a/fs/internal.h b/fs/internal.h index e8a0b245177d..0663568b1247 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -71,8 +71,6 @@ extern void release_mounts(struct list_head *); extern void umount_tree(struct vfsmount *, int, struct list_head *); extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int); extern int finish_automount(struct vfsmount *, struct path *); -extern int do_add_mount(struct vfsmount *, struct path *, int); -extern void mnt_clear_expiry(struct vfsmount *); extern void mnt_make_longterm(struct vfsmount *); extern void mnt_make_shortterm(struct vfsmount *); diff --git a/fs/namespace.c b/fs/namespace.c index 31aefc8e5fa6..7b0b95371696 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1872,6 +1872,8 @@ out: return err; } +static int do_add_mount(struct vfsmount *, struct path *, int); + /* * create a new mount for userspace and request it to be added into the * namespace's tree @@ -1909,25 +1911,31 @@ int finish_automount(struct vfsmount *m, struct path *path) if (m->mnt_sb == path->mnt->mnt_sb && m->mnt_root == path->dentry) { - mnt_clear_expiry(m); - mntput(m); - mntput(m); - return -ELOOP; + err = -ELOOP; + goto fail; } err = do_add_mount(m, path, path->mnt->mnt_flags | MNT_SHRINKABLE); - if (err) { - mnt_clear_expiry(m); - mntput(m); - mntput(m); + if (!err) + return 0; +fail: + /* remove m from any expiration list it may be on */ + if (!list_empty(&m->mnt_expire)) { + down_write(&namespace_sem); + br_write_lock(vfsmount_lock); + list_del_init(&m->mnt_expire); + br_write_unlock(vfsmount_lock); + up_write(&namespace_sem); } + mntput(m); + mntput(m); return err; } /* * add a mount into a namespace's mount tree */ -int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags) +static int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags) { int err; @@ -1954,11 +1962,7 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path, int mnt_flags) goto unlock; newmnt->mnt_flags = mnt_flags; - if ((err = graft_tree(newmnt, path))) - goto unlock; - - up_write(&namespace_sem); - return 0; + err = graft_tree(newmnt, path); unlock: up_write(&namespace_sem); @@ -1982,20 +1986,6 @@ void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) } EXPORT_SYMBOL(mnt_set_expiry); -/* - * Remove a vfsmount from any expiration list it may be on - */ -void mnt_clear_expiry(struct vfsmount *mnt) -{ - if (!list_empty(&mnt->mnt_expire)) { - down_write(&namespace_sem); - br_write_lock(vfsmount_lock); - list_del_init(&mnt->mnt_expire); - br_write_unlock(vfsmount_lock); - up_write(&namespace_sem); - } -} - /* * process a list of expirable mountpoints with the intent of discarding any * mountpoints that aren't in use and haven't been touched since last we came -- cgit v1.2.2 From 64c23e86873ee410554d6d1c76b60da47025e96f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 14 Jan 2011 13:07:30 +0100 Subject: make the feature checks in ->fallocate future proof Instead of various home grown checks that might need updates for new flags just check for any bit outside the mask of the features supported by the filesystem. This makes the check future proof for any newly added flag. 
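The before/after shape of the check, taking the btrfs hunk below as the example (the mask is whatever set of FALLOC_FL_* flags the filesystem supports):

/* Before: a home-grown check that has to be touched for every new flag. */
if (mode && (mode != FALLOC_FL_KEEP_SIZE))
        return -EOPNOTSUPP;

/* After: anything outside the supported mask is rejected automatically. */
if (mode & ~FALLOC_FL_KEEP_SIZE)
        return -EOPNOTSUPP;

Filesystems that also handle hole punching simply widen the mask, e.g. ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE) in the ocfs2 and xfs hunks.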
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/btrfs/inode.c | 2 +- fs/ext4/extents.c | 2 +- fs/gfs2/ops_inode.c | 2 +- fs/ocfs2/file.c | 2 ++ fs/xfs/linux-2.6/xfs_iops.c | 3 +++ 5 files changed, 8 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a3798a3aa0d2..64daf2acd0d5 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7116,7 +7116,7 @@ static long btrfs_fallocate(struct inode *inode, int mode, alloc_end = (offset + len + mask) & ~mask; /* We only support the FALLOC_FL_KEEP_SIZE mode */ - if (mode && (mode != FALLOC_FL_KEEP_SIZE)) + if (mode & ~FALLOC_FL_KEEP_SIZE) return -EOPNOTSUPP; /* diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index c4068f6abf03..4bdd160854eb 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3645,7 +3645,7 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) unsigned int credits, blkbits = inode->i_blkbits; /* We only support the FALLOC_FL_KEEP_SIZE mode */ - if (mode && (mode != FALLOC_FL_KEEP_SIZE)) + if (mode & ~FALLOC_FL_KEEP_SIZE) return -EOPNOTSUPP; /* diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index 040b5a2e6556..c09528c07f3d 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -1426,7 +1426,7 @@ static long gfs2_fallocate(struct inode *inode, int mode, loff_t offset, next = (next + 1) << sdp->sd_sb.sb_bsize_shift; /* We only support the FALLOC_FL_KEEP_SIZE mode */ - if (mode && (mode != FALLOC_FL_KEEP_SIZE)) + if (mode & ~FALLOC_FL_KEEP_SIZE) return -EOPNOTSUPP; offset = (offset >> sdp->sd_sb.sb_bsize_shift) << diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 63e3fca266e0..cf254ce8c941 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1997,6 +1997,8 @@ static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset, int change_size = 1; int cmd = OCFS2_IOC_RESVSP64; + if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) + return -EOPNOTSUPP; if (!ocfs2_writes_unwritten_extents(osb)) return -EOPNOTSUPP; diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index da54403633b6..a4ecc2188a09 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -518,6 +518,9 @@ xfs_vn_fallocate( xfs_inode_t *ip = XFS_I(inode); int cmd = XFS_IOC_RESVSP; + if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) + return -EOPNOTSUPP; + /* preallocation on directories not yet supported */ error = -ENODEV; if (S_ISDIR(inode->i_mode)) -- cgit v1.2.2 From 2fe17c1075836b66678ed2a305fd09b6773883aa Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 14 Jan 2011 13:07:43 +0100 Subject: fallocate should be a file operation Currently all filesystems except XFS implement fallocate asynchronously, while XFS forced a commit. Both of these are suboptimal - in case of O_SYNC I/O we really want our allocation on disk, especially for the !KEEP_SIZE case where we actually grow the file with user-visible zeroes. On the other hand always commiting the transaction is a bad idea for fast-path uses of fallocate like for example in recent Samba versions. Given that block allocation is a data plane operation anyway change it from an inode operation to a file operation so that we have the file structure available that lets us check for O_SYNC. This also includes moving the code around for a few of the filesystems, and remove the already unnedded S_ISDIR checks given that we only wire up fallocate for regular files. 
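In interface terms the move boils down to the changed hook and call site. The new hook lives in struct file_operations (declared in include/linux/fs.h, outside the 'fs' subset shown in this log), and the dispatch change is visible in the fs/open.c hunk further down:

	/* ->fallocate is now a file operation rather than an inode operation */
	long (*fallocate)(struct file *file, int mode, loff_t offset, loff_t len);

	/* do_fallocate() dispatches through the file rather than the inode */
	return file->f_op->fallocate(file, mode, offset, len);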
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/btrfs/file.c | 113 +++++++++++++++++++ fs/btrfs/inode.c | 111 ------------------- fs/ext4/ext4.h | 2 +- fs/ext4/extents.c | 9 +- fs/ext4/file.c | 2 +- fs/gfs2/file.c | 258 ++++++++++++++++++++++++++++++++++++++++++++ fs/gfs2/ops_inode.c | 258 -------------------------------------------- fs/ocfs2/file.c | 8 +- fs/open.c | 4 +- fs/xfs/linux-2.6/xfs_file.c | 56 ++++++++++ fs/xfs/linux-2.6/xfs_iops.c | 60 ----------- 11 files changed, 437 insertions(+), 444 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 66836d85763b..a9e0a4eaf3d9 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -1237,6 +1238,117 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) return 0; } +static long btrfs_fallocate(struct file *file, int mode, + loff_t offset, loff_t len) +{ + struct inode *inode = file->f_path.dentry->d_inode; + struct extent_state *cached_state = NULL; + u64 cur_offset; + u64 last_byte; + u64 alloc_start; + u64 alloc_end; + u64 alloc_hint = 0; + u64 locked_end; + u64 mask = BTRFS_I(inode)->root->sectorsize - 1; + struct extent_map *em; + int ret; + + alloc_start = offset & ~mask; + alloc_end = (offset + len + mask) & ~mask; + + /* We only support the FALLOC_FL_KEEP_SIZE mode */ + if (mode & ~FALLOC_FL_KEEP_SIZE) + return -EOPNOTSUPP; + + /* + * wait for ordered IO before we have any locks. We'll loop again + * below with the locks held. + */ + btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start); + + mutex_lock(&inode->i_mutex); + ret = inode_newsize_ok(inode, alloc_end); + if (ret) + goto out; + + if (alloc_start > inode->i_size) { + ret = btrfs_cont_expand(inode, alloc_start); + if (ret) + goto out; + } + + ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start); + if (ret) + goto out; + + locked_end = alloc_end - 1; + while (1) { + struct btrfs_ordered_extent *ordered; + + /* the extent lock is ordered inside the running + * transaction + */ + lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, + locked_end, 0, &cached_state, GFP_NOFS); + ordered = btrfs_lookup_first_ordered_extent(inode, + alloc_end - 1); + if (ordered && + ordered->file_offset + ordered->len > alloc_start && + ordered->file_offset < alloc_end) { + btrfs_put_ordered_extent(ordered); + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + alloc_start, locked_end, + &cached_state, GFP_NOFS); + /* + * we can't wait on the range with the transaction + * running or with the extent lock held + */ + btrfs_wait_ordered_range(inode, alloc_start, + alloc_end - alloc_start); + } else { + if (ordered) + btrfs_put_ordered_extent(ordered); + break; + } + } + + cur_offset = alloc_start; + while (1) { + em = btrfs_get_extent(inode, NULL, 0, cur_offset, + alloc_end - cur_offset, 0); + BUG_ON(IS_ERR(em) || !em); + last_byte = min(extent_map_end(em), alloc_end); + last_byte = (last_byte + mask) & ~mask; + if (em->block_start == EXTENT_MAP_HOLE || + (cur_offset >= inode->i_size && + !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { + ret = btrfs_prealloc_file_range(inode, mode, cur_offset, + last_byte - cur_offset, + 1 << inode->i_blkbits, + offset + len, + &alloc_hint); + if (ret < 0) { + free_extent_map(em); + break; + } + } + free_extent_map(em); + + cur_offset = last_byte; + if (cur_offset >= alloc_end) { + ret = 0; + break; + } + } + unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, + 
&cached_state, GFP_NOFS); + + btrfs_free_reserved_data_space(inode, alloc_end - alloc_start); +out: + mutex_unlock(&inode->i_mutex); + return ret; +} + const struct file_operations btrfs_file_operations = { .llseek = generic_file_llseek, .read = do_sync_read, @@ -1248,6 +1360,7 @@ const struct file_operations btrfs_file_operations = { .open = generic_file_open, .release = btrfs_release_file, .fsync = btrfs_sync_file, + .fallocate = btrfs_fallocate, .unlocked_ioctl = btrfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = btrfs_ioctl, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 64daf2acd0d5..902afbf50811 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7098,116 +7098,6 @@ int btrfs_prealloc_file_range_trans(struct inode *inode, min_size, actual_len, alloc_hint, trans); } -static long btrfs_fallocate(struct inode *inode, int mode, - loff_t offset, loff_t len) -{ - struct extent_state *cached_state = NULL; - u64 cur_offset; - u64 last_byte; - u64 alloc_start; - u64 alloc_end; - u64 alloc_hint = 0; - u64 locked_end; - u64 mask = BTRFS_I(inode)->root->sectorsize - 1; - struct extent_map *em; - int ret; - - alloc_start = offset & ~mask; - alloc_end = (offset + len + mask) & ~mask; - - /* We only support the FALLOC_FL_KEEP_SIZE mode */ - if (mode & ~FALLOC_FL_KEEP_SIZE) - return -EOPNOTSUPP; - - /* - * wait for ordered IO before we have any locks. We'll loop again - * below with the locks held. - */ - btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start); - - mutex_lock(&inode->i_mutex); - ret = inode_newsize_ok(inode, alloc_end); - if (ret) - goto out; - - if (alloc_start > inode->i_size) { - ret = btrfs_cont_expand(inode, alloc_start); - if (ret) - goto out; - } - - ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start); - if (ret) - goto out; - - locked_end = alloc_end - 1; - while (1) { - struct btrfs_ordered_extent *ordered; - - /* the extent lock is ordered inside the running - * transaction - */ - lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start, - locked_end, 0, &cached_state, GFP_NOFS); - ordered = btrfs_lookup_first_ordered_extent(inode, - alloc_end - 1); - if (ordered && - ordered->file_offset + ordered->len > alloc_start && - ordered->file_offset < alloc_end) { - btrfs_put_ordered_extent(ordered); - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - alloc_start, locked_end, - &cached_state, GFP_NOFS); - /* - * we can't wait on the range with the transaction - * running or with the extent lock held - */ - btrfs_wait_ordered_range(inode, alloc_start, - alloc_end - alloc_start); - } else { - if (ordered) - btrfs_put_ordered_extent(ordered); - break; - } - } - - cur_offset = alloc_start; - while (1) { - em = btrfs_get_extent(inode, NULL, 0, cur_offset, - alloc_end - cur_offset, 0); - BUG_ON(IS_ERR(em) || !em); - last_byte = min(extent_map_end(em), alloc_end); - last_byte = (last_byte + mask) & ~mask; - if (em->block_start == EXTENT_MAP_HOLE || - (cur_offset >= inode->i_size && - !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { - ret = btrfs_prealloc_file_range(inode, mode, cur_offset, - last_byte - cur_offset, - 1 << inode->i_blkbits, - offset + len, - &alloc_hint); - if (ret < 0) { - free_extent_map(em); - break; - } - } - free_extent_map(em); - - cur_offset = last_byte; - if (cur_offset >= alloc_end) { - ret = 0; - break; - } - } - unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, - &cached_state, GFP_NOFS); - - btrfs_free_reserved_data_space(inode, alloc_end - alloc_start); -out: - mutex_unlock(&inode->i_mutex); - return 
ret; -} - static int btrfs_set_page_dirty(struct page *page) { return __set_page_dirty_nobuffers(page); @@ -7310,7 +7200,6 @@ static const struct inode_operations btrfs_file_inode_operations = { .listxattr = btrfs_listxattr, .removexattr = btrfs_removexattr, .permission = btrfs_permission, - .fallocate = btrfs_fallocate, .fiemap = btrfs_fiemap, }; static const struct inode_operations btrfs_special_inode_operations = { diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 1de65f572033..0c8d97b56f34 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2065,7 +2065,7 @@ extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, extern void ext4_ext_truncate(struct inode *); extern void ext4_ext_init(struct super_block *); extern void ext4_ext_release(struct super_block *); -extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset, +extern long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len); extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset, ssize_t len); diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 4bdd160854eb..63a75810b7c3 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3627,14 +3627,15 @@ static void ext4_falloc_update_inode(struct inode *inode, } /* - * preallocate space for a file. This implements ext4's fallocate inode + * preallocate space for a file. This implements ext4's fallocate file * operation, which gets called from sys_fallocate system call. * For block-mapped files, posix_fallocate should fall back to the method * of writing zeroes to the required new blocks (the same behavior which is * expected for file systems which do not support fallocate() system call). */ -long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) +long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { + struct inode *inode = file->f_path.dentry->d_inode; handle_t *handle; loff_t new_size; unsigned int max_blocks; @@ -3655,10 +3656,6 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len) if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return -EOPNOTSUPP; - /* preallocation to directories is currently not supported */ - if (S_ISDIR(inode->i_mode)) - return -ENODEV; - map.m_lblk = offset >> blkbits; /* * We can't just convert len to max_blocks because diff --git a/fs/ext4/file.c b/fs/ext4/file.c index bb003dc9ffff..2e8322c8aa88 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -210,6 +210,7 @@ const struct file_operations ext4_file_operations = { .fsync = ext4_sync_file, .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, + .fallocate = ext4_fallocate, }; const struct inode_operations ext4_file_inode_operations = { @@ -223,7 +224,6 @@ const struct inode_operations ext4_file_inode_operations = { .removexattr = generic_removexattr, #endif .check_acl = ext4_check_acl, - .fallocate = ext4_fallocate, .fiemap = ext4_fiemap, }; diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index fca6689e12e6..7cfdcb913363 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -19,6 +19,8 @@ #include #include #include +#include +#include #include #include #include @@ -610,6 +612,260 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov, return generic_file_aio_write(iocb, iov, nr_segs, pos); } +static void empty_write_end(struct page *page, unsigned from, + unsigned to) +{ + struct gfs2_inode *ip = GFS2_I(page->mapping->host); + + page_zero_new_buffers(page, from, to); + flush_dcache_page(page); + 
mark_page_accessed(page); + + if (!gfs2_is_writeback(ip)) + gfs2_page_add_databufs(ip, page, from, to); + + block_commit_write(page, from, to); +} + +static int write_empty_blocks(struct page *page, unsigned from, unsigned to) +{ + unsigned start, end, next; + struct buffer_head *bh, *head; + int error; + + if (!page_has_buffers(page)) { + error = __block_write_begin(page, from, to - from, gfs2_block_map); + if (unlikely(error)) + return error; + + empty_write_end(page, from, to); + return 0; + } + + bh = head = page_buffers(page); + next = end = 0; + while (next < from) { + next += bh->b_size; + bh = bh->b_this_page; + } + start = next; + do { + next += bh->b_size; + if (buffer_mapped(bh)) { + if (end) { + error = __block_write_begin(page, start, end - start, + gfs2_block_map); + if (unlikely(error)) + return error; + empty_write_end(page, start, end); + end = 0; + } + start = next; + } + else + end = next; + bh = bh->b_this_page; + } while (next < to); + + if (end) { + error = __block_write_begin(page, start, end - start, gfs2_block_map); + if (unlikely(error)) + return error; + empty_write_end(page, start, end); + } + + return 0; +} + +static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len, + int mode) +{ + struct gfs2_inode *ip = GFS2_I(inode); + struct buffer_head *dibh; + int error; + u64 start = offset >> PAGE_CACHE_SHIFT; + unsigned int start_offset = offset & ~PAGE_CACHE_MASK; + u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT; + pgoff_t curr; + struct page *page; + unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK; + unsigned int from, to; + + if (!end_offset) + end_offset = PAGE_CACHE_SIZE; + + error = gfs2_meta_inode_buffer(ip, &dibh); + if (unlikely(error)) + goto out; + + gfs2_trans_add_bh(ip->i_gl, dibh, 1); + + if (gfs2_is_stuffed(ip)) { + error = gfs2_unstuff_dinode(ip, NULL); + if (unlikely(error)) + goto out; + } + + curr = start; + offset = start << PAGE_CACHE_SHIFT; + from = start_offset; + to = PAGE_CACHE_SIZE; + while (curr <= end) { + page = grab_cache_page_write_begin(inode->i_mapping, curr, + AOP_FLAG_NOFS); + if (unlikely(!page)) { + error = -ENOMEM; + goto out; + } + + if (curr == end) + to = end_offset; + error = write_empty_blocks(page, from, to); + if (!error && offset + to > inode->i_size && + !(mode & FALLOC_FL_KEEP_SIZE)) { + i_size_write(inode, offset + to); + } + unlock_page(page); + page_cache_release(page); + if (error) + goto out; + curr++; + offset += PAGE_CACHE_SIZE; + from = 0; + } + + gfs2_dinode_out(ip, dibh->b_data); + mark_inode_dirty(inode); + + brelse(dibh); + +out: + return error; +} + +static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len, + unsigned int *data_blocks, unsigned int *ind_blocks) +{ + const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); + unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone; + unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1); + + for (tmp = max_data; tmp > sdp->sd_diptrs;) { + tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs); + max_data -= tmp; + } + /* This calculation isn't the exact reverse of gfs2_write_calc_reserve, + so it might end up with fewer data blocks */ + if (max_data <= *data_blocks) + return; + *data_blocks = max_data; + *ind_blocks = max_blocks - max_data; + *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift; + if (*len > max) { + *len = max; + gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks); + } +} + +static long gfs2_fallocate(struct file *file, int mode, loff_t offset, + loff_t len) +{ + struct inode 
*inode = file->f_path.dentry->d_inode; + struct gfs2_sbd *sdp = GFS2_SB(inode); + struct gfs2_inode *ip = GFS2_I(inode); + unsigned int data_blocks = 0, ind_blocks = 0, rblocks; + loff_t bytes, max_bytes; + struct gfs2_alloc *al; + int error; + loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift; + next = (next + 1) << sdp->sd_sb.sb_bsize_shift; + + /* We only support the FALLOC_FL_KEEP_SIZE mode */ + if (mode & ~FALLOC_FL_KEEP_SIZE) + return -EOPNOTSUPP; + + offset = (offset >> sdp->sd_sb.sb_bsize_shift) << + sdp->sd_sb.sb_bsize_shift; + + len = next - offset; + bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2; + if (!bytes) + bytes = UINT_MAX; + + gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); + error = gfs2_glock_nq(&ip->i_gh); + if (unlikely(error)) + goto out_uninit; + + if (!gfs2_write_alloc_required(ip, offset, len)) + goto out_unlock; + + while (len > 0) { + if (len < bytes) + bytes = len; + al = gfs2_alloc_get(ip); + if (!al) { + error = -ENOMEM; + goto out_unlock; + } + + error = gfs2_quota_lock_check(ip); + if (error) + goto out_alloc_put; + +retry: + gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks); + + al->al_requested = data_blocks + ind_blocks; + error = gfs2_inplace_reserve(ip); + if (error) { + if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) { + bytes >>= 1; + goto retry; + } + goto out_qunlock; + } + max_bytes = bytes; + calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks); + al->al_requested = data_blocks + ind_blocks; + + rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA + + RES_RG_HDR + gfs2_rg_blocks(al); + if (gfs2_is_jdata(ip)) + rblocks += data_blocks ? data_blocks : 1; + + error = gfs2_trans_begin(sdp, rblocks, + PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); + if (error) + goto out_trans_fail; + + error = fallocate_chunk(inode, offset, max_bytes, mode); + gfs2_trans_end(sdp); + + if (error) + goto out_trans_fail; + + len -= max_bytes; + offset += max_bytes; + gfs2_inplace_release(ip); + gfs2_quota_unlock(ip); + gfs2_alloc_put(ip); + } + goto out_unlock; + +out_trans_fail: + gfs2_inplace_release(ip); +out_qunlock: + gfs2_quota_unlock(ip); +out_alloc_put: + gfs2_alloc_put(ip); +out_unlock: + gfs2_glock_dq(&ip->i_gh); +out_uninit: + gfs2_holder_uninit(&ip->i_gh); + return error; +} + #ifdef CONFIG_GFS2_FS_LOCKING_DLM /** @@ -765,6 +1021,7 @@ const struct file_operations gfs2_file_fops = { .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, .setlease = gfs2_setlease, + .fallocate = gfs2_fallocate, }; const struct file_operations gfs2_dir_fops = { @@ -794,6 +1051,7 @@ const struct file_operations gfs2_file_fops_nolock = { .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, .setlease = generic_setlease, + .fallocate = gfs2_fallocate, }; const struct file_operations gfs2_dir_fops_nolock = { diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c index c09528c07f3d..d8b26ac2e20b 100644 --- a/fs/gfs2/ops_inode.c +++ b/fs/gfs2/ops_inode.c @@ -18,8 +18,6 @@ #include #include #include -#include -#include #include #include "gfs2.h" @@ -1257,261 +1255,6 @@ static int gfs2_removexattr(struct dentry *dentry, const char *name) return ret; } -static void empty_write_end(struct page *page, unsigned from, - unsigned to) -{ - struct gfs2_inode *ip = GFS2_I(page->mapping->host); - - page_zero_new_buffers(page, from, to); - flush_dcache_page(page); - mark_page_accessed(page); - - if (!gfs2_is_writeback(ip)) - gfs2_page_add_databufs(ip, page, from, to); - - 
block_commit_write(page, from, to); -} - - -static int write_empty_blocks(struct page *page, unsigned from, unsigned to) -{ - unsigned start, end, next; - struct buffer_head *bh, *head; - int error; - - if (!page_has_buffers(page)) { - error = __block_write_begin(page, from, to - from, gfs2_block_map); - if (unlikely(error)) - return error; - - empty_write_end(page, from, to); - return 0; - } - - bh = head = page_buffers(page); - next = end = 0; - while (next < from) { - next += bh->b_size; - bh = bh->b_this_page; - } - start = next; - do { - next += bh->b_size; - if (buffer_mapped(bh)) { - if (end) { - error = __block_write_begin(page, start, end - start, - gfs2_block_map); - if (unlikely(error)) - return error; - empty_write_end(page, start, end); - end = 0; - } - start = next; - } - else - end = next; - bh = bh->b_this_page; - } while (next < to); - - if (end) { - error = __block_write_begin(page, start, end - start, gfs2_block_map); - if (unlikely(error)) - return error; - empty_write_end(page, start, end); - } - - return 0; -} - -static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len, - int mode) -{ - struct gfs2_inode *ip = GFS2_I(inode); - struct buffer_head *dibh; - int error; - u64 start = offset >> PAGE_CACHE_SHIFT; - unsigned int start_offset = offset & ~PAGE_CACHE_MASK; - u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT; - pgoff_t curr; - struct page *page; - unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK; - unsigned int from, to; - - if (!end_offset) - end_offset = PAGE_CACHE_SIZE; - - error = gfs2_meta_inode_buffer(ip, &dibh); - if (unlikely(error)) - goto out; - - gfs2_trans_add_bh(ip->i_gl, dibh, 1); - - if (gfs2_is_stuffed(ip)) { - error = gfs2_unstuff_dinode(ip, NULL); - if (unlikely(error)) - goto out; - } - - curr = start; - offset = start << PAGE_CACHE_SHIFT; - from = start_offset; - to = PAGE_CACHE_SIZE; - while (curr <= end) { - page = grab_cache_page_write_begin(inode->i_mapping, curr, - AOP_FLAG_NOFS); - if (unlikely(!page)) { - error = -ENOMEM; - goto out; - } - - if (curr == end) - to = end_offset; - error = write_empty_blocks(page, from, to); - if (!error && offset + to > inode->i_size && - !(mode & FALLOC_FL_KEEP_SIZE)) { - i_size_write(inode, offset + to); - } - unlock_page(page); - page_cache_release(page); - if (error) - goto out; - curr++; - offset += PAGE_CACHE_SIZE; - from = 0; - } - - gfs2_dinode_out(ip, dibh->b_data); - mark_inode_dirty(inode); - - brelse(dibh); - -out: - return error; -} - -static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len, - unsigned int *data_blocks, unsigned int *ind_blocks) -{ - const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone; - unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1); - - for (tmp = max_data; tmp > sdp->sd_diptrs;) { - tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs); - max_data -= tmp; - } - /* This calculation isn't the exact reverse of gfs2_write_calc_reserve, - so it might end up with fewer data blocks */ - if (max_data <= *data_blocks) - return; - *data_blocks = max_data; - *ind_blocks = max_blocks - max_data; - *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift; - if (*len > max) { - *len = max; - gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks); - } -} - -static long gfs2_fallocate(struct inode *inode, int mode, loff_t offset, - loff_t len) -{ - struct gfs2_sbd *sdp = GFS2_SB(inode); - struct gfs2_inode *ip = GFS2_I(inode); - unsigned int data_blocks = 0, 
ind_blocks = 0, rblocks; - loff_t bytes, max_bytes; - struct gfs2_alloc *al; - int error; - loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift; - next = (next + 1) << sdp->sd_sb.sb_bsize_shift; - - /* We only support the FALLOC_FL_KEEP_SIZE mode */ - if (mode & ~FALLOC_FL_KEEP_SIZE) - return -EOPNOTSUPP; - - offset = (offset >> sdp->sd_sb.sb_bsize_shift) << - sdp->sd_sb.sb_bsize_shift; - - len = next - offset; - bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2; - if (!bytes) - bytes = UINT_MAX; - - gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); - error = gfs2_glock_nq(&ip->i_gh); - if (unlikely(error)) - goto out_uninit; - - if (!gfs2_write_alloc_required(ip, offset, len)) - goto out_unlock; - - while (len > 0) { - if (len < bytes) - bytes = len; - al = gfs2_alloc_get(ip); - if (!al) { - error = -ENOMEM; - goto out_unlock; - } - - error = gfs2_quota_lock_check(ip); - if (error) - goto out_alloc_put; - -retry: - gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks); - - al->al_requested = data_blocks + ind_blocks; - error = gfs2_inplace_reserve(ip); - if (error) { - if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) { - bytes >>= 1; - goto retry; - } - goto out_qunlock; - } - max_bytes = bytes; - calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks); - al->al_requested = data_blocks + ind_blocks; - - rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA + - RES_RG_HDR + gfs2_rg_blocks(al); - if (gfs2_is_jdata(ip)) - rblocks += data_blocks ? data_blocks : 1; - - error = gfs2_trans_begin(sdp, rblocks, - PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); - if (error) - goto out_trans_fail; - - error = fallocate_chunk(inode, offset, max_bytes, mode); - gfs2_trans_end(sdp); - - if (error) - goto out_trans_fail; - - len -= max_bytes; - offset += max_bytes; - gfs2_inplace_release(ip); - gfs2_quota_unlock(ip); - gfs2_alloc_put(ip); - } - goto out_unlock; - -out_trans_fail: - gfs2_inplace_release(ip); -out_qunlock: - gfs2_quota_unlock(ip); -out_alloc_put: - gfs2_alloc_put(ip); -out_unlock: - gfs2_glock_dq(&ip->i_gh); -out_uninit: - gfs2_holder_uninit(&ip->i_gh); - return error; -} - - static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { @@ -1562,7 +1305,6 @@ const struct inode_operations gfs2_file_iops = { .getxattr = gfs2_getxattr, .listxattr = gfs2_listxattr, .removexattr = gfs2_removexattr, - .fallocate = gfs2_fallocate, .fiemap = gfs2_fiemap, }; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index cf254ce8c941..a6651956482e 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1989,9 +1989,10 @@ int ocfs2_change_file_space(struct file *file, unsigned int cmd, return __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0); } -static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset, +static long ocfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { + struct inode *inode = file->f_path.dentry->d_inode; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_space_resv sr; int change_size = 1; @@ -2002,9 +2003,6 @@ static long ocfs2_fallocate(struct inode *inode, int mode, loff_t offset, if (!ocfs2_writes_unwritten_extents(osb)) return -EOPNOTSUPP; - if (S_ISDIR(inode->i_mode)) - return -ENODEV; - if (mode & FALLOC_FL_KEEP_SIZE) change_size = 0; @@ -2612,7 +2610,6 @@ const struct inode_operations ocfs2_file_iops = { .getxattr = generic_getxattr, .listxattr = ocfs2_listxattr, .removexattr = generic_removexattr, - .fallocate = ocfs2_fallocate, .fiemap = 
ocfs2_fiemap, }; @@ -2644,6 +2641,7 @@ const struct file_operations ocfs2_fops = { .flock = ocfs2_flock, .splice_read = ocfs2_file_splice_read, .splice_write = ocfs2_file_splice_write, + .fallocate = ocfs2_fallocate, }; const struct file_operations ocfs2_dops = { diff --git a/fs/open.c b/fs/open.c index 5b6ef7e2859e..e52389e1f05b 100644 --- a/fs/open.c +++ b/fs/open.c @@ -255,10 +255,10 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0)) return -EFBIG; - if (!inode->i_op->fallocate) + if (!file->f_op->fallocate) return -EOPNOTSUPP; - return inode->i_op->fallocate(inode, mode, offset, len); + return file->f_op->fallocate(file, mode, offset, len); } SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len) diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index ef51eb43e137..a55c1b46b219 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -37,6 +37,7 @@ #include "xfs_trace.h" #include +#include static const struct vm_operations_struct xfs_file_vm_ops; @@ -882,6 +883,60 @@ out_unlock: return ret; } +STATIC long +xfs_file_fallocate( + struct file *file, + int mode, + loff_t offset, + loff_t len) +{ + struct inode *inode = file->f_path.dentry->d_inode; + long error; + loff_t new_size = 0; + xfs_flock64_t bf; + xfs_inode_t *ip = XFS_I(inode); + int cmd = XFS_IOC_RESVSP; + + if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) + return -EOPNOTSUPP; + + bf.l_whence = 0; + bf.l_start = offset; + bf.l_len = len; + + xfs_ilock(ip, XFS_IOLOCK_EXCL); + + if (mode & FALLOC_FL_PUNCH_HOLE) + cmd = XFS_IOC_UNRESVSP; + + /* check the new inode size is valid before allocating */ + if (!(mode & FALLOC_FL_KEEP_SIZE) && + offset + len > i_size_read(inode)) { + new_size = offset + len; + error = inode_newsize_ok(inode, new_size); + if (error) + goto out_unlock; + } + + error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK); + if (error) + goto out_unlock; + + /* Change file size if needed */ + if (new_size) { + struct iattr iattr; + + iattr.ia_valid = ATTR_SIZE; + iattr.ia_size = new_size; + error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK); + } + +out_unlock: + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return error; +} + + STATIC int xfs_file_open( struct inode *inode, @@ -1000,6 +1055,7 @@ const struct file_operations xfs_file_operations = { .open = xfs_file_open, .release = xfs_file_release, .fsync = xfs_file_fsync, + .fallocate = xfs_file_fallocate, }; const struct file_operations xfs_dir_file_operations = { diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index a4ecc2188a09..bd5727852fd6 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -46,7 +46,6 @@ #include #include #include -#include #include #include @@ -505,64 +504,6 @@ xfs_vn_setattr( return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0); } -STATIC long -xfs_vn_fallocate( - struct inode *inode, - int mode, - loff_t offset, - loff_t len) -{ - long error; - loff_t new_size = 0; - xfs_flock64_t bf; - xfs_inode_t *ip = XFS_I(inode); - int cmd = XFS_IOC_RESVSP; - - if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) - return -EOPNOTSUPP; - - /* preallocation on directories not yet supported */ - error = -ENODEV; - if (S_ISDIR(inode->i_mode)) - goto out_error; - - bf.l_whence = 0; - bf.l_start = offset; - bf.l_len = len; - - xfs_ilock(ip, XFS_IOLOCK_EXCL); - - if (mode & FALLOC_FL_PUNCH_HOLE) - cmd = XFS_IOC_UNRESVSP; - - /* check the new 
inode size is valid before allocating */ - if (!(mode & FALLOC_FL_KEEP_SIZE) && - offset + len > i_size_read(inode)) { - new_size = offset + len; - error = inode_newsize_ok(inode, new_size); - if (error) - goto out_unlock; - } - - error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK); - if (error) - goto out_unlock; - - /* Change file size if needed */ - if (new_size) { - struct iattr iattr; - - iattr.ia_valid = ATTR_SIZE; - iattr.ia_size = new_size; - error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK); - } - -out_unlock: - xfs_iunlock(ip, XFS_IOLOCK_EXCL); -out_error: - return error; -} - #define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) /* @@ -656,7 +597,6 @@ static const struct inode_operations xfs_inode_operations = { .getxattr = generic_getxattr, .removexattr = generic_removexattr, .listxattr = xfs_vn_listxattr, - .fallocate = xfs_vn_fallocate, .fiemap = xfs_vn_fiemap, }; -- cgit v1.2.2 From 3bc0ba4305fa99b32caac8c60df84a2f14fce228 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Mon, 13 Dec 2010 19:38:09 -0500 Subject: fs: Remove unlikely() from fget_light() There's an unlikely() in fget_light() that assumes the file ref count will be 1. Running the annotate branch profiler on a desktop that is performing daily tasks (running firefox, evolution, xchat and is also part of a distcc farm), it shows that the ref count is not 1 that often. correct incorrect % Function File Line ------- --------- - -------- ---- ---- 1035099358 6209599193 85 fget_light file_table.c 315 Cc: Al Viro Cc: Christoph Hellwig Signed-off-by: Steven Rostedt Signed-off-by: Al Viro --- fs/file_table.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/file_table.c b/fs/file_table.c index c3dee381f1b4..c3e89adf53c0 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -311,7 +311,7 @@ struct file *fget_light(unsigned int fd, int *fput_needed) struct files_struct *files = current->files; *fput_needed = 0; - if (likely((atomic_read(&files->count) == 1))) { + if (atomic_read(&files->count) == 1) { file = fcheck_files(files, fd); } else { rcu_read_lock(); -- cgit v1.2.2 From 16ebe911eba8afa88879213dc4388f2dd701561e Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 2 Jan 2011 14:44:00 -0800 Subject: fs: FS_POSIX_ACL does not depend on BLOCK - Fix a kconfig unmet dependency warning. - Remove the comment that identifies which filesystems use POSIX ACL utility routines. - Move the FS_POSIX_ACL symbol outside of the BLOCK symbol if/endif block because its functions do not depend on BLOCK and some of the filesystems that use it do not depend on BLOCK. warning: (GENERIC_ACL && JFFS2_FS_POSIX_ACL && NFSD_V4 && NFS_ACL_SUPPORT && 9P_FS_POSIX_ACL) selects FS_POSIX_ACL which has unmet direct dependencies (BLOCK) Signed-off-by: Randy Dunlap Cc: Al Viro Signed-off-by: Al Viro --- fs/Kconfig | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index 771f457402d4..9a7921ae4763 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -30,15 +30,6 @@ config FS_MBCACHE source "fs/reiserfs/Kconfig" source "fs/jfs/Kconfig" -config FS_POSIX_ACL -# Posix ACL utility routines (for now, only ext2/ext3/jfs/reiserfs/nfs4) -# -# NOTE: you can implement Posix ACLs without these helpers (XFS does). -# Never use this symbol for ifdefs. 
-# - bool - default n - source "fs/xfs/Kconfig" source "fs/gfs2/Kconfig" source "fs/ocfs2/Kconfig" @@ -47,6 +38,14 @@ source "fs/nilfs2/Kconfig" endif # BLOCK +# Posix ACL utility routines +# +# Note: Posix ACLs can be implemented without these helpers. Never use +# this symbol for ifdefs in core code. +# +config FS_POSIX_ACL + def_bool n + config EXPORTFS tristate -- cgit v1.2.2 From 6a5640f10255a8941a3a57396dda20af7a5c9a9e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 27 Dec 2010 01:41:52 +0900 Subject: compat: remove unnecessary assignment in compat_rw_copy_check_uvector() *@ret_pointer is initialized to @fast_pointer thus the assignment is redundant. Signed-off-by: Namhyung Kim Cc: Jeff Moyer Signed-off-by: Al Viro --- fs/compat.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/compat.c b/fs/compat.c index eb1740ac8c0a..d717442c4133 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -597,10 +597,8 @@ ssize_t compat_rw_copy_check_uvector(int type, if (nr_segs > fast_segs) { ret = -ENOMEM; iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL); - if (iov == NULL) { - *ret_pointer = fast_pointer; + if (iov == NULL) goto out; - } } *ret_pointer = iov; -- cgit v1.2.2 From 974d879e8070bbb132bd4e79ef314703853d0b82 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 27 Dec 2010 01:41:53 +0900 Subject: compat: update comment of compat statfs syscalls The commit 7ed1ee6118ae ("Take statfs variants to fs/statfs.c") separates out statfs syscalls from fs/open.c. Thus the comment should be changed also. Signed-off-by: Namhyung Kim Cc: Jiri Kosina Signed-off-by: Al Viro --- fs/compat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/compat.c b/fs/compat.c index d717442c4133..c62b5e6a1c15 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -257,7 +257,7 @@ static int put_compat_statfs(struct compat_statfs __user *ubuf, struct kstatfs * } /* - * The following statfs calls are copies of code from fs/open.c and + * The following statfs calls are copies of code from fs/statfs.c and * should be checked against those from time to time */ asmlinkage long compat_sys_statfs(const char __user *pathname, struct compat_statfs __user *buf) -- cgit v1.2.2 From e0bb6bda43e20aa1db5774c73a519cd52c463a55 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 27 Dec 2010 01:41:54 +0900 Subject: compat: copy missing fields in compat_statfs64 to user f_flags and f_spare fields were not copied to userspace when compat_sys_[f]statfs64 called. Signed-off-by: Namhyung Kim Cc: Christoph Hellwig Signed-off-by: Al Viro --- fs/compat.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/compat.c b/fs/compat.c index c62b5e6a1c15..f6fd0a00e6cc 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -320,7 +320,9 @@ static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstat __put_user(kbuf->f_namelen, &ubuf->f_namelen) || __put_user(kbuf->f_fsid.val[0], &ubuf->f_fsid.val[0]) || __put_user(kbuf->f_fsid.val[1], &ubuf->f_fsid.val[1]) || - __put_user(kbuf->f_frsize, &ubuf->f_frsize)) + __put_user(kbuf->f_frsize, &ubuf->f_frsize) || + __put_user(kbuf->f_flags, &ubuf->f_flags) || + __clear_user(ubuf->f_spare, sizeof(ubuf->f_spare))) return -EFAULT; return 0; } -- cgit v1.2.2 From 274052ef0bac011249925f6616d147b1491fc601 Mon Sep 17 00:00:00 2001 From: "Dr. 
David Alan Gilbert" Date: Mon, 13 Dec 2010 17:09:52 +0000 Subject: hpfs_setattr error case avoids unlock_kernel This fixed a case that 'sparse' spotted where hpfs_setattr has an error return that didn't go through it's path that unlocks. This is against git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git version 6313e3c21743cc88bb5bd8aa72948ee1e83937b6. Build tested only, I don't have an hpfs file system to test. Dave Signed-off-by: Dr. David Alan Gilbert Signed-off-by: Al Viro --- fs/hpfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c index 56f0da1cfd10..1ae35baa539e 100644 --- a/fs/hpfs/inode.c +++ b/fs/hpfs/inode.c @@ -281,7 +281,7 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr) attr->ia_size != i_size_read(inode)) { error = vmtruncate(inode, attr->ia_size); if (error) - return error; + goto out_unlock; } setattr_copy(inode, attr); -- cgit v1.2.2 From 27eaa1c90c608aa907336c2743d5ecf35c469440 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 14 Dec 2010 00:06:25 +0900 Subject: aio: check return value of create_workqueue() Signed-off-by: Namhyung Kim Signed-off-by: Al Viro --- fs/aio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index 5e00f15c54aa..fc557a3be0a9 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -87,7 +87,7 @@ static int __init aio_setup(void) aio_wq = create_workqueue("aio"); abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry)); - BUG_ON(!abe_pool); + BUG_ON(!aio_wq || !abe_pool); pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page)); -- cgit v1.2.2 From ecf5632dd189ab4c366cef853d6e5fe7adfe52e5 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Sun, 16 Jan 2011 23:28:17 +0900 Subject: fs: fix address space warnings in ioctl_fiemap() The fi_extents_start field of struct fiemap_extent_info is a user pointer but was not marked as __user. This makes sparse emit following warnings: CHECK fs/ioctl.c fs/ioctl.c:114:26: warning: incorrect type in argument 1 (different address spaces) fs/ioctl.c:114:26: expected void [noderef] *dst fs/ioctl.c:114:26: got struct fiemap_extent *[assigned] dest fs/ioctl.c:202:14: warning: incorrect type in argument 1 (different address spaces) fs/ioctl.c:202:14: expected void const volatile [noderef] * fs/ioctl.c:202:14: got struct fiemap_extent *[assigned] fi_extents_start fs/ioctl.c:212:27: warning: incorrect type in argument 1 (different address spaces) fs/ioctl.c:212:27: expected void [noderef] *dst fs/ioctl.c:212:27: got char * Also add 'ufiemap' variable to eliminate unnecessary casts. 
Signed-off-by: Namhyung Kim Signed-off-by: Al Viro --- fs/ioctl.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ioctl.c b/fs/ioctl.c index d6cc16476620..a59635e295fa 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -86,7 +86,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical, u64 phys, u64 len, u32 flags) { struct fiemap_extent extent; - struct fiemap_extent *dest = fieinfo->fi_extents_start; + struct fiemap_extent __user *dest = fieinfo->fi_extents_start; /* only count the extents */ if (fieinfo->fi_extents_max == 0) { @@ -173,6 +173,7 @@ static int fiemap_check_ranges(struct super_block *sb, static int ioctl_fiemap(struct file *filp, unsigned long arg) { struct fiemap fiemap; + struct fiemap __user *ufiemap = (struct fiemap __user *) arg; struct fiemap_extent_info fieinfo = { 0, }; struct inode *inode = filp->f_path.dentry->d_inode; struct super_block *sb = inode->i_sb; @@ -182,8 +183,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg) if (!inode->i_op->fiemap) return -EOPNOTSUPP; - if (copy_from_user(&fiemap, (struct fiemap __user *)arg, - sizeof(struct fiemap))) + if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap))) return -EFAULT; if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS) @@ -196,7 +196,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg) fieinfo.fi_flags = fiemap.fm_flags; fieinfo.fi_extents_max = fiemap.fm_extent_count; - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap)); + fieinfo.fi_extents_start = ufiemap->fm_extents; if (fiemap.fm_extent_count != 0 && !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start, @@ -209,7 +209,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg) error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len); fiemap.fm_flags = fieinfo.fi_flags; fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped; - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap))) + if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap))) error = -EFAULT; return error; -- cgit v1.2.2 From 38a708d7759476318d0eec64af174513032ec67a Mon Sep 17 00:00:00 2001 From: Edward Shishkin Date: Sat, 30 Oct 2010 00:11:50 +0200 Subject: ecryptfs: fix truncation error in ecryptfs_read_update_atime This is similar to the bug found in direct-io not so long ago. Fix up truncation (ssize_t->int). This only matters with >2G reads/writes, which the kernel doesn't permit. Signed-off-by: Edward Shishkin Cc: Jeff Moyer Cc: Christoph Hellwig Cc: Eric Sandeen Signed-off-by: Tyler Hicks --- fs/ecryptfs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 91da02987bff..679817e82484 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -47,7 +47,7 @@ static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { - int rc; + ssize_t rc; struct dentry *lower_dentry; struct vfsmount *lower_vfsmount; struct file *file = iocb->ki_filp; -- cgit v1.2.2 From 2a8652f4e0d11ee27b1d2870c600fd1300661a6e Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 3 Nov 2010 11:11:15 +0100 Subject: ecryptfs: moved ECRYPTFS_SUPER_MAGIC definition to linux/magic.h The definition of ECRYPTFS_SUPER_MAGIC has been moved to the include file 'linux/magic.h' to become available to other kernel subsystems. 
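Only the removal of the local definition is visible in the hunk below; the matching addition lands in include/linux/magic.h, which is outside the 'fs' subset shown in this log. For reference, the moved definition keeps the same value (taken from the line removed below):

	/* include/linux/magic.h -- not shown here because this log is limited to 'fs' */
	#define ECRYPTFS_SUPER_MAGIC	0xf15f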
Signed-off-by: Roberto Sassu Signed-off-by: Tyler Hicks --- fs/ecryptfs/ecryptfs_kernel.h | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 413a3c48f0bb..bc530a81e4ce 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -192,7 +192,6 @@ ecryptfs_get_key_payload_data(struct key *key) (((struct user_key_payload*)key->payload.data)->data); } -#define ECRYPTFS_SUPER_MAGIC 0xf15f #define ECRYPTFS_MAX_KEYSET_SIZE 1024 #define ECRYPTFS_MAX_CIPHER_NAME_SIZE 32 #define ECRYPTFS_MAX_NUM_ENC_KEYS 64 -- cgit v1.2.2 From 070baa51286e5cf59dde6be52fa23647ffb5d32d Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 3 Nov 2010 11:11:22 +0100 Subject: ecryptfs: missing initialization of the superblock 'magic' field This patch initializes the 'magic' field of ecryptfs filesystems to ECRYPTFS_SUPER_MAGIC. Signed-off-by: Roberto Sassu [tyhicks: merge with 66cb76666d69] Signed-off-by: Tyler Hicks --- fs/ecryptfs/main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index d3b28abdd6aa..19f04504f625 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "ecryptfs_kernel.h" /** @@ -564,6 +565,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags ecryptfs_set_superblock_lower(s, path.dentry->d_sb); s->s_maxbytes = path.dentry->d_sb->s_maxbytes; s->s_blocksize = path.dentry->d_sb->s_blocksize; + s->s_magic = ECRYPTFS_SUPER_MAGIC; inode = ecryptfs_get_inode(path.dentry->d_inode, s); rc = PTR_ERR(inode); -- cgit v1.2.2 From 27992890b02d340198a3a22fc210d13684a41564 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 3 Nov 2010 11:11:28 +0100 Subject: ecryptfs: test lower_file pointer when lower_file_mutex is locked This patch prevents the lower_file pointer in the 'ecryptfs_inode_info' structure from being checked when the mutex 'lower_file_mutex' is not locked.
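The hunks below only drop the unlocked NULL tests at the call sites; the check is instead expected to happen inside ecryptfs_init_persistent_file() with lower_file_mutex held. A rough sketch of that locking pattern, assumed shape only since the helper's body is not part of this patch:

	mutex_lock(&inode_info->lower_file_mutex);
	if (!inode_info->lower_file) {
		/* open and cache the lower file while the mutex is held */
	}
	mutex_unlock(&inode_info->lower_file_mutex);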
Signed-off-by: Roberto Sassu Signed-off-by: Tyler Hicks --- fs/ecryptfs/file.c | 16 +++++++--------- fs/ecryptfs/inode.c | 32 ++++++++++++++------------------ 2 files changed, 21 insertions(+), 27 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 679817e82484..99259f850e58 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -191,15 +191,13 @@ static int ecryptfs_open(struct inode *inode, struct file *file) | ECRYPTFS_ENCRYPTED); } mutex_unlock(&crypt_stat->cs_mutex); - if (!ecryptfs_inode_to_private(inode)->lower_file) { - rc = ecryptfs_init_persistent_file(ecryptfs_dentry); - if (rc) { - printk(KERN_ERR "%s: Error attempting to initialize " - "the persistent file for the dentry with name " - "[%s]; rc = [%d]\n", __func__, - ecryptfs_dentry->d_name.name, rc); - goto out_free; - } + rc = ecryptfs_init_persistent_file(ecryptfs_dentry); + if (rc) { + printk(KERN_ERR "%s: Error attempting to initialize " + "the persistent file for the dentry with name " + "[%s]; rc = [%d]\n", __func__, + ecryptfs_dentry->d_name.name, rc); + goto out_free; } if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY) && !(file->f_flags & O_RDONLY)) { diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 64ff02330752..bd33f87a1907 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -185,15 +185,13 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry) "context; rc = [%d]\n", rc); goto out; } - if (!ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->lower_file) { - rc = ecryptfs_init_persistent_file(ecryptfs_dentry); - if (rc) { - printk(KERN_ERR "%s: Error attempting to initialize " - "the persistent file for the dentry with name " - "[%s]; rc = [%d]\n", __func__, - ecryptfs_dentry->d_name.name, rc); - goto out; - } + rc = ecryptfs_init_persistent_file(ecryptfs_dentry); + if (rc) { + printk(KERN_ERR "%s: Error attempting to initialize " + "the persistent file for the dentry with name " + "[%s]; rc = [%d]\n", __func__, + ecryptfs_dentry->d_name.name, rc); + goto out; } rc = ecryptfs_write_metadata(ecryptfs_dentry); if (rc) { @@ -302,15 +300,13 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, rc = -ENOMEM; goto out; } - if (!ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->lower_file) { - rc = ecryptfs_init_persistent_file(ecryptfs_dentry); - if (rc) { - printk(KERN_ERR "%s: Error attempting to initialize " - "the persistent file for the dentry with name " - "[%s]; rc = [%d]\n", __func__, - ecryptfs_dentry->d_name.name, rc); - goto out_free_kmem; - } + rc = ecryptfs_init_persistent_file(ecryptfs_dentry); + if (rc) { + printk(KERN_ERR "%s: Error attempting to initialize " + "the persistent file for the dentry with name " + "[%s]; rc = [%d]\n", __func__, + ecryptfs_dentry->d_name.name, rc); + goto out_free_kmem; } crypt_stat = &ecryptfs_inode_to_private( ecryptfs_dentry->d_inode)->crypt_stat; -- cgit v1.2.2 From 0abe1169470571c473ee720c35fe5b3481c46c46 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 3 Nov 2010 11:11:34 +0100 Subject: ecryptfs: fixed testing of file descriptor flags This patch replaces the check (lower_file->f_flags & O_RDONLY) with ((lower_file->f_flags & O_ACCMODE) == O_RDONLY).
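The reason the old test is wrong: O_RDONLY is defined as 0, so (f_flags & O_RDONLY) can never be non-zero and the warning path was unreachable. The access mode has to be masked out with O_ACCMODE and compared, which is what the hunk below does:

	/* before: O_RDONLY is 0, so this condition can never be true */
	if (lower_file->f_flags & O_RDONLY)
		/* never reached */;

	/* after: extract the access mode and compare it */
	if ((lower_file->f_flags & O_ACCMODE) == O_RDONLY)
		/* the lower file really is read-only */;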
Signed-off-by: Roberto Sassu Signed-off-by: Tyler Hicks --- fs/ecryptfs/file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 99259f850e58..e069e786da43 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -199,8 +199,8 @@ static int ecryptfs_open(struct inode *inode, struct file *file) ecryptfs_dentry->d_name.name, rc); goto out_free; } - if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY) - && !(file->f_flags & O_RDONLY)) { + if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_ACCMODE) + == O_RDONLY && (file->f_flags & O_ACCMODE) != O_RDONLY) { rc = -EPERM; printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs " "file must hence be opened RO\n", __func__); -- cgit v1.2.2 From 888d57bbc91ebd031451d4ab1c669baee826a06c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 10 Nov 2010 15:46:16 -0800 Subject: fs/ecryptfs: Add printf format/argument verification and fix fallout Add __attribute__((format... to __ecryptfs_printk Make formats and arguments match. Add casts to (unsigned long long) for %llu. Signed-off-by: Joe Perches [tyhicks: 80 columns cleanup and fixed typo] Signed-off-by: Tyler Hicks --- fs/ecryptfs/crypto.c | 26 ++++++++++++-------------- fs/ecryptfs/ecryptfs_kernel.h | 1 + fs/ecryptfs/file.c | 6 +++--- fs/ecryptfs/keystore.c | 6 +++--- fs/ecryptfs/main.c | 7 ++++--- fs/ecryptfs/mmap.c | 13 +++++++------ 6 files changed, 30 insertions(+), 29 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index cbadc1bee6e7..57bdd7a13207 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -413,10 +413,9 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page, rc = ecryptfs_derive_iv(extent_iv, crypt_stat, (extent_base + extent_offset)); if (rc) { - ecryptfs_printk(KERN_ERR, "Error attempting to " - "derive IV for extent [0x%.16x]; " - "rc = [%d]\n", (extent_base + extent_offset), - rc); + ecryptfs_printk(KERN_ERR, "Error attempting to derive IV for " + "extent [0x%.16llx]; rc = [%d]\n", + (unsigned long long)(extent_base + extent_offset), rc); goto out; } if (unlikely(ecryptfs_verbosity > 0)) { @@ -443,9 +442,9 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page, } rc = 0; if (unlikely(ecryptfs_verbosity > 0)) { - ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; " - "rc = [%d]\n", (extent_base + extent_offset), - rc); + ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16llx]; " + "rc = [%d]\n", + (unsigned long long)(extent_base + extent_offset), rc); ecryptfs_printk(KERN_DEBUG, "First 8 bytes after " "encryption:\n"); ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8); @@ -540,10 +539,9 @@ static int ecryptfs_decrypt_extent(struct page *page, rc = ecryptfs_derive_iv(extent_iv, crypt_stat, (extent_base + extent_offset)); if (rc) { - ecryptfs_printk(KERN_ERR, "Error attempting to " - "derive IV for extent [0x%.16x]; " - "rc = [%d]\n", (extent_base + extent_offset), - rc); + ecryptfs_printk(KERN_ERR, "Error attempting to derive IV for " + "extent [0x%.16llx]; rc = [%d]\n", + (unsigned long long)(extent_base + extent_offset), rc); goto out; } if (unlikely(ecryptfs_verbosity > 0)) { @@ -571,9 +569,9 @@ static int ecryptfs_decrypt_extent(struct page *page, } rc = 0; if (unlikely(ecryptfs_verbosity > 0)) { - ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; " - "rc = [%d]\n", (extent_base + extent_offset), - rc); + ecryptfs_printk(KERN_DEBUG, "Decrypt extent 
[0x%.16llx]; " + "rc = [%d]\n", + (unsigned long long)(extent_base + extent_offset), rc); ecryptfs_printk(KERN_DEBUG, "First 8 bytes after " "decryption:\n"); ecryptfs_dump_hex((char *)(page_address(page) diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index bc530a81e4ce..dbc84ed96336 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -583,6 +583,7 @@ ecryptfs_set_dentry_lower_mnt(struct dentry *dentry, struct vfsmount *lower_mnt) #define ecryptfs_printk(type, fmt, arg...) \ __ecryptfs_printk(type "%s: " fmt, __func__, ## arg); +__attribute__ ((format(printf, 1, 2))) void __ecryptfs_printk(const char *fmt, ...); extern const struct file_operations ecryptfs_main_fops; diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index e069e786da43..81e10e6a9443 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -241,9 +241,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file) } } mutex_unlock(&crypt_stat->cs_mutex); - ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = [0x%.16x] " - "size: [0x%.16x]\n", inode, inode->i_ino, - i_size_read(inode)); + ecryptfs_printk(KERN_DEBUG, "inode w/ addr = [0x%p], i_ino = " + "[0x%.16lx] size: [0x%.16llx]\n", inode, inode->i_ino, + (unsigned long long)i_size_read(inode)); goto out; out_free: kmem_cache_free(ecryptfs_file_info_cache, diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index b1f6858a5223..25fd7f595c99 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -59,7 +59,7 @@ static int process_request_key_err(long err_code) break; default: ecryptfs_printk(KERN_WARNING, "Unknown error code: " - "[0x%.16x]\n", err_code); + "[0x%.16lx]\n", err_code); rc = -EINVAL; } return rc; @@ -1864,8 +1864,8 @@ found_matching_auth_tok: "session key for authentication token with sig " "[%.*s]; rc = [%d]. Removing auth tok " "candidate from the list and searching for " - "the next match.\n", candidate_auth_tok_sig, - ECRYPTFS_SIG_SIZE_HEX, rc); + "the next match.\n", ECRYPTFS_SIG_SIZE_HEX, + candidate_auth_tok_sig, rc); list_for_each_entry_safe(auth_tok_list_item, auth_tok_list_item_tmp, &auth_tok_list, list) { diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 19f04504f625..758323a0f09a 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -810,9 +810,10 @@ static int __init ecryptfs_init(void) ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is " "larger than the host's page size, and so " "eCryptfs cannot run on this system. 
The " - "default eCryptfs extent size is [%d] bytes; " - "the page size is [%d] bytes.\n", - ECRYPTFS_DEFAULT_EXTENT_SIZE, PAGE_CACHE_SIZE); + "default eCryptfs extent size is [%u] bytes; " + "the page size is [%lu] bytes.\n", + ECRYPTFS_DEFAULT_EXTENT_SIZE, + (unsigned long)PAGE_CACHE_SIZE); goto out; } rc = ecryptfs_init_kmem_caches(); diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index b1d82756544b..4b9011392736 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -65,7 +65,7 @@ static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc) rc = ecryptfs_encrypt_page(page); if (rc) { ecryptfs_printk(KERN_WARNING, "Error encrypting " - "page (upper index [0x%.16x])\n", page->index); + "page (upper index [0x%.16lx])\n", page->index); ClearPageUptodate(page); goto out; } @@ -237,7 +237,7 @@ out: ClearPageUptodate(page); else SetPageUptodate(page); - ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16x]\n", + ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16lx]\n", page->index); unlock_page(page); return rc; @@ -488,7 +488,7 @@ static int ecryptfs_write_end(struct file *file, } else ecryptfs_printk(KERN_DEBUG, "Not a new file\n"); ecryptfs_printk(KERN_DEBUG, "Calling fill_zeros_to_end_of_page" - "(page w/ index = [0x%.16x], to = [%d])\n", index, to); + "(page w/ index = [0x%.16lx], to = [%d])\n", index, to); if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { rc = ecryptfs_write_lower_page_segment(ecryptfs_inode, page, 0, to); @@ -503,19 +503,20 @@ static int ecryptfs_write_end(struct file *file, rc = fill_zeros_to_end_of_page(page, to); if (rc) { ecryptfs_printk(KERN_WARNING, "Error attempting to fill " - "zeros in page with index = [0x%.16x]\n", index); + "zeros in page with index = [0x%.16lx]\n", index); goto out; } rc = ecryptfs_encrypt_page(page); if (rc) { ecryptfs_printk(KERN_WARNING, "Error encrypting page (upper " - "index [0x%.16x])\n", index); + "index [0x%.16lx])\n", index); goto out; } if (pos + copied > i_size_read(ecryptfs_inode)) { i_size_write(ecryptfs_inode, pos + copied); ecryptfs_printk(KERN_DEBUG, "Expanded file size to " - "[0x%.16x]\n", i_size_read(ecryptfs_inode)); + "[0x%.16llx]\n", + (unsigned long long)i_size_read(ecryptfs_inode)); } rc = ecryptfs_write_inode_size_to_metadata(ecryptfs_inode); if (rc) -- cgit v1.2.2 From f24b38874e1e37bb70291bbc4c5c3c13f5f9dac8 Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Mon, 15 Nov 2010 17:36:38 -0600 Subject: ecryptfs: Fix ecryptfs_printk() size_t warnings Commit cb55d21f6fa19d8c6c2680d90317ce88c1f57269 revealed a number of missing 'z' length modifiers in calls to ecryptfs_printk() when printing variables of type size_t. This patch fixes those compiler warnings. 
Signed-off-by: Tyler Hicks --- fs/ecryptfs/crypto.c | 4 ++-- fs/ecryptfs/keystore.c | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 57bdd7a13207..bfd8b680e648 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -348,7 +348,7 @@ static int encrypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat, BUG_ON(!crypt_stat || !crypt_stat->tfm || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)); if (unlikely(ecryptfs_verbosity > 0)) { - ecryptfs_printk(KERN_DEBUG, "Key size [%d]; key:\n", + ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n", crypt_stat->key_size); ecryptfs_dump_hex(crypt_stat->key, crypt_stat->key_size); @@ -778,7 +778,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat) } ecryptfs_printk(KERN_DEBUG, "Initializing cipher [%s]; strlen = [%d]; " - "key_size_bits = [%d]\n", + "key_size_bits = [%zd]\n", crypt_stat->cipher, (int)strlen(crypt_stat->cipher), crypt_stat->key_size << 3); if (crypt_stat->tfm) { diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 25fd7f595c99..c1436cff6f2d 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -130,7 +130,7 @@ int ecryptfs_write_packet_length(char *dest, size_t size, } else { rc = -EINVAL; ecryptfs_printk(KERN_WARNING, - "Unsupported packet size: [%d]\n", size); + "Unsupported packet size: [%zd]\n", size); } return rc; } @@ -1672,7 +1672,7 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok, auth_tok->session_key.decrypted_key_size); crypt_stat->flags |= ECRYPTFS_KEY_VALID; if (unlikely(ecryptfs_verbosity > 0)) { - ecryptfs_printk(KERN_DEBUG, "FEK of size [%d]:\n", + ecryptfs_printk(KERN_DEBUG, "FEK of size [%zd]:\n", crypt_stat->key_size); ecryptfs_dump_hex(crypt_stat->key, crypt_stat->key_size); @@ -1754,7 +1754,7 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat, if (ECRYPTFS_SIG_SIZE != tag_11_contents_size) { ecryptfs_printk(KERN_ERR, "Expected " "signature of size [%d]; " - "read size [%d]\n", + "read size [%zd]\n", ECRYPTFS_SIG_SIZE, tag_11_contents_size); rc = -EIO; @@ -1787,8 +1787,8 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat, goto out_wipe_list; break; default: - ecryptfs_printk(KERN_DEBUG, "No packet at offset " - "[%d] of the file header; hex value of " + ecryptfs_printk(KERN_DEBUG, "No packet at offset [%zd] " + "of the file header; hex value of " "character is [0x%.2x]\n", i, src[i]); next_packet_is_auth_tok_packet = 0; } @@ -2168,7 +2168,7 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes, if (encrypted_session_key_valid) { ecryptfs_printk(KERN_DEBUG, "encrypted_session_key_valid != 0; " "using auth_tok->session_key.encrypted_key, " - "where key_rec->enc_key_size = [%d]\n", + "where key_rec->enc_key_size = [%zd]\n", key_rec->enc_key_size); memcpy(key_rec->enc_key, auth_tok->session_key.encrypted_key, @@ -2198,7 +2198,7 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes, if (rc < 1 || rc > 2) { ecryptfs_printk(KERN_ERR, "Error generating scatterlist " "for crypt_stat session key; expected rc = 1; " - "got rc = [%d]. key_rec->enc_key_size = [%d]\n", + "got rc = [%d]. key_rec->enc_key_size = [%zd]\n", rc, key_rec->enc_key_size); rc = -ENOMEM; goto out; @@ -2209,7 +2209,7 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes, ecryptfs_printk(KERN_ERR, "Error generating scatterlist " "for crypt_stat encrypted session key; " "expected rc = 1; got rc = [%d]. 
" - "key_rec->enc_key_size = [%d]\n", rc, + "key_rec->enc_key_size = [%zd]\n", rc, key_rec->enc_key_size); rc = -ENOMEM; goto out; @@ -2224,7 +2224,7 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes, goto out; } rc = 0; - ecryptfs_printk(KERN_DEBUG, "Encrypting [%d] bytes of the key\n", + ecryptfs_printk(KERN_DEBUG, "Encrypting [%zd] bytes of the key\n", crypt_stat->key_size); rc = crypto_blkcipher_encrypt(&desc, dst_sg, src_sg, (*key_rec).enc_key_size); @@ -2235,7 +2235,7 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes, } ecryptfs_printk(KERN_DEBUG, "This should be the encrypted key:\n"); if (ecryptfs_verbosity > 0) { - ecryptfs_printk(KERN_DEBUG, "EFEK of size [%d]:\n", + ecryptfs_printk(KERN_DEBUG, "EFEK of size [%zd]:\n", key_rec->enc_key_size); ecryptfs_dump_hex(key_rec->enc_key, key_rec->enc_key_size); -- cgit v1.2.2 From 24562486be76cf223b8d911f45e1d26eb3364b13 Mon Sep 17 00:00:00 2001 From: Frank Swiderski Date: Mon, 15 Nov 2010 10:43:22 -0800 Subject: ecryptfs: remove unnecessary decrypt when extending a file Removes an unecessary page decrypt from ecryptfs_begin_write when the page is beyond the current file size. Previously, the call to ecryptfs_decrypt_page would result in a read of 0 bytes, but still attempt to decrypt an entire page. This patch detects that case and merely zeros the page before marking it up-to-date. Signed-off-by: Frank Swiderski Signed-off-by: Tyler Hicks --- fs/ecryptfs/mmap.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 4b9011392736..cc64fca89f8d 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -290,6 +290,7 @@ static int ecryptfs_write_begin(struct file *file, return -ENOMEM; *pagep = page; + prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT); if (!PageUptodate(page)) { struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(mapping->host)->crypt_stat; @@ -335,18 +336,23 @@ static int ecryptfs_write_begin(struct file *file, SetPageUptodate(page); } } else { - rc = ecryptfs_decrypt_page(page); - if (rc) { - printk(KERN_ERR "%s: Error decrypting page " - "at index [%ld]; rc = [%d]\n", - __func__, page->index, rc); - ClearPageUptodate(page); - goto out; + if (prev_page_end_size + >= i_size_read(page->mapping->host)) { + zero_user(page, 0, PAGE_CACHE_SIZE); + } else { + rc = ecryptfs_decrypt_page(page); + if (rc) { + printk(KERN_ERR "%s: Error decrypting " + "page at index [%ld]; " + "rc = [%d]\n", + __func__, page->index, rc); + ClearPageUptodate(page); + goto out; + } } SetPageUptodate(page); } } - prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT); /* If creating a page or more of holes, zero them out via truncate. * Note, this will increase i_size. */ if (index != 0) { -- cgit v1.2.2 From acce952b0263825da32cf10489413dec78053347 Mon Sep 17 00:00:00 2001 From: liubo Date: Thu, 6 Jan 2011 19:30:25 +0800 Subject: Btrfs: forced readonly mounts on errors This patch comes from "Forced readonly mounts on errors" ideas. As we know, this is the first step in being more fault tolerant of disk corruptions instead of just using BUG() statements. The major content: - add a framework for generating errors that should result in filesystems going readonly. - keep FS state in disk super block. - make sure that all of resource will be freed and released at umount time. - make sure that fter FS is forced readonly on error, there will be no more disk change before FS is corrected. 
For this, we should stop write operation. After this patch is applied, the conversion from BUG() to such a framework can happen incrementally. Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 24 +++ fs/btrfs/disk-io.c | 391 ++++++++++++++++++++++++++++++++++++++++++++++++- fs/btrfs/disk-io.h | 1 + fs/btrfs/extent-tree.c | 11 ++ fs/btrfs/file.c | 11 ++ fs/btrfs/super.c | 84 +++++++++++ fs/btrfs/transaction.c | 3 + 7 files changed, 523 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0995f4f68d7a..72195378bef9 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -295,6 +295,14 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes) #define BTRFS_FSID_SIZE 16 #define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) #define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) + +/* + * File system states + */ + +/* Errors detected */ +#define BTRFS_SUPER_FLAG_ERROR (1ULL << 2) + #define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32) #define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33) @@ -1058,6 +1066,9 @@ struct btrfs_fs_info { unsigned metadata_ratio; void *bdev_holder; + + /* filesystem state */ + u64 fs_state; }; /* @@ -2203,6 +2214,11 @@ int btrfs_set_block_group_rw(struct btrfs_root *root, struct btrfs_block_group_cache *cache); void btrfs_put_block_group_cache(struct btrfs_fs_info *info); u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo); +int btrfs_error_unpin_extent_range(struct btrfs_root *root, + u64 start, u64 end); +int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, + u64 num_bytes); + /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, int level, int *slot); @@ -2556,6 +2572,14 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); /* super.c */ int btrfs_parse_options(struct btrfs_root *root, char *options); int btrfs_sync_fs(struct super_block *sb, int wait); +void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, + unsigned int line, int errno); + +#define btrfs_std_error(fs_info, errno) \ +do { \ + if ((errno)) \ + __btrfs_std_error((fs_info), __func__, __LINE__, (errno));\ +} while (0) /* acl.c */ #ifdef CONFIG_BTRFS_FS_POSIX_ACL diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9b1dd4138072..1a3af9e8e0c4 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -44,6 +44,20 @@ static struct extent_io_ops btree_extent_io_ops; static void end_workqueue_fn(struct btrfs_work *work); static void free_fs_root(struct btrfs_root *root); +static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, + int read_only); +static int btrfs_destroy_ordered_operations(struct btrfs_root *root); +static int btrfs_destroy_ordered_extents(struct btrfs_root *root); +static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, + struct btrfs_root *root); +static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t); +static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root); +static int btrfs_destroy_marked_extents(struct btrfs_root *root, + struct extent_io_tree *dirty_pages, + int mark); +static int btrfs_destroy_pinned_extent(struct btrfs_root *root, + struct extent_io_tree *pinned_extents); +static int btrfs_cleanup_transaction(struct btrfs_root *root); /* * end_io_wq structs are used to do processing in task context when an IO is @@ -1738,6 +1752,11 @@ struct btrfs_root *open_ctree(struct super_block *sb, if (!btrfs_super_root(disk_super)) goto fail_iput; + /* check FS state, 
whether FS is broken. */ + fs_info->fs_state |= btrfs_super_flags(disk_super); + + btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); + ret = btrfs_parse_options(tree_root, options); if (ret) { err = ret; @@ -1968,7 +1987,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, btrfs_set_opt(fs_info->mount_opt, SSD); } - if (btrfs_super_log_root(disk_super) != 0) { + /* do not make disk changes in broken FS */ + if (btrfs_super_log_root(disk_super) != 0 && + !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) { u64 bytenr = btrfs_super_log_root(disk_super); if (fs_devices->rw_devices == 0) { @@ -2464,8 +2485,28 @@ int close_ctree(struct btrfs_root *root) smp_mb(); btrfs_put_block_group_cache(fs_info); + + /* + * Here come 2 situations when btrfs is broken to flip readonly: + * + * 1. when btrfs flips readonly somewhere else before + * btrfs_commit_super, sb->s_flags has MS_RDONLY flag, + * and btrfs will skip to write sb directly to keep + * ERROR state on disk. + * + * 2. when btrfs flips readonly just in btrfs_commit_super, + * and in such case, btrfs cannnot write sb via btrfs_commit_super, + * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag, + * btrfs will cleanup all FS resources first and write sb then. + */ if (!(fs_info->sb->s_flags & MS_RDONLY)) { - ret = btrfs_commit_super(root); + ret = btrfs_commit_super(root); + if (ret) + printk(KERN_ERR "btrfs: commit super ret %d\n", ret); + } + + if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { + ret = btrfs_error_commit_super(root); if (ret) printk(KERN_ERR "btrfs: commit super ret %d\n", ret); } @@ -2641,6 +2682,352 @@ out: return 0; } +static void btrfs_check_super_valid(struct btrfs_fs_info *fs_info, + int read_only) +{ + if (read_only) + return; + + if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) + printk(KERN_WARNING "warning: mount fs with errors, " + "running btrfsck is recommended\n"); +} + +int btrfs_error_commit_super(struct btrfs_root *root) +{ + int ret; + + mutex_lock(&root->fs_info->cleaner_mutex); + btrfs_run_delayed_iputs(root); + mutex_unlock(&root->fs_info->cleaner_mutex); + + down_write(&root->fs_info->cleanup_work_sem); + up_write(&root->fs_info->cleanup_work_sem); + + /* cleanup FS via transaction */ + btrfs_cleanup_transaction(root); + + ret = write_ctree_super(NULL, root, 0); + + return ret; +} + +static int btrfs_destroy_ordered_operations(struct btrfs_root *root) +{ + struct btrfs_inode *btrfs_inode; + struct list_head splice; + + INIT_LIST_HEAD(&splice); + + mutex_lock(&root->fs_info->ordered_operations_mutex); + spin_lock(&root->fs_info->ordered_extent_lock); + + list_splice_init(&root->fs_info->ordered_operations, &splice); + while (!list_empty(&splice)) { + btrfs_inode = list_entry(splice.next, struct btrfs_inode, + ordered_operations); + + list_del_init(&btrfs_inode->ordered_operations); + + btrfs_invalidate_inodes(btrfs_inode->root); + } + + spin_unlock(&root->fs_info->ordered_extent_lock); + mutex_unlock(&root->fs_info->ordered_operations_mutex); + + return 0; +} + +static int btrfs_destroy_ordered_extents(struct btrfs_root *root) +{ + struct list_head splice; + struct btrfs_ordered_extent *ordered; + struct inode *inode; + + INIT_LIST_HEAD(&splice); + + spin_lock(&root->fs_info->ordered_extent_lock); + + list_splice_init(&root->fs_info->ordered_extents, &splice); + while (!list_empty(&splice)) { + ordered = list_entry(splice.next, struct btrfs_ordered_extent, + root_extent_list); + + list_del_init(&ordered->root_extent_list); + atomic_inc(&ordered->refs); + + /* the inode may be getting 
freed (in sys_unlink path). */ + inode = igrab(ordered->inode); + + spin_unlock(&root->fs_info->ordered_extent_lock); + if (inode) + iput(inode); + + atomic_set(&ordered->refs, 1); + btrfs_put_ordered_extent(ordered); + + spin_lock(&root->fs_info->ordered_extent_lock); + } + + spin_unlock(&root->fs_info->ordered_extent_lock); + + return 0; +} + +static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, + struct btrfs_root *root) +{ + struct rb_node *node; + struct btrfs_delayed_ref_root *delayed_refs; + struct btrfs_delayed_ref_node *ref; + int ret = 0; + + delayed_refs = &trans->delayed_refs; + + spin_lock(&delayed_refs->lock); + if (delayed_refs->num_entries == 0) { + printk(KERN_INFO "delayed_refs has NO entry\n"); + return ret; + } + + node = rb_first(&delayed_refs->root); + while (node) { + ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); + node = rb_next(node); + + ref->in_tree = 0; + rb_erase(&ref->rb_node, &delayed_refs->root); + delayed_refs->num_entries--; + + atomic_set(&ref->refs, 1); + if (btrfs_delayed_ref_is_head(ref)) { + struct btrfs_delayed_ref_head *head; + + head = btrfs_delayed_node_to_head(ref); + mutex_lock(&head->mutex); + kfree(head->extent_op); + delayed_refs->num_heads--; + if (list_empty(&head->cluster)) + delayed_refs->num_heads_ready--; + list_del_init(&head->cluster); + mutex_unlock(&head->mutex); + } + + spin_unlock(&delayed_refs->lock); + btrfs_put_delayed_ref(ref); + + cond_resched(); + spin_lock(&delayed_refs->lock); + } + + spin_unlock(&delayed_refs->lock); + + return ret; +} + +static int btrfs_destroy_pending_snapshots(struct btrfs_transaction *t) +{ + struct btrfs_pending_snapshot *snapshot; + struct list_head splice; + + INIT_LIST_HEAD(&splice); + + list_splice_init(&t->pending_snapshots, &splice); + + while (!list_empty(&splice)) { + snapshot = list_entry(splice.next, + struct btrfs_pending_snapshot, + list); + + list_del_init(&snapshot->list); + + kfree(snapshot); + } + + return 0; +} + +static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root) +{ + struct btrfs_inode *btrfs_inode; + struct list_head splice; + + INIT_LIST_HEAD(&splice); + + list_splice_init(&root->fs_info->delalloc_inodes, &splice); + + spin_lock(&root->fs_info->delalloc_lock); + + while (!list_empty(&splice)) { + btrfs_inode = list_entry(splice.next, struct btrfs_inode, + delalloc_inodes); + + list_del_init(&btrfs_inode->delalloc_inodes); + + btrfs_invalidate_inodes(btrfs_inode->root); + } + + spin_unlock(&root->fs_info->delalloc_lock); + + return 0; +} + +static int btrfs_destroy_marked_extents(struct btrfs_root *root, + struct extent_io_tree *dirty_pages, + int mark) +{ + int ret; + struct page *page; + struct inode *btree_inode = root->fs_info->btree_inode; + struct extent_buffer *eb; + u64 start = 0; + u64 end; + u64 offset; + unsigned long index; + + while (1) { + ret = find_first_extent_bit(dirty_pages, start, &start, &end, + mark); + if (ret) + break; + + clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS); + while (start <= end) { + index = start >> PAGE_CACHE_SHIFT; + start = (u64)(index + 1) << PAGE_CACHE_SHIFT; + page = find_get_page(btree_inode->i_mapping, index); + if (!page) + continue; + offset = page_offset(page); + + spin_lock(&dirty_pages->buffer_lock); + eb = radix_tree_lookup( + &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, + offset >> PAGE_CACHE_SHIFT); + spin_unlock(&dirty_pages->buffer_lock); + if (eb) { + ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, + &eb->bflags); + atomic_set(&eb->refs, 1); + } + if 
(PageWriteback(page)) + end_page_writeback(page); + + lock_page(page); + if (PageDirty(page)) { + clear_page_dirty_for_io(page); + spin_lock_irq(&page->mapping->tree_lock); + radix_tree_tag_clear(&page->mapping->page_tree, + page_index(page), + PAGECACHE_TAG_DIRTY); + spin_unlock_irq(&page->mapping->tree_lock); + } + + page->mapping->a_ops->invalidatepage(page, 0); + unlock_page(page); + } + } + + return ret; +} + +static int btrfs_destroy_pinned_extent(struct btrfs_root *root, + struct extent_io_tree *pinned_extents) +{ + struct extent_io_tree *unpin; + u64 start; + u64 end; + int ret; + + unpin = pinned_extents; + while (1) { + ret = find_first_extent_bit(unpin, 0, &start, &end, + EXTENT_DIRTY); + if (ret) + break; + + /* opt_discard */ + ret = btrfs_error_discard_extent(root, start, end + 1 - start); + + clear_extent_dirty(unpin, start, end, GFP_NOFS); + btrfs_error_unpin_extent_range(root, start, end); + cond_resched(); + } + + return 0; +} + +static int btrfs_cleanup_transaction(struct btrfs_root *root) +{ + struct btrfs_transaction *t; + LIST_HEAD(list); + + WARN_ON(1); + + mutex_lock(&root->fs_info->trans_mutex); + mutex_lock(&root->fs_info->transaction_kthread_mutex); + + list_splice_init(&root->fs_info->trans_list, &list); + while (!list_empty(&list)) { + t = list_entry(list.next, struct btrfs_transaction, list); + if (!t) + break; + + btrfs_destroy_ordered_operations(root); + + btrfs_destroy_ordered_extents(root); + + btrfs_destroy_delayed_refs(t, root); + + btrfs_block_rsv_release(root, + &root->fs_info->trans_block_rsv, + t->dirty_pages.dirty_bytes); + + /* FIXME: cleanup wait for commit */ + t->in_commit = 1; + t->blocked = 1; + if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) + wake_up(&root->fs_info->transaction_blocked_wait); + + t->blocked = 0; + if (waitqueue_active(&root->fs_info->transaction_wait)) + wake_up(&root->fs_info->transaction_wait); + mutex_unlock(&root->fs_info->trans_mutex); + + mutex_lock(&root->fs_info->trans_mutex); + t->commit_done = 1; + if (waitqueue_active(&t->commit_wait)) + wake_up(&t->commit_wait); + mutex_unlock(&root->fs_info->trans_mutex); + + mutex_lock(&root->fs_info->trans_mutex); + + btrfs_destroy_pending_snapshots(t); + + btrfs_destroy_delalloc_inodes(root); + + spin_lock(&root->fs_info->new_trans_lock); + root->fs_info->running_transaction = NULL; + spin_unlock(&root->fs_info->new_trans_lock); + + btrfs_destroy_marked_extents(root, &t->dirty_pages, + EXTENT_DIRTY); + + btrfs_destroy_pinned_extent(root, + root->fs_info->pinned_extents); + + t->use_count = 0; + list_del_init(&t->list); + memset(t, 0, sizeof(*t)); + kmem_cache_free(btrfs_transaction_cachep, t); + } + + mutex_unlock(&root->fs_info->transaction_kthread_mutex); + mutex_unlock(&root->fs_info->trans_mutex); + + return 0; +} + static struct extent_io_ops btree_extent_io_ops = { .write_cache_pages_lock_hook = btree_lock_page_hook, .readpage_end_io_hook = btree_readpage_end_io_hook, diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 88e825a0bf21..07b20dc2fd95 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -52,6 +52,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root *root, int max_mirrors); struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); int btrfs_commit_super(struct btrfs_root *root); +int btrfs_error_commit_super(struct btrfs_root *root); struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize); struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info 
*fs_info, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 055b837eab19..bcf303204f7f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8642,3 +8642,14 @@ out: btrfs_free_path(path); return ret; } + +int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end) +{ + return unpin_extent_range(root, start, end); +} + +int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, + u64 num_bytes) +{ + return btrfs_discard_extent(root, bytenr, num_bytes); +} diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 05df688c96f4..f903433f5bdf 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -892,6 +892,17 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, if (err) goto out; + /* + * If BTRFS flips readonly due to some impossible error + * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR), + * although we have opened a file as writable, we have + * to stop this write operation to ensure FS consistency. + */ + if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { + err = -EROFS; + goto out; + } + file_update_time(file); BTRFS_I(inode)->sequence++; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 2963376e77f4..52e903b0a293 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -54,6 +54,90 @@ static const struct super_operations btrfs_super_ops; +static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno, + char nbuf[16]) +{ + char *errstr = NULL; + + switch (errno) { + case -EIO: + errstr = "IO failure"; + break; + case -ENOMEM: + errstr = "Out of memory"; + break; + case -EROFS: + errstr = "Readonly filesystem"; + break; + default: + if (nbuf) { + if (snprintf(nbuf, 16, "error %d", -errno) >= 0) + errstr = nbuf; + } + break; + } + + return errstr; +} + +static void __save_error_info(struct btrfs_fs_info *fs_info) +{ + /* + * today we only save the error info into ram. Long term we'll + * also send it down to the disk + */ + fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR; +} + +/* NOTE: + * We move write_super stuff at umount in order to avoid deadlock + * for umount hold all lock. + */ +static void save_error_info(struct btrfs_fs_info *fs_info) +{ + __save_error_info(fs_info); +} + +/* btrfs handle error by forcing the filesystem readonly */ +static void btrfs_handle_error(struct btrfs_fs_info *fs_info) +{ + struct super_block *sb = fs_info->sb; + + if (sb->s_flags & MS_RDONLY) + return; + + if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { + sb->s_flags |= MS_RDONLY; + printk(KERN_INFO "btrfs is forced readonly\n"); + } +} + +/* + * __btrfs_std_error decodes expected errors from the caller and + * invokes the approciate error response. + */ +void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, + unsigned int line, int errno) +{ + struct super_block *sb = fs_info->sb; + char nbuf[16]; + const char *errstr; + + /* + * Special case: if the error is EROFS, and we're already + * under MS_RDONLY, then it is safe here. 
+ */ + if (errno == -EROFS && (sb->s_flags & MS_RDONLY)) + return; + + errstr = btrfs_decode_error(fs_info, errno, nbuf); + printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n", + sb->s_id, function, line, errstr); + save_error_info(fs_info); + + btrfs_handle_error(fs_info); +} + static void btrfs_put_super(struct super_block *sb) { struct btrfs_root *root = btrfs_sb(sb); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 29e30d832ec9..bae5c7b8bbe2 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -181,6 +181,9 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root, struct btrfs_trans_handle *h; struct btrfs_transaction *cur_trans; int ret; + + if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) + return ERR_PTR(-EROFS); again: h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); if (!h) -- cgit v1.2.2 From cf78859f520f8275318f47d7864f4459d940cb6b Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 17 Jan 2011 21:21:14 +0100 Subject: xfs: Do not name variables "panic" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On platforms that call panic() inside their BUG() macro (m68k/sun3, and all platforms that don't set HAVE_ARCH_BUG), compilation fails with: | fs/xfs/support/debug.c: In function ‘xfs_cmn_err’: | fs/xfs/support/debug.c:92: error: called object ‘panic’ is not a function as the local variable "panic" conflicts with the "panic()" function. Rename the local variable to resolve this. Signed-off-by: Geert Uytterhoeven Reviewed-by: Christoph Hellwig Signed-off-by: Linus Torvalds --- fs/xfs/support/debug.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/xfs/support/debug.c b/fs/xfs/support/debug.c index e6cf955ec0fc..0df88897ef84 100644 --- a/fs/xfs/support/debug.c +++ b/fs/xfs/support/debug.c @@ -75,11 +75,11 @@ xfs_cmn_err( { struct va_format vaf; va_list args; - int panic = 0; + int do_panic = 0; if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) { printk(KERN_ALERT "XFS: Transforming an alert into a BUG."); - panic = 1; + do_panic = 1; } va_start(args, fmt); @@ -89,7 +89,7 @@ xfs_cmn_err( printk(KERN_ALERT "Filesystem %s: %pV", mp->m_fsname, &vaf); va_end(args); - BUG_ON(panic); + BUG_ON(do_panic); } void -- cgit v1.2.2 From c14cc63a63e94d490ac6517a555113c30d420db4 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Tue, 18 Jan 2011 12:06:04 +0800 Subject: autofs4 - fix get_next_positive_dentry() The initialization condition in fs/autofs4/expire.c:get_next_positive_dentry() appears to be incorrect. If prev == NULL I believe that root should be returned. Further down, at the current dentry check for it being simple_positive() it looks like the d_lock for dentry p should be dropped instead of dentry ret, otherwise when p is assinged to ret we end up with no lock on p and a lost lock on ret, which leads to a deadlock. 
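For reference, the second hunk is the usual hand-over-hand locking shape: while advancing from dentry p to candidate ret with both d_locks held, the lock to release is the one on the dentry being left behind, so that exactly one lock (on the new p) stays held across the loop. A simplified sketch of that shape, not the actual autofs4 walker:

    spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
    if (!simple_positive(ret)) {
            /* negative dentry: keep ret's lock, give up p's */
            spin_unlock(&p->d_lock);
            p = ret;                /* new p still holds the lock taken above */
            goto again;
    }
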
Signed-off-by: Ian Kent Signed-off-by: Al Viro --- fs/autofs4/expire.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 3ed79d76c233..f43100b9662b 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -96,7 +96,7 @@ static struct dentry *get_next_positive_dentry(struct dentry *prev, struct dentry *p, *ret; if (prev == NULL) - return dget(prev); + return dget(root); spin_lock(&autofs4_lock); relock: @@ -133,7 +133,7 @@ again: spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED); /* Negative dentry - try next */ if (!simple_positive(ret)) { - spin_unlock(&ret->d_lock); + spin_unlock(&p->d_lock); p = ret; goto again; } -- cgit v1.2.2 From 8931221411f9ff950de8fd686dc5ab881394cb9a Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Tue, 18 Jan 2011 12:06:10 +0800 Subject: vfs - fix dentry ref count in do_lookup() There is a ref count problem in fs/namei.c:do_lookup(). When walking in ref-walk mode, if follow_managed() returns a fail we need to drop dentry and possibly vfsmount. Clean up properly, as we do in the other caller of follow_managed(). Signed-off-by: Ian Kent Signed-off-by: Al Viro --- fs/namei.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index b753192d8c3f..7d77f24d32a9 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1272,8 +1272,10 @@ done: path->mnt = mnt; path->dentry = dentry; err = follow_managed(path, nd->flags); - if (unlikely(err < 0)) + if (unlikely(err < 0)) { + path_put_conditional(path, nd); return err; + } *inode = path->dentry->d_inode; return 0; -- cgit v1.2.2 From c0bcc9d55252012805300ca01b9b7a143b4daf85 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Tue, 18 Jan 2011 12:06:15 +0800 Subject: autofs4 - fix debug print in autofs4_lookup() oz_mode isn't defined any more, use autofs4_oz_mode(sbi) instead. Signed-off-by: Ian Kent Signed-off-by: Al Viro --- fs/autofs4/root.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 1dba035fc376..427129ab5292 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -488,7 +488,8 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s sbi = autofs4_sbi(dir->i_sb); DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d", - current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode); + current->pid, task_pgrp_nr(current), sbi->catatonic, + autofs4_oz_mode(sbi)); active = autofs4_lookup_active(dentry); if (active) { -- cgit v1.2.2 From 292c5ee802e9b969b84ee671a5e3001d94230f5b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 17 Jan 2011 00:47:38 -0500 Subject: autofs4: keep symlink body in inode->i_private gets rid of all ->free()/->u.symlink machinery in autofs; we simply keep symlink bodies in inode->i_private and free them in ->evict_inode(). 
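The resulting pattern is compact enough to restate as a sketch (simplified, with illustrative function names): the symlink body is a kmalloc'ed string parked in i_private at creation time, ->follow_link() hands that pointer to nd_set_link(), and ->evict_inode() frees it once writeback has been ended; kfree(NULL) makes the non-symlink case a no-op.

    static void *example_follow_link(struct dentry *dentry, struct nameidata *nd)
    {
            nd_set_link(nd, dentry->d_inode->i_private); /* body lives in i_private */
            return NULL;
    }

    static void example_evict_inode(struct inode *inode)
    {
            end_writeback(inode);
            kfree(inode->i_private); /* symlink body; NULL for everything else */
    }
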
Acked-by: Ian Kent Signed-off-by: Al Viro --- fs/autofs4/autofs_i.h | 5 ----- fs/autofs4/inode.c | 27 +++++++-------------------- fs/autofs4/root.c | 2 +- fs/autofs4/symlink.c | 3 +-- 4 files changed, 9 insertions(+), 28 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 1f016bfb42d5..99a4af8d9c83 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -91,11 +91,6 @@ struct autofs_info { mode_t mode; size_t size; - - void (*free)(struct autofs_info *); - union { - const char *symlink; - } u; }; #define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */ diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 9e1a9dad23e1..cf8abc793d50 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -22,14 +22,6 @@ #include "autofs_i.h" #include -static void ino_lnkfree(struct autofs_info *ino) -{ - if (ino->u.symlink) { - kfree(ino->u.symlink); - ino->u.symlink = NULL; - } -} - struct autofs_info *autofs4_init_ino(struct autofs_info *ino, struct autofs_sb_info *sbi, mode_t mode) { @@ -60,16 +52,6 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino, ino->sbi = sbi; - if (reinit && ino->free) - (ino->free)(ino); - - memset(&ino->u, 0, sizeof(ino->u)); - - ino->free = NULL; - - if (S_ISLNK(mode)) - ino->free = ino_lnkfree; - return ino; } @@ -79,8 +61,6 @@ void autofs4_free_ino(struct autofs_info *ino) ino->dentry->d_fsdata = NULL; ino->dentry = NULL; } - if (ino->free) - (ino->free)(ino); kfree(ino); } @@ -136,9 +116,16 @@ static int autofs4_show_options(struct seq_file *m, struct vfsmount *mnt) return 0; } +static void autofs4_evict_inode(struct inode *inode) +{ + end_writeback(inode); + kfree(inode->i_private); +} + static const struct super_operations autofs4_sops = { .statfs = simple_statfs, .show_options = autofs4_show_options, + .evict_inode = autofs4_evict_inode, }; enum {Opt_err, Opt_fd, Opt_uid, Opt_gid, Opt_pgrp, Opt_minproto, Opt_maxproto, diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 427129ab5292..f47aceabf58f 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -561,6 +561,7 @@ static int autofs4_dir_symlink(struct inode *dir, kfree(ino); return -ENOMEM; } + inode->i_private = cp; d_add(dentry, inode); dentry->d_fsdata = ino; @@ -570,7 +571,6 @@ static int autofs4_dir_symlink(struct inode *dir, if (p_ino && dentry->d_parent != dentry) atomic_inc(&p_ino->count); - ino->u.symlink = cp; dir->i_mtime = CURRENT_TIME; return 0; diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c index b4ea82934d2e..f27c094a1919 100644 --- a/fs/autofs4/symlink.c +++ b/fs/autofs4/symlink.c @@ -14,8 +14,7 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) { - struct autofs_info *ino = autofs4_dentry_ino(dentry); - nd_set_link(nd, (char *)ino->u.symlink); + nd_set_link(nd, dentry->d_inode->i_private); return NULL; } -- cgit v1.2.2 From 14a2f00bde7668fe18d1c8355d26c7c96961e1f7 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 16 Jan 2011 17:13:33 -0500 Subject: autofs4: autofs4_mkroot() is not different from autofs4_init_ino() Kill it. Mind you, it's been an obfuscated call of autofs4_init_ino() ever since 2.3.99pre6-4... 
Acked-by: Ian Kent Signed-off-by: Al Viro --- fs/autofs4/inode.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index cf8abc793d50..7421b47b1bb9 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -215,17 +215,6 @@ static int parse_options(char *options, int *pipefd, uid_t *uid, gid_t *gid, return (*pipefd < 0); } -static struct autofs_info *autofs4_mkroot(struct autofs_sb_info *sbi) -{ - struct autofs_info *ino; - - ino = autofs4_init_ino(NULL, sbi, S_IFDIR | 0755); - if (!ino) - return NULL; - - return ino; -} - int autofs4_fill_super(struct super_block *s, void *data, int silent) { struct inode * root_inode; @@ -269,7 +258,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) /* * Get the root inode and dentry, but defer checking for errors. */ - ino = autofs4_mkroot(sbi); + ino = autofs4_init_ino(NULL, sbi, S_IFDIR | 0755); if (!ino) goto fail_free; root_inode = autofs4_get_inode(s, ino); -- cgit v1.2.2 From 09f12c03fa699ce7d030c47add60577138927d4f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 16 Jan 2011 17:20:23 -0500 Subject: autofs4: pass mode to autofs4_get_inode() explicitly In all cases we'd set inf->mode to know value just before passing it to autofs4_get_inode(). That kills the need to store it in autofs_info and pass it to autofs_init_ino() Acked-by: Ian Kent Signed-off-by: Al Viro --- fs/autofs4/autofs_i.h | 5 ++--- fs/autofs4/inode.c | 16 ++++++++-------- fs/autofs4/root.c | 10 +++++----- 3 files changed, 15 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 99a4af8d9c83..c6d66db67ff1 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -89,7 +89,6 @@ struct autofs_info { uid_t uid; gid_t gid; - mode_t mode; size_t size; }; @@ -170,7 +169,7 @@ static inline int autofs4_ispending(struct dentry *dentry) return 0; } -struct inode *autofs4_get_inode(struct super_block *, struct autofs_info *); +struct inode *autofs4_get_inode(struct super_block *, struct autofs_info *, mode_t); void autofs4_free_ino(struct autofs_info *); /* Expiration */ @@ -280,7 +279,7 @@ static inline void managed_dentry_clear_managed(struct dentry *dentry) /* Initializing function */ int autofs4_fill_super(struct super_block *, void *, int); -struct autofs_info *autofs4_init_ino(struct autofs_info *, struct autofs_sb_info *sbi, mode_t mode); +struct autofs_info *autofs4_init_ino(struct autofs_info *, struct autofs_sb_info *sbi); /* Queue management functions */ diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 7421b47b1bb9..6b6f43f00c46 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -23,7 +23,7 @@ #include struct autofs_info *autofs4_init_ino(struct autofs_info *ino, - struct autofs_sb_info *sbi, mode_t mode) + struct autofs_sb_info *sbi) { int reinit = 1; @@ -47,7 +47,6 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino, ino->uid = 0; ino->gid = 0; - ino->mode = mode; ino->last_used = jiffies; ino->sbi = sbi; @@ -258,10 +257,10 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) /* * Get the root inode and dentry, but defer checking for errors. 
*/ - ino = autofs4_init_ino(NULL, sbi, S_IFDIR | 0755); + ino = autofs4_init_ino(NULL, sbi); if (!ino) goto fail_free; - root_inode = autofs4_get_inode(s, ino); + root_inode = autofs4_get_inode(s, ino, S_IFDIR | 0755); if (!root_inode) goto fail_ino; @@ -345,14 +344,15 @@ fail_unlock: } struct inode *autofs4_get_inode(struct super_block *sb, - struct autofs_info *inf) + struct autofs_info *inf, + mode_t mode) { struct inode *inode = new_inode(sb); if (inode == NULL) return NULL; - inode->i_mode = inf->mode; + inode->i_mode = mode; if (sb->s_root) { inode->i_uid = sb->s_root->d_inode->i_uid; inode->i_gid = sb->s_root->d_inode->i_gid; @@ -360,11 +360,11 @@ struct inode *autofs4_get_inode(struct super_block *sb, inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_ino = get_next_ino(); - if (S_ISDIR(inf->mode)) { + if (S_ISDIR(mode)) { inode->i_nlink = 2; inode->i_op = &autofs4_dir_inode_operations; inode->i_fop = &autofs4_dir_operations; - } else if (S_ISLNK(inf->mode)) { + } else if (S_ISLNK(mode)) { inode->i_size = inf->size; inode->i_op = &autofs4_symlink_inode_operations; } diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index f47aceabf58f..e55dcdbeb450 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -508,7 +508,7 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s if (autofs_type_indirect(sbi->type) && IS_ROOT(dentry->d_parent)) __managed_dentry_set_managed(dentry); - ino = autofs4_init_ino(NULL, sbi, 0555); + ino = autofs4_init_ino(NULL, sbi); if (!ino) return ERR_PTR(-ENOMEM); @@ -538,7 +538,7 @@ static int autofs4_dir_symlink(struct inode *dir, if (!autofs4_oz_mode(sbi)) return -EACCES; - ino = autofs4_init_ino(ino, sbi, S_IFLNK | 0555); + ino = autofs4_init_ino(ino, sbi); if (!ino) return -ENOMEM; @@ -554,7 +554,7 @@ static int autofs4_dir_symlink(struct inode *dir, strcpy(cp, symname); - inode = autofs4_get_inode(dir->i_sb, ino); + inode = autofs4_get_inode(dir->i_sb, ino, S_IFLNK | 0555); if (!inode) { kfree(cp); if (!dentry->d_fsdata) @@ -733,13 +733,13 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode) DPRINTK("dentry %p, creating %.*s", dentry, dentry->d_name.len, dentry->d_name.name); - ino = autofs4_init_ino(ino, sbi, S_IFDIR | 0555); + ino = autofs4_init_ino(ino, sbi); if (!ino) return -ENOMEM; autofs4_del_active(dentry); - inode = autofs4_get_inode(dir->i_sb, ino); + inode = autofs4_get_inode(dir->i_sb, ino, S_IFDIR | 0555); if (!inode) { if (!dentry->d_fsdata) kfree(ino); -- cgit v1.2.2 From 0bf71d4d005176f6b6587ba64a377f9798213f21 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 16 Jan 2011 17:39:15 -0500 Subject: autofs4: kill ->size in autofs_info It's used only to pass the length of symlink body to autofs4_get_inode() in autofs4_dir_symlink(). We can bloody well set inode->i_size in autofs4_dir_symlink() directly and be done with that. 
Acked-by: Ian Kent Signed-off-by: Al Viro --- fs/autofs4/autofs_i.h | 2 -- fs/autofs4/inode.c | 2 -- fs/autofs4/root.c | 5 +++-- 3 files changed, 3 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index c6d66db67ff1..0925bacb5c3c 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -88,8 +88,6 @@ struct autofs_info { uid_t uid; gid_t gid; - - size_t size; }; #define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */ diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 6b6f43f00c46..ac1a99ce820b 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -38,7 +38,6 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino, if (!reinit) { ino->flags = 0; ino->dentry = NULL; - ino->size = 0; INIT_LIST_HEAD(&ino->active); ino->active_count = 0; INIT_LIST_HEAD(&ino->expiring); @@ -365,7 +364,6 @@ struct inode *autofs4_get_inode(struct super_block *sb, inode->i_op = &autofs4_dir_inode_operations; inode->i_fop = &autofs4_dir_operations; } else if (S_ISLNK(mode)) { - inode->i_size = inf->size; inode->i_op = &autofs4_symlink_inode_operations; } diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index e55dcdbeb450..1ad3c6ca9b03 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -530,6 +530,7 @@ static int autofs4_dir_symlink(struct inode *dir, struct autofs_info *ino = autofs4_dentry_ino(dentry); struct autofs_info *p_ino; struct inode *inode; + size_t size = strlen(symname); char *cp; DPRINTK("%s <- %.*s", symname, @@ -544,8 +545,7 @@ static int autofs4_dir_symlink(struct inode *dir, autofs4_del_active(dentry); - ino->size = strlen(symname); - cp = kmalloc(ino->size + 1, GFP_KERNEL); + cp = kmalloc(size + 1, GFP_KERNEL); if (!cp) { if (!dentry->d_fsdata) kfree(ino); @@ -562,6 +562,7 @@ static int autofs4_dir_symlink(struct inode *dir, return -ENOMEM; } inode->i_private = cp; + inode->i_size = size; d_add(dentry, inode); dentry->d_fsdata = ino; -- cgit v1.2.2 From 726a5e0688fd344110d8f2979d87f243a4ba1a48 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 16 Jan 2011 17:43:52 -0500 Subject: autofs4: autofs4_get_inode() doesn't need autofs_info * argument anymore Acked-by: Ian Kent Signed-off-by: Al Viro --- fs/autofs4/autofs_i.h | 2 +- fs/autofs4/inode.c | 6 ++---- fs/autofs4/root.c | 4 ++-- 3 files changed, 5 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 0925bacb5c3c..8f15162f1672 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -167,7 +167,7 @@ static inline int autofs4_ispending(struct dentry *dentry) return 0; } -struct inode *autofs4_get_inode(struct super_block *, struct autofs_info *, mode_t); +struct inode *autofs4_get_inode(struct super_block *, mode_t); void autofs4_free_ino(struct autofs_info *); /* Expiration */ diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index ac1a99ce820b..b3f9477c9745 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -259,7 +259,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) ino = autofs4_init_ino(NULL, sbi); if (!ino) goto fail_free; - root_inode = autofs4_get_inode(s, ino, S_IFDIR | 0755); + root_inode = autofs4_get_inode(s, S_IFDIR | 0755); if (!root_inode) goto fail_ino; @@ -342,9 +342,7 @@ fail_unlock: return -EINVAL; } -struct inode *autofs4_get_inode(struct super_block *sb, - struct autofs_info *inf, - mode_t mode) +struct inode *autofs4_get_inode(struct super_block *sb, mode_t mode) { struct inode *inode = new_inode(sb); diff 
--git a/fs/autofs4/root.c b/fs/autofs4/root.c index 1ad3c6ca9b03..83e5379c5ade 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -554,7 +554,7 @@ static int autofs4_dir_symlink(struct inode *dir, strcpy(cp, symname); - inode = autofs4_get_inode(dir->i_sb, ino, S_IFLNK | 0555); + inode = autofs4_get_inode(dir->i_sb, S_IFLNK | 0555); if (!inode) { kfree(cp); if (!dentry->d_fsdata) @@ -740,7 +740,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode) autofs4_del_active(dentry); - inode = autofs4_get_inode(dir->i_sb, ino, S_IFDIR | 0555); + inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555); if (!inode) { if (!dentry->d_fsdata) kfree(ino); -- cgit v1.2.2 From 5a37db302e698a83209eff22ca8f3fd05eb1d84b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 16 Jan 2011 18:29:35 -0500 Subject: autofs4: mkdir and symlink always get a dentry that had passed lookup ... so ->d_fsdata will have been set up before we get there Acked-by: Ian Kent Signed-off-by: Al Viro --- fs/autofs4/root.c | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 83e5379c5ade..a5b93e8f49b5 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -539,18 +539,15 @@ static int autofs4_dir_symlink(struct inode *dir, if (!autofs4_oz_mode(sbi)) return -EACCES; - ino = autofs4_init_ino(ino, sbi); - if (!ino) - return -ENOMEM; + BUG_ON(!ino); + + autofs4_init_ino(ino, sbi); autofs4_del_active(dentry); cp = kmalloc(size + 1, GFP_KERNEL); - if (!cp) { - if (!dentry->d_fsdata) - kfree(ino); + if (!cp) return -ENOMEM; - } strcpy(cp, symname); @@ -565,8 +562,7 @@ static int autofs4_dir_symlink(struct inode *dir, inode->i_size = size; d_add(dentry, inode); - dentry->d_fsdata = ino; - ino->dentry = dget(dentry); + dget(dentry); atomic_inc(&ino->count); p_ino = autofs4_dentry_ino(dentry->d_parent); if (p_ino && dentry->d_parent != dentry) @@ -734,25 +730,21 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode) DPRINTK("dentry %p, creating %.*s", dentry, dentry->d_name.len, dentry->d_name.name); - ino = autofs4_init_ino(ino, sbi); - if (!ino) - return -ENOMEM; + BUG_ON(!ino); + + autofs4_init_ino(ino, sbi); autofs4_del_active(dentry); inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555); - if (!inode) { - if (!dentry->d_fsdata) - kfree(ino); + if (!inode) return -ENOMEM; - } d_add(dentry, inode); if (sbi->version < 5) autofs_set_leaf_automount_flags(dentry); - dentry->d_fsdata = ino; - ino->dentry = dget(dentry); + dget(dentry); atomic_inc(&ino->count); p_ino = autofs4_dentry_ino(dentry->d_parent); if (p_ino && dentry->d_parent != dentry) -- cgit v1.2.2 From 26e6c910670171410577c7df2aebe94cef76e150 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 16 Jan 2011 18:43:40 -0500 Subject: autofs4: split autofs4_init_ino() split init_ino into new_ino and clean_ino; the former is what used to be init_ino(NULL, sbi), the latter is for cases where we passed non-NULL ino. Lose unused arguments. 
Acked-by: Ian Kent Signed-off-by: Al Viro --- fs/autofs4/autofs_i.h | 3 ++- fs/autofs4/inode.c | 32 ++++++++++---------------------- fs/autofs4/root.c | 6 +++--- 3 files changed, 15 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 8f15162f1672..bfa0c6e542f2 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -277,7 +277,8 @@ static inline void managed_dentry_clear_managed(struct dentry *dentry) /* Initializing function */ int autofs4_fill_super(struct super_block *, void *, int); -struct autofs_info *autofs4_init_ino(struct autofs_info *, struct autofs_sb_info *sbi); +struct autofs_info *autofs4_new_ino(struct autofs_sb_info *); +void autofs4_clean_ino(struct autofs_info *); /* Queue management functions */ diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index b3f9477c9745..0df0c7c46fa2 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -22,35 +22,23 @@ #include "autofs_i.h" #include -struct autofs_info *autofs4_init_ino(struct autofs_info *ino, - struct autofs_sb_info *sbi) +struct autofs_info *autofs4_new_ino(struct autofs_sb_info *sbi) { - int reinit = 1; - - if (ino == NULL) { - reinit = 0; - ino = kmalloc(sizeof(*ino), GFP_KERNEL); - } - - if (ino == NULL) - return NULL; - - if (!reinit) { - ino->flags = 0; - ino->dentry = NULL; + struct autofs_info *ino = kzalloc(sizeof(*ino), GFP_KERNEL); + if (ino) { INIT_LIST_HEAD(&ino->active); - ino->active_count = 0; INIT_LIST_HEAD(&ino->expiring); - atomic_set(&ino->count, 0); + ino->last_used = jiffies; + ino->sbi = sbi; } + return ino; +} +void autofs4_clean_ino(struct autofs_info *ino) +{ ino->uid = 0; ino->gid = 0; ino->last_used = jiffies; - - ino->sbi = sbi; - - return ino; } void autofs4_free_ino(struct autofs_info *ino) @@ -256,7 +244,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) /* * Get the root inode and dentry, but defer checking for errors. */ - ino = autofs4_init_ino(NULL, sbi); + ino = autofs4_new_ino(sbi); if (!ino) goto fail_free; root_inode = autofs4_get_inode(s, S_IFDIR | 0755); diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index a5b93e8f49b5..f7c97c084c1f 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -508,7 +508,7 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s if (autofs_type_indirect(sbi->type) && IS_ROOT(dentry->d_parent)) __managed_dentry_set_managed(dentry); - ino = autofs4_init_ino(NULL, sbi); + ino = autofs4_new_ino(sbi); if (!ino) return ERR_PTR(-ENOMEM); @@ -541,7 +541,7 @@ static int autofs4_dir_symlink(struct inode *dir, BUG_ON(!ino); - autofs4_init_ino(ino, sbi); + autofs4_clean_ino(ino); autofs4_del_active(dentry); @@ -732,7 +732,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode) BUG_ON(!ino); - autofs4_init_ino(ino, sbi); + autofs4_clean_ino(ino); autofs4_del_active(dentry); -- cgit v1.2.2 From b89b12b46211d971d75e5ca8249817bc9e11c453 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 16 Jan 2011 21:42:32 -0500 Subject: autofs4: clean ->d_release() and autofs4_free_ino() up The latter is called only when both ino and dentry are about to be freed, so cleaning ->d_fsdata and ->dentry is pointless. 
Acked-by: Ian Kent Signed-off-by: Al Viro --- fs/autofs4/autofs_i.h | 1 - fs/autofs4/inode.c | 4 ---- fs/autofs4/root.c | 30 ++++++++++++++++-------------- 3 files changed, 16 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index bfa0c6e542f2..54f923792728 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -338,5 +338,4 @@ static inline void autofs4_del_expiring(struct dentry *dentry) return; } -void autofs4_dentry_release(struct dentry *); extern void autofs4_kill_sb(struct super_block *); diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index 0df0c7c46fa2..180fa2425e49 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -43,10 +43,6 @@ void autofs4_clean_ino(struct autofs_info *ino) void autofs4_free_ino(struct autofs_info *ino) { - if (ino->dentry) { - ino->dentry->d_fsdata = NULL; - ino->dentry = NULL; - } kfree(ino); } diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index f7c97c084c1f..014e7aba3b08 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -37,6 +37,7 @@ static int autofs4_dir_open(struct inode *inode, struct file *file); static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *); static struct vfsmount *autofs4_d_automount(struct path *); static int autofs4_d_manage(struct dentry *, bool, bool); +static void autofs4_dentry_release(struct dentry *); const struct file_operations autofs4_root_operations = { .open = dcache_dir_open, @@ -138,25 +139,26 @@ out: return dcache_dir_open(inode, file); } -void autofs4_dentry_release(struct dentry *de) +static void autofs4_dentry_release(struct dentry *de) { - struct autofs_info *inf; + struct autofs_info *ino = autofs4_dentry_ino(de); + struct autofs_sb_info *sbi = autofs4_sbi(de->d_sb); DPRINTK("releasing %p", de); - inf = autofs4_dentry_ino(de); - if (inf) { - struct autofs_sb_info *sbi = autofs4_sbi(de->d_sb); - if (sbi) { - spin_lock(&sbi->lookup_lock); - if (!list_empty(&inf->active)) - list_del(&inf->active); - if (!list_empty(&inf->expiring)) - list_del(&inf->expiring); - spin_unlock(&sbi->lookup_lock); - } - autofs4_free_ino(inf); + if (!ino) + return; + + if (sbi) { + spin_lock(&sbi->lookup_lock); + if (!list_empty(&ino->active)) + list_del(&ino->active); + if (!list_empty(&ino->expiring)) + list_del(&ino->expiring); + spin_unlock(&sbi->lookup_lock); } + + autofs4_free_ino(ino); } static struct dentry *autofs4_lookup_active(struct dentry *dentry) -- cgit v1.2.2 From 23c3010808de86f21436eb822aacfa551bfc17e4 Mon Sep 17 00:00:00 2001 From: Benjamin Marzinski Date: Fri, 14 Jan 2011 22:39:16 -0600 Subject: GFS2: remove iopen glocks from cache on failed deletes When a file gets deleted on GFS2, if a node can't get an exclusive lock on the file's iopen glock, it punts on actually freeing up the space, because another node is using the file. When it does this, it needs to drop the iopen glock from its cache so that the other node can get an exclusive lock on it. Now, gfs2_delete_inode() sets GL_NOCACHE before dropping the shared lock on the iopen glock in preparation for grabbing it in the exclusive state. Since the node needs the glock in the exclusive state, dropping the shared lock from the cache doesn't slow down the case where no other nodes are using the file. 
Signed-off-by: Benjamin Marzinski Signed-off-by: Steven Whitehouse --- fs/gfs2/super.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 16c2ecac7eb7..ec73ed70bae1 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -1336,6 +1336,7 @@ static void gfs2_evict_inode(struct inode *inode) if (error) goto out_truncate; + ip->i_iopen_gh.gh_flags |= GL_NOCACHE; gfs2_glock_dq_wait(&ip->i_iopen_gh); gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh); error = gfs2_glock_nq(&ip->i_iopen_gh); -- cgit v1.2.2 From 24d9765fc18c7838ccdbb0d71fb706321d9b824c Mon Sep 17 00:00:00 2001 From: Steven Whitehouse Date: Tue, 18 Jan 2011 14:49:08 +0000 Subject: GFS2: Fix error path in gfs2_lookup_by_inum() In the (impossible, except if there is fs corruption) error path in gfs2_lookup_by_inum() if the call to gfs2_inode_refresh() fails, it was leaving the function by calling iput() rather than iget_failed(). This would cause future lookups of the same inode to block forever. This patch fixes the problem by moving the call to gfs2_inode_refresh() into gfs2_inode_lookup() where iget_failed() is part of the error path already. Also this cleans up some unreachable code and makes gfs2_set_iop() static. Signed-off-by: Steven Whitehouse --- fs/gfs2/inode.c | 72 ++++++++++++++++++--------------------------------------- fs/gfs2/inode.h | 1 - 2 files changed, 22 insertions(+), 51 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 2232b3c780bd..7aa7d4f8984a 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -74,16 +74,14 @@ static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr) } /** - * GFS2 lookup code fills in vfs inode contents based on info obtained - * from directory entry inside gfs2_inode_lookup(). This has caused issues - * with NFS code path since its get_dentry routine doesn't have the relevant - * directory entry when gfs2_inode_lookup() is invoked. Part of the code - * segment inside gfs2_inode_lookup code needs to get moved around. + * gfs2_set_iop - Sets inode operations + * @inode: The inode with correct i_mode filled in * - * Clears I_NEW as well. - **/ + * GFS2 lookup code fills in vfs inode contents based on info obtained + * from directory entry inside gfs2_inode_lookup(). + */ -void gfs2_set_iop(struct inode *inode) +static void gfs2_set_iop(struct inode *inode) { struct gfs2_sbd *sdp = GFS2_SB(inode); umode_t mode = inode->i_mode; @@ -106,8 +104,6 @@ void gfs2_set_iop(struct inode *inode) inode->i_op = &gfs2_file_iops; init_special_inode(inode, inode->i_mode, inode->i_rdev); } - - unlock_new_inode(inode); } /** @@ -119,10 +115,8 @@ void gfs2_set_iop(struct inode *inode) * Returns: A VFS inode, or an error */ -struct inode *gfs2_inode_lookup(struct super_block *sb, - unsigned int type, - u64 no_addr, - u64 no_formal_ino) +struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, + u64 no_addr, u64 no_formal_ino) { struct inode *inode; struct gfs2_inode *ip; @@ -152,51 +146,37 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); if (unlikely(error)) goto fail_iopen; - ip->i_iopen_gh.gh_gl->gl_object = ip; + ip->i_iopen_gh.gh_gl->gl_object = ip; gfs2_glock_put(io_gl); io_gl = NULL; - if ((type == DT_UNKNOWN) && (no_formal_ino == 0)) - goto gfs2_nfsbypass; - - inode->i_mode = DT2IF(type); - - /* - * We must read the inode in order to work out its type in - * this case. 
Note that this doesn't happen often as we normally - * know the type beforehand. This code path only occurs during - * unlinked inode recovery (where it is safe to do this glock, - * which is not true in the general case). - */ if (type == DT_UNKNOWN) { - struct gfs2_holder gh; - error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); - if (unlikely(error)) - goto fail_glock; - /* Inode is now uptodate */ - gfs2_glock_dq_uninit(&gh); + /* Inode glock must be locked already */ + error = gfs2_inode_refresh(GFS2_I(inode)); + if (error) + goto fail_refresh; + } else { + inode->i_mode = DT2IF(type); } gfs2_set_iop(inode); + unlock_new_inode(inode); } -gfs2_nfsbypass: return inode; -fail_glock: - gfs2_glock_dq(&ip->i_iopen_gh); + +fail_refresh: + ip->i_iopen_gh.gh_gl->gl_object = NULL; + gfs2_glock_dq_uninit(&ip->i_iopen_gh); fail_iopen: if (io_gl) gfs2_glock_put(io_gl); fail_put: - if (inode->i_state & I_NEW) - ip->i_gl->gl_object = NULL; + ip->i_gl->gl_object = NULL; gfs2_glock_put(ip->i_gl); fail: - if (inode->i_state & I_NEW) - iget_failed(inode); - else - iput(inode); + iget_failed(inode); return ERR_PTR(error); } @@ -221,14 +201,6 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr, if (IS_ERR(inode)) goto fail; - error = gfs2_inode_refresh(GFS2_I(inode)); - if (error) - goto fail_iput; - - /* Pick up the works we bypass in gfs2_inode_lookup */ - if (inode->i_state & I_NEW) - gfs2_set_iop(inode); - /* Two extra checks for NFS only */ if (no_formal_ino) { error = -ESTALE; diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index 732a183efdb3..3e00a66e7cbd 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h @@ -96,7 +96,6 @@ err: return -EIO; } -extern void gfs2_set_iop(struct inode *inode); extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, u64 no_addr, u64 no_formal_ino); extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr, -- cgit v1.2.2 From 50aac4fec503960380ab594a93a6fbfdf3f8915f Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 18 Jan 2011 07:59:40 -0800 Subject: ceph: fix cap_wanted_delay_{min,max} mount option initialization These were initialized to 0 instead of the default, fallout from the RBD refactor in 3d14c5d2b6e15c21d8e5467dc62d33127c23a644. Signed-off-by: Sage Weil --- fs/ceph/super.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/ceph/super.c b/fs/ceph/super.c index bf6f0f34082a..9c5085465a63 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -290,6 +290,8 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt, fsopt->rsize = CEPH_MOUNT_RSIZE_DEFAULT; fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL); + fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT; + fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT; fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT; fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT; fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT; -- cgit v1.2.2 From 24be0c481067560b11441e794e27f166a3568863 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 18 Jan 2011 08:48:06 -0800 Subject: ceph: fix erroneous cap flush to non-auth mds The int flushing is global and not clear on each iteration of the loop, which can cause a second flush of caps to any MDSs with ids greater than the auth. 
Signed-off-by: Sage Weil --- fs/ceph/caps.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 60d27bc9eb83..f654c7e933ac 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1658,6 +1658,8 @@ ack: if (cap == ci->i_auth_cap && ci->i_dirty_caps) flushing = __mark_caps_flushing(inode, session); + else + flushing = 0; mds = cap->mds; /* remember mds, so we don't repeat */ sent++; -- cgit v1.2.2 From 088b3f5e9ee2649f5cfc2f08d8ce654e3eeba310 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 18 Jan 2011 08:56:01 -0800 Subject: ceph: fix flushing of caps vs cap import If we are mid-flush and a cap is migrated to another node, we need to resend the cap flush message to the new MDS, and do so with the original flush_seq to avoid leaking across a sync boundary. Previously we didn't redo the flush (we only flushed newly dirty data), which would cause a later sync to hang forever. Signed-off-by: Sage Weil --- fs/ceph/caps.c | 38 ++++++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index f654c7e933ac..7def3f5903dd 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1560,9 +1560,10 @@ retry_locked: /* NOTE: no side-effects allowed, until we take s_mutex */ revoking = cap->implemented & ~cap->issued; - if (revoking) - dout(" mds%d revoking %s\n", cap->mds, - ceph_cap_string(revoking)); + dout(" mds%d cap %p issued %s implemented %s revoking %s\n", + cap->mds, cap, ceph_cap_string(cap->issued), + ceph_cap_string(cap->implemented), + ceph_cap_string(revoking)); if (cap == ci->i_auth_cap && (cap->issued & CEPH_CAP_FILE_WR)) { @@ -1942,6 +1943,35 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, } } +static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session, + struct inode *inode) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_cap *cap; + int delayed = 0; + + spin_lock(&inode->i_lock); + cap = ci->i_auth_cap; + dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode, + ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq); + __ceph_flush_snaps(ci, &session, 1); + if (ci->i_flushing_caps) { + delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, + __ceph_caps_used(ci), + __ceph_caps_wanted(ci), + cap->issued | cap->implemented, + ci->i_flushing_caps, NULL); + if (delayed) { + spin_lock(&inode->i_lock); + __cap_delay_requeue(mdsc, ci); + spin_unlock(&inode->i_lock); + } + } else { + spin_unlock(&inode->i_lock); + } +} + /* * Take references to capabilities we hold, so that we don't release @@ -2689,7 +2719,7 @@ static void handle_cap_import(struct ceph_mds_client *mdsc, ceph_add_cap(inode, session, cap_id, -1, issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH, NULL /* no caps context */); - try_flush_caps(inode, session, NULL); + kick_flushing_inode_caps(mdsc, session, inode); up_read(&mdsc->snap_rwsem); /* make sure we re-request max_size, if necessary */ -- cgit v1.2.2 From 7e57b81c7688c762bc9e775bc83f9fc17946f527 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Tue, 18 Jan 2011 09:00:01 -0800 Subject: ceph: avoid immediate cap check after import The NODELAY flag avoids the heuristics that delay cap (issued/wanted) release. There's no reason for that after we import a cap, and it kills whatever benefit we get from those delays. 
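In code terms the change is only about which flag bits get passed down. A tiny illustrative model (invented flag name and helper, not the ceph API) of what dropping the NODELAY bit buys after an import:

#include <stdio.h>

#define CHECK_NODELAY 0x1        /* hypothetical flag bit */

/* With the bit set the caller forces an immediate check; without it
 * the check may be deferred so short-lived wants can be batched. */
static void check_caps(unsigned flags)
{
    if (flags & CHECK_NODELAY)
        printf("checking caps immediately\n");
    else
        printf("queueing delayed cap check\n");
}

int main(void)
{
    check_caps(CHECK_NODELAY);   /* old behaviour after an import     */
    check_caps(0);               /* patched behaviour: keep the delay */
    return 0;
}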
Signed-off-by: Sage Weil --- fs/ceph/caps.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 7def3f5903dd..6b61ded701e1 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2817,8 +2817,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, case CEPH_CAP_OP_IMPORT: handle_cap_import(mdsc, inode, h, session, snaptrace, snaptrace_len); - ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY, - session); + ceph_check_caps(ceph_inode(inode), 0, session); goto done_unlocked; } -- cgit v1.2.2 From 12fed00de963433128b5366a21a55808fab2f756 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Mon, 17 Jan 2011 20:15:44 +0300 Subject: CIFS: Fix oplock break handling (try #2) When we get oplock break notification we should set the appropriate value of OplockLevel field in oplock break acknowledge according to the oplock level held by the client in this time. As we only can have level II oplock or no oplock in the case of oplock break, we should be aware only about clientCanCacheRead field in cifsInodeInfo structure. Also fix bug connected with wrong interpretation of OplockLevel field during oplock break notification processing. Signed-off-by: Pavel Shilovsky Cc: Signed-off-by: Steve French --- fs/cifs/cifsproto.h | 2 +- fs/cifs/cifssmb.c | 4 +++- fs/cifs/file.c | 21 +++++++++++---------- fs/cifs/misc.c | 2 +- 4 files changed, 16 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index e6d1481b16c1..95d5dbbb4c7a 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -347,7 +347,7 @@ extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, const __u16 netfid, const __u64 len, const __u64 offset, const __u32 numUnlock, const __u32 numLock, const __u8 lockType, - const bool waitFlag); + const bool waitFlag, const __u8 oplock_level); extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, const __u16 smb_file_id, const int get_flag, const __u64 len, struct file_lock *, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 2f6795e524d3..3652cc60314c 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1663,7 +1663,8 @@ int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, const __u16 smb_file_id, const __u64 len, const __u64 offset, const __u32 numUnlock, - const __u32 numLock, const __u8 lockType, const bool waitFlag) + const __u32 numLock, const __u8 lockType, + const bool waitFlag, const __u8 oplock_level) { int rc = 0; LOCK_REQ *pSMB = NULL; @@ -1691,6 +1692,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, pSMB->NumberOfLocks = cpu_to_le16(numLock); pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock); pSMB->LockType = lockType; + pSMB->OplockLevel = oplock_level; pSMB->AndXCommand = 0xFF; /* none */ pSMB->Fid = smb_file_id; /* netfid stays le */ diff --git a/fs/cifs/file.c b/fs/cifs/file.c index d843631c028d..af371910f543 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -726,12 +726,12 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) /* BB we could chain these into one lock request BB */ rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, - 0, 1, lockType, 0 /* wait flag */ ); + 0, 1, lockType, 0 /* wait flag */, 0); if (rc == 0) { rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 1 /* numUnlock */ , 0 /* numLock */ , lockType, - 0 /* wait flag */ ); + 0 /* wait flag */, 0); pfLock->fl_type = F_UNLCK; if (rc != 0) cERROR(1, "Error unlocking previously locked " @@ -748,13 +748,13 
@@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 0, 1, lockType | LOCKING_ANDX_SHARED_LOCK, - 0 /* wait flag */); + 0 /* wait flag */, 0); if (rc == 0) { rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, 1, 0, lockType | LOCKING_ANDX_SHARED_LOCK, - 0 /* wait flag */); + 0 /* wait flag */, 0); pfLock->fl_type = F_RDLCK; if (rc != 0) cERROR(1, "Error unlocking " @@ -797,8 +797,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) if (numLock) { rc = CIFSSMBLock(xid, tcon, netfid, length, - pfLock->fl_start, - 0, numLock, lockType, wait_flag); + pfLock->fl_start, 0, numLock, lockType, + wait_flag, 0); if (rc == 0) { /* For Windows locks we must store them. */ @@ -818,9 +818,9 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) (pfLock->fl_start + length) >= (li->offset + li->length)) { stored_rc = CIFSSMBLock(xid, tcon, - netfid, - li->length, li->offset, - 1, 0, li->type, false); + netfid, li->length, + li->offset, 1, 0, + li->type, false, 0); if (stored_rc) rc = stored_rc; else { @@ -2192,7 +2192,8 @@ void cifs_oplock_break(struct work_struct *work) */ if (!cfile->oplock_break_cancelled) { rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0, - 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false); + 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false, + cinode->clientCanCacheRead ? 1 : 0); cFYI(1, "Oplock release rc = %d", rc); } diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 43f10281bc19..09bfcf08a90f 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -571,7 +571,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) pCifsInode = CIFS_I(netfile->dentry->d_inode); cifs_set_oplock_level(pCifsInode, - pSMB->OplockLevel); + pSMB->OplockLevel ? OPLOCK_READ : 0); /* * cifs_oplock_break_put() can't be called * from here. Get reference after queueing -- cgit v1.2.2 From 941b853d779de3298e39f1eb4e252984464eaea8 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:01 -0500 Subject: cifs: don't fail writepages on -EAGAIN errors If CIFSSMBWrite2 returns -EAGAIN, then the error should be considered temporary. CIFS should retry the write instead of setting an error on the mapping and returning. For WB_SYNC_ALL, just retry the write immediately. In the WB_SYNC_NONE case, call redirty_page_for_writeback on all of the pages that didn't get written out and then move on. Also, fix up the handling of a short write with a successful return code. MS-CIFS says that 0 bytes_written means ENOSPC or EFBIG. It doesn't mention what a short, but non-zero write means, so for now treat it as we would an -EAGAIN return. 
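The retry policy described above boils down to a small decision table over the return code, the byte count and the writeback mode. A stand-alone sketch (hypothetical helper, not the cifs code) makes that mapping explicit:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* Map a server write result onto the action writepages should take:
 * a zero-byte "success" is treated as ENOSPC, a short write as a
 * retryable -EAGAIN, and -EAGAIN either retries immediately
 * (data-integrity sync) or just redirties the pages (background
 * writeback). */
static const char *writeback_action(int rc, unsigned wrote,
                                    unsigned wanted, bool sync_all)
{
    if (rc == 0) {
        if (wrote == 0)
            rc = -ENOSPC;
        else if (wrote < wanted)
            rc = -EAGAIN;
    }

    if (rc == -EAGAIN)
        return sync_all ? "retry write now" : "redirty pages, move on";
    return rc ? "set mapping error" : "pages written";
}

int main(void)
{
    printf("%s\n", writeback_action(0, 0, 4096, false));       /* ENOSPC  */
    printf("%s\n", writeback_action(0, 1024, 4096, true));     /* retry   */
    printf("%s\n", writeback_action(-EAGAIN, 0, 4096, false)); /* redirty */
    printf("%s\n", writeback_action(0, 4096, 4096, false));    /* success */
    return 0;
}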
Reviewed-by: Suresh Jayaraman Reviewed-by: Pavel Shilovsky Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/file.c | 49 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 37 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/cifs/file.c b/fs/cifs/file.c index af371910f543..cfa2e5ebcafe 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1377,6 +1377,7 @@ retry: break; } if (n_iov) { +retry_write: open_file = find_writable_file(CIFS_I(mapping->host), false); if (!open_file) { @@ -1389,31 +1390,55 @@ retry: &bytes_written, iov, n_iov, long_op); cifsFileInfo_put(open_file); - cifs_update_eof(cifsi, offset, bytes_written); } - if (rc || bytes_written < bytes_to_write) { - cERROR(1, "Write2 ret %d, wrote %d", - rc, bytes_written); - mapping_set_error(mapping, rc); - } else { + cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written); + + /* + * For now, treat a short write as if nothing got + * written. A zero length write however indicates + * ENOSPC or EFBIG. We have no way to know which + * though, so call it ENOSPC for now. EFBIG would + * get translated to AS_EIO anyway. + * + * FIXME: make it take into account the data that did + * get written + */ + if (rc == 0) { + if (bytes_written == 0) + rc = -ENOSPC; + else if (bytes_written < bytes_to_write) + rc = -EAGAIN; + } + + /* retry on data-integrity flush */ + if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) + goto retry_write; + + /* fix the stats and EOF */ + if (bytes_written > 0) { cifs_stats_bytes_written(tcon, bytes_written); + cifs_update_eof(cifsi, offset, bytes_written); } for (i = 0; i < n_iov; i++) { page = pvec.pages[first + i]; - /* Should we also set page error on - success rc but too little data written? */ - /* BB investigate retry logic on temporary - server crash cases and how recovery works - when page marked as error */ - if (rc) + /* on retryable write error, redirty page */ + if (rc == -EAGAIN) + redirty_page_for_writepage(wbc, page); + else if (rc != 0) SetPageError(page); kunmap(page); unlock_page(page); end_page_writeback(page); page_cache_release(page); } + + if (rc != -EAGAIN) + mapping_set_error(mapping, rc); + else + rc = 0; + if ((wbc->nr_to_write -= n_iov) <= 0) done = 1; index = next; -- cgit v1.2.2 From 9d78315b03fc91228306db42edc533efa69cb518 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:01 -0500 Subject: cifs: no need to mark smb_ses_list as cifs_demultiplex_thread is exiting The TCP_Server_Info is refcounted and every SMB session holds a reference to it. Thus, smb_ses_list is always going to be empty when cifsd is coming down. This is dead code. Reviewed-by: Suresh Jayaraman Reviewed-by: Pavel Shilovsky Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 44 +++----------------------------------------- 1 file changed, 3 insertions(+), 41 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 9f59887badd2..75b538f50b12 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -346,7 +346,6 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) struct kvec iov; struct socket *csocket = server->ssocket; struct list_head *tmp; - struct cifsSesInfo *ses; struct task_struct *task_to_wake = NULL; struct mid_q_entry *mid_entry; char temp; @@ -677,44 +676,19 @@ multi_t2_fnd: if (smallbuf) /* no sense logging a debug message if NULL */ cifs_small_buf_release(smallbuf); - /* - * BB: we shouldn't have to do any of this. 
It shouldn't be - * possible to exit from the thread with active SMB sessions - */ - spin_lock(&cifs_tcp_ses_lock); - if (list_empty(&server->pending_mid_q)) { - /* loop through server session structures attached to this and - mark them dead */ - list_for_each(tmp, &server->smb_ses_list) { - ses = list_entry(tmp, struct cifsSesInfo, - smb_ses_list); - ses->status = CifsExiting; - ses->server = NULL; - } - spin_unlock(&cifs_tcp_ses_lock); - } else { - /* although we can not zero the server struct pointer yet, - since there are active requests which may depnd on them, - mark the corresponding SMB sessions as exiting too */ - list_for_each(tmp, &server->smb_ses_list) { - ses = list_entry(tmp, struct cifsSesInfo, - smb_ses_list); - ses->status = CifsExiting; - } - + if (!list_empty(&server->pending_mid_q)) { spin_lock(&GlobalMid_Lock); list_for_each(tmp, &server->pending_mid_q) { - mid_entry = list_entry(tmp, struct mid_q_entry, qhead); + mid_entry = list_entry(tmp, struct mid_q_entry, qhead); if (mid_entry->midState == MID_REQUEST_SUBMITTED) { cFYI(1, "Clearing Mid 0x%x - waking up ", - mid_entry->mid); + mid_entry->mid); task_to_wake = mid_entry->tsk; if (task_to_wake) wake_up_process(task_to_wake); } } spin_unlock(&GlobalMid_Lock); - spin_unlock(&cifs_tcp_ses_lock); /* 1/8th of sec is more than enough time for them to exit */ msleep(125); } @@ -732,18 +706,6 @@ multi_t2_fnd: coming home not much else we can do but free the memory */ } - /* last chance to mark ses pointers invalid - if there are any pointing to this (e.g - if a crazy root user tried to kill cifsd - kernel thread explicitly this might happen) */ - /* BB: This shouldn't be necessary, see above */ - spin_lock(&cifs_tcp_ses_lock); - list_for_each(tmp, &server->smb_ses_list) { - ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); - ses->server = NULL; - } - spin_unlock(&cifs_tcp_ses_lock); - kfree(server->hostname); task_to_wake = xchg(&server->tsk, NULL); kfree(server); -- cgit v1.2.2 From c5797a945cac4c470f0113fc839c521aab0d799d Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:01 -0500 Subject: cifs: make wait_for_free_request take a TCP_Server_Info pointer The cifsSesInfo pointer is only used to get at the server. 
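This is the usual "take the narrowest type you actually use" refactor. A schematic before/after (invented stand-in types, not the cifs structures):

#include <stddef.h>

struct server { int in_flight; };
struct session { struct server *server; };

/* Before: the function takes a session but only ever dereferences
 * ses->server, forcing every caller to have a session at hand. */
static int wait_for_slot_old(struct session *ses)
{
    return ses->server->in_flight;
}

/* After: take the server directly, so callers that only hold a
 * server pointer (e.g. later echo requests) can use it too. */
static int wait_for_slot_new(struct server *srv)
{
    return srv->in_flight;
}

int main(void)
{
    struct server srv = { .in_flight = 0 };
    struct session ses = { .server = &srv };

    return wait_for_slot_old(&ses) + wait_for_slot_new(&srv);
}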
Reviewed-by: Suresh Jayaraman Reviewed-by: Pavel Shilovsky Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/transport.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 59ca81b16919..9a14f77e0ab2 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -244,31 +244,31 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer, return smb_sendv(server, &iov, 1); } -static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op) +static int wait_for_free_request(struct TCP_Server_Info *server, + const int long_op) { if (long_op == CIFS_ASYNC_OP) { /* oplock breaks must not be held up */ - atomic_inc(&ses->server->inFlight); + atomic_inc(&server->inFlight); return 0; } spin_lock(&GlobalMid_Lock); while (1) { - if (atomic_read(&ses->server->inFlight) >= - cifs_max_pending){ + if (atomic_read(&server->inFlight) >= cifs_max_pending) { spin_unlock(&GlobalMid_Lock); #ifdef CONFIG_CIFS_STATS2 - atomic_inc(&ses->server->num_waiters); + atomic_inc(&server->num_waiters); #endif - wait_event(ses->server->request_q, - atomic_read(&ses->server->inFlight) + wait_event(server->request_q, + atomic_read(&server->inFlight) < cifs_max_pending); #ifdef CONFIG_CIFS_STATS2 - atomic_dec(&ses->server->num_waiters); + atomic_dec(&server->num_waiters); #endif spin_lock(&GlobalMid_Lock); } else { - if (ses->server->tcpStatus == CifsExiting) { + if (server->tcpStatus == CifsExiting) { spin_unlock(&GlobalMid_Lock); return -ENOENT; } @@ -278,7 +278,7 @@ static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op) /* update # of requests on the wire to server */ if (long_op != CIFS_BLOCKING_OP) - atomic_inc(&ses->server->inFlight); + atomic_inc(&server->inFlight); spin_unlock(&GlobalMid_Lock); break; } @@ -413,7 +413,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, to the same server. We may make this configurable later or use ses->maxReq */ - rc = wait_for_free_request(ses, long_op); + rc = wait_for_free_request(ses->server, long_op); if (rc) { cifs_small_buf_release(in_buf); return rc; @@ -610,7 +610,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, return -EIO; } - rc = wait_for_free_request(ses, long_op); + rc = wait_for_free_request(ses->server, long_op); if (rc) return rc; @@ -845,7 +845,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, return -EIO; } - rc = wait_for_free_request(ses, CIFS_BLOCKING_OP); + rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP); if (rc) return rc; -- cgit v1.2.2 From 8097531a5cb55c6472118da094dc88caf9be66ac Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:02 -0500 Subject: cifs: clean up accesses to midCount It's an atomic_t and the code accesses the "counter" field in it directly instead of using atomic_read(). It also is sometimes accessed under a spinlock and sometimes not. Move it out of the spinlock since we don't need belt-and-suspenders for something that's just informational. 
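Two independent points are being made: read an atomic counter through its accessor instead of poking its internals, and keep purely informational counters outside the spinlock. A small C11 model (hypothetical names, not the cifs globals) of both:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_int mid_count;                 /* informational only    */
static pthread_mutex_t mid_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending;                          /* protected by mid_lock */

static void queue_mid(void)
{
    atomic_fetch_add(&mid_count, 1);         /* atomic: no lock needed   */

    pthread_mutex_lock(&mid_lock);           /* lock only what needs it  */
    pending++;
    pthread_mutex_unlock(&mid_lock);
}

int main(void)
{
    queue_mid();
    /* Read via the accessor, never via the counter's internal field. */
    printf("mids: %d pending: %d\n", atomic_load(&mid_count), pending);
    return 0;
}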
Reviewed-by: Suresh Jayaraman Reviewed-by: Pavel Shilovsky Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifs_debug.c | 2 +- fs/cifs/connect.c | 2 +- fs/cifs/transport.c | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index ede98300a8cd..e2d0d5d455fa 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -331,7 +331,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) atomic_read(&totSmBufAllocCount)); #endif /* CONFIG_CIFS_STATS2 */ - seq_printf(m, "Operations (MIDs): %d\n", midCount.counter); + seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount)); seq_printf(m, "\n%d session %d share reconnects\n", tcpSesReconnectCount.counter, tconInfoReconnectCount.counter); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 75b538f50b12..465ecad6d7cc 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -628,7 +628,7 @@ multi_t2_fnd: } else if (!is_valid_oplock_break(smb_buffer, server) && !isMultiRsp) { cERROR(1, "No task to wake, unknown frame received! " - "NumMids %d", midCount.counter); + "NumMids %d", atomic_read(&midCount)); cifs_dump_mem("Received Data is: ", (char *)smb_buffer, sizeof(struct smb_hdr)); #ifdef CONFIG_CIFS_DEBUG2 diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 9a14f77e0ab2..b9eb0cffa003 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -61,10 +61,10 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) temp->tsk = current; } - spin_lock(&GlobalMid_Lock); - list_add_tail(&temp->qhead, &server->pending_mid_q); atomic_inc(&midCount); temp->midState = MID_REQUEST_ALLOCATED; + spin_lock(&GlobalMid_Lock); + list_add_tail(&temp->qhead, &server->pending_mid_q); spin_unlock(&GlobalMid_Lock); return temp; } @@ -78,8 +78,8 @@ DeleteMidQEntry(struct mid_q_entry *midEntry) spin_lock(&GlobalMid_Lock); midEntry->midState = MID_FREE; list_del(&midEntry->qhead); - atomic_dec(&midCount); spin_unlock(&GlobalMid_Lock); + atomic_dec(&midCount); if (midEntry->largeBuf) cifs_buf_release(midEntry->resp_buf); else -- cgit v1.2.2 From ddc8cf8fc718587a3788330bf4f32b379f08b250 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:02 -0500 Subject: cifs: move locked sections out of DeleteMidQEntry and AllocMidQEntry In later patches, we're going to need to have finer-grained control over the addition and removal of these structs from the pending_mid_q and we'll need to be able to call the destructor while holding the spinlock. Move the locked sections out of both routines and into the callers. Fix up current callers of DeleteMidQEntry to call a new routine that dequeues the entry and then destroys it. 
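The shape of the change: keep the destructor itself lock-free and add a thin wrapper that performs the locked dequeue before calling it, so later patches can dequeue under the lock and destroy separately. A compact model (invented names, not the cifs code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mid {
    int id;
    int queued;                  /* stand-in for list membership */
};

static pthread_mutex_t mid_lock = PTHREAD_MUTEX_INITIALIZER;

/* Destructor proper: takes no locks, so callers that already hold
 * mid_lock can invoke it directly if they need to. */
static void destroy_mid(struct mid *m)
{
    printf("destroying mid %d\n", m->id);
    free(m);
}

/* Thin wrapper for today's callers: dequeue under the lock, then
 * destroy outside of it. */
static void delete_mid(struct mid *m)
{
    pthread_mutex_lock(&mid_lock);
    m->queued = 0;
    pthread_mutex_unlock(&mid_lock);

    destroy_mid(m);
}

int main(void)
{
    struct mid *m = malloc(sizeof(*m));

    if (!m)
        return 1;
    m->id = 1;
    m->queued = 1;
    delete_mid(m);
    return 0;
}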
Reviewed-by: Suresh Jayaraman Reviewed-by: Pavel Shilovsky Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/transport.c | 41 ++++++++++++++++++++++++----------------- 1 file changed, 24 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index b9eb0cffa003..801726b11e1e 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -63,9 +63,6 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) atomic_inc(&midCount); temp->midState = MID_REQUEST_ALLOCATED; - spin_lock(&GlobalMid_Lock); - list_add_tail(&temp->qhead, &server->pending_mid_q); - spin_unlock(&GlobalMid_Lock); return temp; } @@ -75,10 +72,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry) #ifdef CONFIG_CIFS_STATS2 unsigned long now; #endif - spin_lock(&GlobalMid_Lock); midEntry->midState = MID_FREE; - list_del(&midEntry->qhead); - spin_unlock(&GlobalMid_Lock); atomic_dec(&midCount); if (midEntry->largeBuf) cifs_buf_release(midEntry->resp_buf); @@ -103,6 +97,16 @@ DeleteMidQEntry(struct mid_q_entry *midEntry) mempool_free(midEntry, cifs_mid_poolp); } +static void +delete_mid(struct mid_q_entry *mid) +{ + spin_lock(&GlobalMid_Lock); + list_del(&mid->qhead); + spin_unlock(&GlobalMid_Lock); + + DeleteMidQEntry(mid); +} + static int smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) { @@ -308,6 +312,9 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf, *ppmidQ = AllocMidQEntry(in_buf, ses->server); if (*ppmidQ == NULL) return -ENOMEM; + spin_lock(&GlobalMid_Lock); + list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q); + spin_unlock(&GlobalMid_Lock); return 0; } @@ -508,7 +515,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, } } spin_unlock(&GlobalMid_Lock); - DeleteMidQEntry(midQ); + delete_mid(midQ); /* Update # of requests on wire to server */ atomic_dec(&ses->server->inFlight); wake_up(&ses->server->request_q); @@ -564,14 +571,14 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, if ((flags & CIFS_NO_RESP) == 0) midQ->resp_buf = NULL; /* mark it so buf will not be freed by - DeleteMidQEntry */ + delete_mid */ } else { rc = -EIO; cFYI(1, "Bad MID state?"); } out: - DeleteMidQEntry(midQ); + delete_mid(midQ); atomic_dec(&ses->server->inFlight); wake_up(&ses->server->request_q); @@ -699,7 +706,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, } } spin_unlock(&GlobalMid_Lock); - DeleteMidQEntry(midQ); + delete_mid(midQ); /* Update # of requests on wire to server */ atomic_dec(&ses->server->inFlight); wake_up(&ses->server->request_q); @@ -755,7 +762,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, } out: - DeleteMidQEntry(midQ); + delete_mid(midQ); atomic_dec(&ses->server->inFlight); wake_up(&ses->server->request_q); @@ -863,7 +870,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number); if (rc) { - DeleteMidQEntry(midQ); + delete_mid(midQ); mutex_unlock(&ses->server->srv_mutex); return rc; } @@ -880,7 +887,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, mutex_unlock(&ses->server->srv_mutex); if (rc < 0) { - DeleteMidQEntry(midQ); + delete_mid(midQ); return rc; } @@ -902,7 +909,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, rc = send_nt_cancel(tcon, in_buf, midQ); if (rc) { - DeleteMidQEntry(midQ); + delete_mid(midQ); return rc; } } else { @@ -914,7 +921,7 @@ 
SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, /* If we get -ENOLCK back the lock may have already been removed. Don't exit in this case. */ if (rc && rc != -ENOLCK) { - DeleteMidQEntry(midQ); + delete_mid(midQ); return rc; } } @@ -951,7 +958,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, } } spin_unlock(&GlobalMid_Lock); - DeleteMidQEntry(midQ); + delete_mid(midQ); return rc; } @@ -1001,7 +1008,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); out: - DeleteMidQEntry(midQ); + delete_mid(midQ); if (rstart && rc == -EACCES) return -ERESTARTSYS; return rc; -- cgit v1.2.2 From 053d50344568e5a4054266b44040297531125281 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:02 -0500 Subject: cifs: move mid result processing into common function Reviewed-by: Suresh Jayaraman Reviewed-by: Pavel Shilovsky Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/transport.c | 121 +++++++++++++++++++--------------------------------- 1 file changed, 43 insertions(+), 78 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 801726b11e1e..15059c7ef2ae 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -389,6 +389,42 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses, return rc; } +static int +sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) +{ + int rc = 0; + + spin_lock(&GlobalMid_Lock); + + if (mid->resp_buf) { + spin_unlock(&GlobalMid_Lock); + return rc; + } + + cERROR(1, "No response to cmd %d mid %d", mid->command, mid->mid); + if (mid->midState == MID_REQUEST_SUBMITTED) { + if (server->tcpStatus == CifsExiting) + rc = -EHOSTDOWN; + else { + server->tcpStatus = CifsNeedReconnect; + mid->midState = MID_RETRY_NEEDED; + } + } + + if (rc != -EHOSTDOWN) { + if (mid->midState == MID_RETRY_NEEDED) { + rc = -EAGAIN; + cFYI(1, "marking request for retry"); + } else { + rc = -EIO; + } + } + spin_unlock(&GlobalMid_Lock); + + delete_mid(mid); + return rc; +} + int SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, struct kvec *iov, int n_vec, int *pRespBufType /* ret */, @@ -492,37 +528,13 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, /* No user interrupts in wait - wreaks havoc with performance */ wait_for_response(ses, midQ, timeout, 10 * HZ); - spin_lock(&GlobalMid_Lock); - - if (midQ->resp_buf == NULL) { - cERROR(1, "No response to cmd %d mid %d", - midQ->command, midQ->mid); - if (midQ->midState == MID_REQUEST_SUBMITTED) { - if (ses->server->tcpStatus == CifsExiting) - rc = -EHOSTDOWN; - else { - ses->server->tcpStatus = CifsNeedReconnect; - midQ->midState = MID_RETRY_NEEDED; - } - } - - if (rc != -EHOSTDOWN) { - if (midQ->midState == MID_RETRY_NEEDED) { - rc = -EAGAIN; - cFYI(1, "marking request for retry"); - } else { - rc = -EIO; - } - } - spin_unlock(&GlobalMid_Lock); - delete_mid(midQ); - /* Update # of requests on wire to server */ + rc = sync_mid_result(midQ, ses->server); + if (rc != 0) { atomic_dec(&ses->server->inFlight); wake_up(&ses->server->request_q); return rc; } - spin_unlock(&GlobalMid_Lock); receive_len = midQ->resp_buf->smb_buf_length; if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { @@ -684,36 +696,13 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, /* No user interrupts in wait - wreaks havoc with performance */ wait_for_response(ses, midQ, timeout, 10 * HZ); - 
spin_lock(&GlobalMid_Lock); - if (midQ->resp_buf == NULL) { - cERROR(1, "No response for cmd %d mid %d", - midQ->command, midQ->mid); - if (midQ->midState == MID_REQUEST_SUBMITTED) { - if (ses->server->tcpStatus == CifsExiting) - rc = -EHOSTDOWN; - else { - ses->server->tcpStatus = CifsNeedReconnect; - midQ->midState = MID_RETRY_NEEDED; - } - } - - if (rc != -EHOSTDOWN) { - if (midQ->midState == MID_RETRY_NEEDED) { - rc = -EAGAIN; - cFYI(1, "marking request for retry"); - } else { - rc = -EIO; - } - } - spin_unlock(&GlobalMid_Lock); - delete_mid(midQ); - /* Update # of requests on wire to server */ + rc = sync_mid_result(midQ, ses->server); + if (rc != 0) { atomic_dec(&ses->server->inFlight); wake_up(&ses->server->request_q); return rc; } - spin_unlock(&GlobalMid_Lock); receive_len = midQ->resp_buf->smb_buf_length; if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { @@ -933,35 +922,11 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, } } - spin_lock(&GlobalMid_Lock); - if (midQ->resp_buf) { - spin_unlock(&GlobalMid_Lock); - receive_len = midQ->resp_buf->smb_buf_length; - } else { - cERROR(1, "No response for cmd %d mid %d", - midQ->command, midQ->mid); - if (midQ->midState == MID_REQUEST_SUBMITTED) { - if (ses->server->tcpStatus == CifsExiting) - rc = -EHOSTDOWN; - else { - ses->server->tcpStatus = CifsNeedReconnect; - midQ->midState = MID_RETRY_NEEDED; - } - } - - if (rc != -EHOSTDOWN) { - if (midQ->midState == MID_RETRY_NEEDED) { - rc = -EAGAIN; - cFYI(1, "marking request for retry"); - } else { - rc = -EIO; - } - } - spin_unlock(&GlobalMid_Lock); - delete_mid(midQ); + rc = sync_mid_result(midQ, ses->server); + if (rc != 0) return rc; - } + receive_len = midQ->resp_buf->smb_buf_length; if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { cERROR(1, "Frame too large received. Length: %d Xid: %d", receive_len, xid); -- cgit v1.2.2 From 1cd3508d5eab6a88fa643119cedd34b04215cffe Mon Sep 17 00:00:00 2001 From: Steve French Date: Wed, 19 Jan 2011 17:53:44 +0000 Subject: [CIFS] Update cifs version number Signed-off-by: Steve French --- fs/cifs/cifsfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 851030f74939..4739a531cded 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -118,5 +118,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* EXPERIMENTAL */ -#define CIFS_VERSION "1.68" +#define CIFS_VERSION "1.69" #endif /* _CIFSFS_H */ -- cgit v1.2.2 From 540b2e377797d8715469d408b887baa9310c5f3e Mon Sep 17 00:00:00 2001 From: Shirish Pargaonkar Date: Tue, 18 Jan 2011 22:33:54 -0600 Subject: cifs: Fix regression during share-level security mounts (Repost) NTLM response length was changed to 16 bytes instead of 24 bytes that are sent in Tree Connection Request during share-level security share mounts. Revert it back to 24 bytes. 
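The regression is a size mix-up: the NTLM challenge response placed in the tree connect request is 24 bytes, while 16 bytes is the session key length, so advancing the buffer cursor by the smaller constant truncates the blob and misplaces every field written after it. A small stand-alone sketch of the arithmetic (constants mirroring the ones in the hunk below, otherwise invented):

#include <stdio.h>
#include <string.h>

#define SESS_KEY_SIZE   16       /* session key length          */
#define AUTH_RESP_SIZE  24       /* NTLM challenge response len */

int main(void)
{
    unsigned char req[64] = { 0 };
    unsigned char ntlm_resp[AUTH_RESP_SIZE];
    unsigned char *p = req;

    memset(ntlm_resp, 0xab, sizeof(ntlm_resp));

    /* The response blob really is 24 bytes... */
    memcpy(p, ntlm_resp, AUTH_RESP_SIZE);

    /* ...so the cursor must advance by 24, not 16; otherwise the next
     * field lands on the blob's tail and the declared PasswordLength
     * no longer matches what was actually sent. */
    p += AUTH_RESP_SIZE;

    printf("next field starts at offset %td\n", p - req);
    return 0;
}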
Reported-and-Tested-by: Grzegorz Ozanski Acked-by: Jeff Layton Signed-off-by: Shirish Pargaonkar Acked-by: Suresh Jayaraman Cc: stable@kernel.org Signed-off-by: Steve French --- fs/cifs/connect.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 465ecad6d7cc..5c7f8450dbe0 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2927,7 +2927,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, bcc_ptr++; /* skip password */ /* already aligned so no need to do it below */ } else { - pSMB->PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); + pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); /* BB FIXME add code to fail this if NTLMv2 or Kerberos specified as required (when that support is added to the vfs in the future) as only NTLM or the much @@ -2945,7 +2945,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, #endif /* CIFS_WEAK_PW_HASH */ SMBNTencrypt(tcon->password, ses->server->cryptkey, bcc_ptr); - bcc_ptr += CIFS_SESS_KEY_SIZE; + bcc_ptr += CIFS_AUTH_RESP_SIZE; if (ses->capabilities & CAP_UNICODE) { /* must align unicode strings */ *bcc_ptr = 0; /* null byte password */ -- cgit v1.2.2 From 0da2a4ac33c291728d8be5bdb865467dcb078d13 Mon Sep 17 00:00:00 2001 From: Fred Isaman Date: Wed, 19 Jan 2011 14:18:50 -0500 Subject: NFS: fix handling of malloc failure during nfs_flush_multi() Cleanup of the allocated list entries should not call put_nfs_open_context() on each entry, as the context will always be NULL, causing an oops. Signed-off-by: Fred Isaman Signed-off-by: Trond Myklebust --- fs/nfs/write.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 10d648ea128b..c8278f4046cb 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -932,7 +932,7 @@ out_bad: while (!list_empty(&list)) { data = list_entry(list.next, struct nfs_write_data, pages); list_del(&data->pages); - nfs_writedata_release(data); + nfs_writedata_free(data); } nfs_redirty_request(req); return -ENOMEM; -- cgit v1.2.2 From 2fbc2f1729e785a7b2faf9d8d60926bb1ff62af0 Mon Sep 17 00:00:00 2001 From: Shirish Pargaonkar Date: Mon, 6 Dec 2010 14:56:46 -0600 Subject: cifs: Use mask of ACEs for SID Everyone to calculate all three permissions user, group, and other If a DACL has entries for ACEs for SID Everyone and Authenticated Users, factor in mask in respective entries during calculation of permissions for all three, user, group, and other. 
http://technet.microsoft.com/en-us/library/bb463216.aspx Signed-off-by: Shirish Pargaonkar Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsacl.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index a437ec391a01..1e7636b145a8 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -41,9 +41,12 @@ static struct cifs_wksid wksidarr[NUM_WK_SIDS] = { ; -/* security id for everyone */ +/* security id for everyone/world system group */ static const struct cifs_sid sid_everyone = { 1, 1, {0, 0, 0, 0, 0, 1}, {0} }; +/* security id for Authenticated Users system group */ +static const struct cifs_sid sid_authusers = { + 1, 1, {0, 0, 0, 0, 0, 5}, {11} }; /* group users */ static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} }; @@ -365,7 +368,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, if (num_aces > 0) { umode_t user_mask = S_IRWXU; umode_t group_mask = S_IRWXG; - umode_t other_mask = S_IRWXO; + umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO; ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), GFP_KERNEL); @@ -390,6 +393,12 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, ppace[i]->type, &fattr->cf_mode, &other_mask); + if (compare_sids(&(ppace[i]->sid), &sid_authusers)) + access_flags_to_mode(ppace[i]->access_req, + ppace[i]->type, + &fattr->cf_mode, + &other_mask); + /* memcpy((void *)(&(cifscred->aces[i])), (void *)ppace[i], -- cgit v1.2.2 From 0ade640e9cda805692dbf688f4bb69e94719275a Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:02 -0500 Subject: cifs: wait indefinitely for responses The client should not be timing out on individual SMB requests. Too much of the state between client and server is tied to the state of the socket. If we time out requests and issue spurious disconnects then that comprimises data integrity. Instead of doing this complicated dance where we try to decide how long to wait for a response for particular requests, have the client instead wait indefinitely for a response. Also, use a TASK_KILLABLE sleep here so that fatal signals will break out of this waiting. Later patches will add support for detecting dead peers and forcing reconnects based on that. Reviewed-by: Suresh Jayaraman Reviewed-by: Pavel Shilovsky Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/transport.c | 110 ++++++++-------------------------------------------- 1 file changed, 17 insertions(+), 93 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 15059c7ef2ae..c41c9c4f0a79 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -318,48 +318,17 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf, return 0; } -static int wait_for_response(struct cifsSesInfo *ses, - struct mid_q_entry *midQ, - unsigned long timeout, - unsigned long time_to_wait) +static int +wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ) { - unsigned long curr_timeout; - - for (;;) { - curr_timeout = timeout + jiffies; - wait_event_timeout(ses->server->response_q, - midQ->midState != MID_REQUEST_SUBMITTED, timeout); - - if (time_after(jiffies, curr_timeout) && - (midQ->midState == MID_REQUEST_SUBMITTED) && - ((ses->server->tcpStatus == CifsGood) || - (ses->server->tcpStatus == CifsNew))) { - - unsigned long lrt; + int error; - /* We timed out. Is the server still - sending replies ? 
*/ - spin_lock(&GlobalMid_Lock); - lrt = ses->server->lstrp; - spin_unlock(&GlobalMid_Lock); + error = wait_event_killable(server->response_q, + midQ->midState != MID_REQUEST_SUBMITTED); + if (error < 0) + return -ERESTARTSYS; - /* Calculate time_to_wait past last receive time. - Although we prefer not to time out if the - server is still responding - we will time - out if the server takes more than 15 (or 45 - or 180) seconds to respond to this request - and has not responded to any request from - other threads on the client within 10 seconds */ - lrt += time_to_wait; - if (time_after(jiffies, lrt)) { - /* No replies for time_to_wait. */ - cERROR(1, "server not responding"); - return -1; - } - } else { - return 0; - } - } + return 0; } @@ -433,7 +402,6 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, int rc = 0; int long_op; unsigned int receive_len; - unsigned long timeout; struct mid_q_entry *midQ; struct smb_hdr *in_buf = iov[0].iov_base; @@ -500,33 +468,12 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, if (rc < 0) goto out; - if (long_op == CIFS_STD_OP) - timeout = 15 * HZ; - else if (long_op == CIFS_VLONG_OP) /* e.g. slow writes past EOF */ - timeout = 180 * HZ; - else if (long_op == CIFS_LONG_OP) - timeout = 45 * HZ; /* should be greater than - servers oplock break timeout (about 43 seconds) */ - else if (long_op == CIFS_ASYNC_OP) - goto out; - else if (long_op == CIFS_BLOCKING_OP) - timeout = 0x7FFFFFFF; /* large, but not so large as to wrap */ - else { - cERROR(1, "unknown timeout flag %d", long_op); - rc = -EIO; + if (long_op == CIFS_ASYNC_OP) goto out; - } - - /* wait for 15 seconds or until woken up due to response arriving or - due to last connection to this server being unmounted */ - if (signal_pending(current)) { - /* if signal pending do not hold up user for full smb timeout - but we still give response a chance to complete */ - timeout = 2 * HZ; - } - /* No user interrupts in wait - wreaks havoc with performance */ - wait_for_response(ses, midQ, timeout, 10 * HZ); + rc = wait_for_response(ses->server, midQ); + if (rc != 0) + goto out; rc = sync_mid_result(midQ, ses->server); if (rc != 0) { @@ -604,7 +551,6 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, { int rc = 0; unsigned int receive_len; - unsigned long timeout; struct mid_q_entry *midQ; if (ses == NULL) { @@ -668,33 +614,12 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, if (rc < 0) goto out; - if (long_op == CIFS_STD_OP) - timeout = 15 * HZ; - /* wait for 15 seconds or until woken up due to response arriving or - due to last connection to this server being unmounted */ - else if (long_op == CIFS_ASYNC_OP) - goto out; - else if (long_op == CIFS_VLONG_OP) /* writes past EOF can be slow */ - timeout = 180 * HZ; - else if (long_op == CIFS_LONG_OP) - timeout = 45 * HZ; /* should be greater than - servers oplock break timeout (about 43 seconds) */ - else if (long_op == CIFS_BLOCKING_OP) - timeout = 0x7FFFFFFF; /* large but no so large as to wrap */ - else { - cERROR(1, "unknown timeout flag %d", long_op); - rc = -EIO; + if (long_op == CIFS_ASYNC_OP) goto out; - } - if (signal_pending(current)) { - /* if signal pending do not hold up user for full smb timeout - but we still give response a chance to complete */ - timeout = 2 * HZ; - } - - /* No user interrupts in wait - wreaks havoc with performance */ - wait_for_response(ses, midQ, timeout, 10 * HZ); + rc = wait_for_response(ses->server, midQ); + if (rc != 0) + goto out; rc = sync_mid_result(midQ, 
ses->server); if (rc != 0) { @@ -915,8 +840,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, } } - /* Wait 5 seconds for the response. */ - if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ) == 0) { + if (wait_for_response(ses->server, midQ) == 0) { /* We got the response - restart system call. */ rstart = 1; } -- cgit v1.2.2 From dad255b182363db1d1124458cd3fb0a4deac0d0f Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:02 -0500 Subject: cifs: don't reconnect server when we don't get a response We only want to force a reconnect to the server under very limited and specific circumstances. Now that we have processes waiting indefinitely for responses, we shouldn't reach this point unless a reconnect is already in process. Thus, there's no reason to re-mark the server for reconnect here. Reviewed-by: Suresh Jayaraman Reviewed-by: Pavel Shilovsky Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/transport.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index c41c9c4f0a79..f65cdec042e4 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -374,10 +374,8 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) if (mid->midState == MID_REQUEST_SUBMITTED) { if (server->tcpStatus == CifsExiting) rc = -EHOSTDOWN; - else { - server->tcpStatus = CifsNeedReconnect; + else mid->midState = MID_RETRY_NEEDED; - } } if (rc != -EHOSTDOWN) { -- cgit v1.2.2 From 74dd92a881b62014ca3c754db6868e1f142f2fb9 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:02 -0500 Subject: cifs: clean up sync_mid_result Make it use a switch statement based on the value of the midStatus. If the resp_buf is set, then MID_RESPONSE_RECEIVED is too. 
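The cleanup collapses the repeated if/else chains into one table: mid state in, errno out. A stand-alone model of that mapping (state names mirror the ones used in the patch below, the rest is invented):

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

enum mid_state {                 /* illustrative stand-ins */
    MID_RESPONSE_RECEIVED,
    MID_REQUEST_SUBMITTED,
    MID_RETRY_NEEDED,
};

/* One switch decides the outcome: a received response succeeds, a
 * still-submitted request fails hard only if the socket is going
 * down, and anything marked for retry maps to -EAGAIN. */
static int mid_result(enum mid_state state, bool tcp_exiting)
{
    switch (state) {
    case MID_RESPONSE_RECEIVED:
        return 0;
    case MID_REQUEST_SUBMITTED:
        if (tcp_exiting)
            return -EHOSTDOWN;
        /* fall through: treat as retryable */
    case MID_RETRY_NEEDED:
        return -EAGAIN;
    default:
        return -EIO;
    }
}

int main(void)
{
    printf("%d %d %d\n",
           mid_result(MID_RESPONSE_RECEIVED, false),
           mid_result(MID_REQUEST_SUBMITTED, true),
           mid_result(MID_RETRY_NEEDED, false));
    return 0;
}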
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/transport.c | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index f65cdec042e4..6abd1445c983 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -363,28 +363,29 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) { int rc = 0; - spin_lock(&GlobalMid_Lock); + cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command, + mid->mid, mid->midState); - if (mid->resp_buf) { + spin_lock(&GlobalMid_Lock); + switch (mid->midState) { + case MID_RESPONSE_RECEIVED: spin_unlock(&GlobalMid_Lock); return rc; - } - - cERROR(1, "No response to cmd %d mid %d", mid->command, mid->mid); - if (mid->midState == MID_REQUEST_SUBMITTED) { - if (server->tcpStatus == CifsExiting) + case MID_REQUEST_SUBMITTED: + /* socket is going down, reject all calls */ + if (server->tcpStatus == CifsExiting) { + cERROR(1, "%s: canceling mid=%d cmd=0x%x state=%d", + __func__, mid->mid, mid->command, mid->midState); rc = -EHOSTDOWN; - else - mid->midState = MID_RETRY_NEEDED; - } - - if (rc != -EHOSTDOWN) { - if (mid->midState == MID_RETRY_NEEDED) { - rc = -EAGAIN; - cFYI(1, "marking request for retry"); - } else { - rc = -EIO; + break; } + case MID_RETRY_NEEDED: + rc = -EAGAIN; + break; + default: + cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__, + mid->mid, mid->midState); + rc = -EIO; } spin_unlock(&GlobalMid_Lock); -- cgit v1.2.2 From 2b84a36c5529da136d28b268e75268892d09869c Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:21 -0500 Subject: cifs: allow for different handling of received response In order to incorporate async requests, we need to allow for a more general way to do things on receive, rather than just waking up a process. Turn the task pointer in the mid_q_entry into a callback function and a generic data pointer. When a response comes in, or the socket is reconnected, cifsd can call the callback function in order to wake up the process. The default is to just wake up the current process which should mean no change in behavior for existing code. Also, clean up the locking in cifs_reconnect. There doesn't seem to be any need to hold both the srv_mutex and GlobalMid_Lock when walking the list of mids. 
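The structural move is replacing "a task to wake" with "a callback plus an opaque data pointer", with the default callback reproducing the old wake-up behaviour. A minimal model of that indirection (invented code; the real fields are the ones added to cifsglob.h in the diff below):

#include <stdio.h>

struct mid;
typedef void (mid_callback_t)(struct mid *mid);

struct mid {
    mid_callback_t *callback;    /* run when the response arrives */
    void *callback_data;         /* general-purpose pointer       */
};

/* Default completion: behave like the old code and "wake" whoever
 * queued the request (here just a named waiter). */
static void wake_up_waiter(struct mid *m)
{
    printf("waking %s\n", (const char *)m->callback_data);
}

int main(void)
{
    char waiter[] = "the issuing task";
    struct mid m = { .callback = wake_up_waiter, .callback_data = waiter };

    /* What the demux thread does once a matching response is read. */
    m.callback(&m);
    return 0;
}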
Reviewed-by: Suresh Jayaraman Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifs_debug.c | 8 ++++---- fs/cifs/cifsglob.h | 15 ++++++++++++++- fs/cifs/connect.c | 53 +++++++++++++++++++++++++--------------------------- fs/cifs/transport.c | 19 +++++++++++++++++-- 4 files changed, 60 insertions(+), 35 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index e2d0d5d455fa..65829d32128c 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -79,11 +79,11 @@ void cifs_dump_mids(struct TCP_Server_Info *server) spin_lock(&GlobalMid_Lock); list_for_each(tmp, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); - cERROR(1, "State: %d Cmd: %d Pid: %d Tsk: %p Mid %d", + cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %d", mid_entry->midState, (int)mid_entry->command, mid_entry->pid, - mid_entry->tsk, + mid_entry->callback_data, mid_entry->mid); #ifdef CONFIG_CIFS_STATS2 cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld", @@ -218,11 +218,11 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) mid_entry = list_entry(tmp3, struct mid_q_entry, qhead); seq_printf(m, "\tState: %d com: %d pid:" - " %d tsk: %p mid %d\n", + " %d cbdata: %p mid %d\n", mid_entry->midState, (int)mid_entry->command, mid_entry->pid, - mid_entry->tsk, + mid_entry->callback_data, mid_entry->mid); } spin_unlock(&GlobalMid_Lock); diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 606ca8bb7102..4de737575959 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -508,6 +508,18 @@ static inline void cifs_stats_bytes_read(struct cifsTconInfo *tcon, #endif +struct mid_q_entry; + +/* + * This is the prototype for the mid callback function. When creating one, + * take special care to avoid deadlocks. 
Things to bear in mind: + * + * - it will be called by cifsd + * - the GlobalMid_Lock will be held + * - the mid will be removed from the pending_mid_q list + */ +typedef void (mid_callback_t)(struct mid_q_entry *mid); + /* one of these for every pending CIFS request to the server */ struct mid_q_entry { struct list_head qhead; /* mids waiting on reply from this server */ @@ -519,7 +531,8 @@ struct mid_q_entry { unsigned long when_sent; /* time when smb send finished */ unsigned long when_received; /* when demux complete (taken off wire) */ #endif - struct task_struct *tsk; /* task waiting for response */ + mid_callback_t *callback; /* call completion callback */ + void *callback_data; /* general purpose pointer for callback */ struct smb_hdr *resp_buf; /* response buffer */ int midState; /* wish this were enum but can not pass to wait_event */ __u8 command; /* smb command code */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 5c7f8450dbe0..aa66de1db5f5 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -152,6 +152,7 @@ cifs_reconnect(struct TCP_Server_Info *server) /* before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they are not used until reconnected */ + cFYI(1, "%s: marking sessions and tcons for reconnect", __func__); spin_lock(&cifs_tcp_ses_lock); list_for_each(tmp, &server->smb_ses_list) { ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); @@ -163,7 +164,9 @@ cifs_reconnect(struct TCP_Server_Info *server) } } spin_unlock(&cifs_tcp_ses_lock); + /* do not want to be sending data on a socket we are freeing */ + cFYI(1, "%s: tearing down socket", __func__); mutex_lock(&server->srv_mutex); if (server->ssocket) { cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state, @@ -180,22 +183,19 @@ cifs_reconnect(struct TCP_Server_Info *server) kfree(server->session_key.response); server->session_key.response = NULL; server->session_key.len = 0; + mutex_unlock(&server->srv_mutex); + /* mark submitted MIDs for retry and issue callback */ + cFYI(1, "%s: issuing mid callbacks", __func__); spin_lock(&GlobalMid_Lock); - list_for_each(tmp, &server->pending_mid_q) { - mid_entry = list_entry(tmp, struct - mid_q_entry, - qhead); - if (mid_entry->midState == MID_REQUEST_SUBMITTED) { - /* Mark other intransit requests as needing - retry so we do not immediately mark the - session bad again (ie after we reconnect - below) as they timeout too */ + list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { + mid_entry = list_entry(tmp, struct mid_q_entry, qhead); + if (mid_entry->midState == MID_REQUEST_SUBMITTED) mid_entry->midState = MID_RETRY_NEEDED; - } + list_del_init(&mid_entry->qhead); + mid_entry->callback(mid_entry); } spin_unlock(&GlobalMid_Lock); - mutex_unlock(&server->srv_mutex); while ((server->tcpStatus != CifsExiting) && (server->tcpStatus != CifsGood)) { @@ -212,10 +212,9 @@ cifs_reconnect(struct TCP_Server_Info *server) if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsGood; spin_unlock(&GlobalMid_Lock); - /* atomic_set(&server->inFlight,0);*/ - wake_up(&server->response_q); } } + return rc; } @@ -345,7 +344,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) struct msghdr smb_msg; struct kvec iov; struct socket *csocket = server->ssocket; - struct list_head *tmp; + struct list_head *tmp, *tmp2; struct task_struct *task_to_wake = NULL; struct mid_q_entry *mid_entry; char temp; @@ -558,10 +557,9 @@ incomplete_rcv: continue; } - - task_to_wake = NULL; + mid_entry = NULL; spin_lock(&GlobalMid_Lock); - 
list_for_each(tmp, &server->pending_mid_q) { + list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); if ((mid_entry->mid == smb_buffer->Mid) && @@ -602,8 +600,9 @@ incomplete_rcv: mid_entry->resp_buf = smb_buffer; mid_entry->largeBuf = isLargeBuf; multi_t2_fnd: - task_to_wake = mid_entry->tsk; mid_entry->midState = MID_RESPONSE_RECEIVED; + list_del_init(&mid_entry->qhead); + mid_entry->callback(mid_entry); #ifdef CONFIG_CIFS_STATS2 mid_entry->when_received = jiffies; #endif @@ -613,9 +612,11 @@ multi_t2_fnd: server->lstrp = jiffies; break; } + mid_entry = NULL; } spin_unlock(&GlobalMid_Lock); - if (task_to_wake) { + + if (mid_entry != NULL) { /* Was previous buf put in mpx struct for multi-rsp? */ if (!isMultiRsp) { /* smb buffer will be freed by user thread */ @@ -624,7 +625,6 @@ multi_t2_fnd: else smallbuf = NULL; } - wake_up_process(task_to_wake); } else if (!is_valid_oplock_break(smb_buffer, server) && !isMultiRsp) { cERROR(1, "No task to wake, unknown frame received! " @@ -678,15 +678,12 @@ multi_t2_fnd: if (!list_empty(&server->pending_mid_q)) { spin_lock(&GlobalMid_Lock); - list_for_each(tmp, &server->pending_mid_q) { + list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); - if (mid_entry->midState == MID_REQUEST_SUBMITTED) { - cFYI(1, "Clearing Mid 0x%x - waking up ", - mid_entry->mid); - task_to_wake = mid_entry->tsk; - if (task_to_wake) - wake_up_process(task_to_wake); - } + cFYI(1, "Clearing Mid 0x%x - issuing callback", + mid_entry->mid); + list_del_init(&mid_entry->qhead); + mid_entry->callback(mid_entry); } spin_unlock(&GlobalMid_Lock); /* 1/8th of sec is more than enough time for them to exit */ diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 6abd1445c983..d77b6154cf22 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -36,6 +36,12 @@ extern mempool_t *cifs_mid_poolp; +static void +wake_up_task(struct mid_q_entry *mid) +{ + wake_up_process(mid->callback_data); +} + static struct mid_q_entry * AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) { @@ -58,7 +64,13 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */ /* when mid allocated can be before when sent */ temp->when_alloc = jiffies; - temp->tsk = current; + + /* + * The default is for the mid to be synchronous, so the + * default callback just wakes up the current task. + */ + temp->callback = wake_up_task; + temp->callback_data = current; } atomic_inc(&midCount); @@ -367,6 +379,9 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) mid->mid, mid->midState); spin_lock(&GlobalMid_Lock); + /* ensure that it's no longer on the pending_mid_q */ + list_del_init(&mid->qhead); + switch (mid->midState) { case MID_RESPONSE_RECEIVED: spin_unlock(&GlobalMid_Lock); @@ -389,7 +404,7 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) } spin_unlock(&GlobalMid_Lock); - delete_mid(mid); + DeleteMidQEntry(mid); return rc; } -- cgit v1.2.2 From a6827c184ea9f5452e4aaa7c799dd3c7cc9ba05e Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:21 -0500 Subject: cifs: add cifs_call_async Add a function that will send a request, and set up the mid for an async reply. 
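In this model an async send is just: reserve a slot, allocate and queue a mid, attach the caller's callback, push the request and return without waiting; completion happens later from the demux thread. A compressed stand-alone sketch (stub helpers standing in for the real ones added below):

#include <stdio.h>
#include <stdlib.h>

struct server { int in_flight; };
struct mid;
typedef void (mid_callback_t)(struct mid *mid);
struct mid { mid_callback_t *callback; void *data; };

static struct mid *pending;      /* one-slot stand-in for pending_mid_q */

/* Stubs standing in for the real allocate/sign/send helpers. */
static struct mid *alloc_mid(void)      { return calloc(1, sizeof(struct mid)); }
static int send_buf(struct server *srv) { (void)srv; return 0; }

/* Fire-and-forget send: queue a mid carrying the caller's callback,
 * push the request, and return; the demux side completes it later. */
static int call_async(struct server *srv, mid_callback_t *cb, void *data)
{
    struct mid *mid = alloc_mid();
    int rc;

    if (!mid)
        return -1;
    mid->callback = cb;
    mid->data = data;
    pending = mid;
    srv->in_flight++;

    rc = send_buf(srv);
    if (rc) {                    /* error path: unqueue and drop the mid */
        pending = NULL;
        free(mid);
        srv->in_flight--;
    }
    return rc;
}

static void echo_done(struct mid *mid)
{
    struct server *srv = mid->data;

    puts("echo reply handled");
    srv->in_flight--;
    free(mid);
}

int main(void)
{
    struct server srv = { 0 };

    if (call_async(&srv, echo_done, &srv))
        return 1;

    /* Simulate the demux thread receiving the matching response. */
    pending->callback(pending);
    return srv.in_flight;        /* back to 0 once the callback ran */
}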
Reviewed-by: Suresh Jayaraman Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsproto.h | 5 +++++ fs/cifs/transport.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 62 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 95d5dbbb4c7a..7f2988bb8929 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -61,6 +61,11 @@ extern char *cifs_compose_mount_options(const char *sb_mountdata, const char *fullpath, const struct dfs_info3_param *ref, char **devname); /* extern void renew_parental_timestamps(struct dentry *direntry);*/ +extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, + struct TCP_Server_Info *server); +extern int cifs_call_async(struct TCP_Server_Info *server, + struct smb_hdr *in_buf, mid_callback_t *callback, + void *cbdata); extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *, struct smb_hdr * /* input */ , struct smb_hdr * /* out */ , diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index d77b6154cf22..166b65a3763e 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -42,7 +42,7 @@ wake_up_task(struct mid_q_entry *mid) wake_up_process(mid->callback_data); } -static struct mid_q_entry * +struct mid_q_entry * AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) { struct mid_q_entry *temp; @@ -344,6 +344,62 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ) } +/* + * Send a SMB request and set the callback function in the mid to handle + * the result. Caller is responsible for dealing with timeouts. + */ +int +cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf, + mid_callback_t *callback, void *cbdata) +{ + int rc; + struct mid_q_entry *mid; + + rc = wait_for_free_request(server, CIFS_ASYNC_OP); + if (rc) + return rc; + + mutex_lock(&server->srv_mutex); + mid = AllocMidQEntry(in_buf, server); + if (mid == NULL) { + mutex_unlock(&server->srv_mutex); + return -ENOMEM; + } + + /* put it on the pending_mid_q */ + spin_lock(&GlobalMid_Lock); + list_add_tail(&mid->qhead, &server->pending_mid_q); + spin_unlock(&GlobalMid_Lock); + + rc = cifs_sign_smb(in_buf, server, &mid->sequence_number); + if (rc) { + mutex_unlock(&server->srv_mutex); + goto out_err; + } + + mid->callback = callback; + mid->callback_data = cbdata; + mid->midState = MID_REQUEST_SUBMITTED; +#ifdef CONFIG_CIFS_STATS2 + atomic_inc(&server->inSend); +#endif + rc = smb_send(server, in_buf, in_buf->smb_buf_length); +#ifdef CONFIG_CIFS_STATS2 + atomic_dec(&server->inSend); + mid->when_sent = jiffies; +#endif + mutex_unlock(&server->srv_mutex); + if (rc) + goto out_err; + + return rc; +out_err: + delete_mid(mid); + atomic_dec(&server->inFlight); + wake_up(&server->request_q); + return rc; +} + /* * * Send an SMB Request. 
No response info (other than return code) -- cgit v1.2.2 From 766fdbb57fdb1e53bc34c431103e95383d7f13ba Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:21 -0500 Subject: cifs: add ability to send an echo request Reviewed-by: Suresh Jayaraman Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifspdu.h | 15 +++++++++++++++ fs/cifs/cifsproto.h | 2 ++ fs/cifs/cifssmb.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++ fs/cifs/transport.c | 2 +- 4 files changed, 65 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index de36b09763a8..ea205b4fcad2 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h @@ -50,6 +50,7 @@ #define SMB_COM_SETATTR 0x09 /* trivial response */ #define SMB_COM_LOCKING_ANDX 0x24 /* trivial response */ #define SMB_COM_COPY 0x29 /* trivial rsp, fail filename ignrd*/ +#define SMB_COM_ECHO 0x2B /* echo request */ #define SMB_COM_OPEN_ANDX 0x2D /* Legacy open for old servers */ #define SMB_COM_READ_ANDX 0x2E #define SMB_COM_WRITE_ANDX 0x2F @@ -760,6 +761,20 @@ typedef struct smb_com_tconx_rsp_ext { * */ +typedef struct smb_com_echo_req { + struct smb_hdr hdr; + __le16 EchoCount; + __le16 ByteCount; + char Data[1]; +} __attribute__((packed)) ECHO_REQ; + +typedef struct smb_com_echo_rsp { + struct smb_hdr hdr; + __le16 SequenceNumber; + __le16 ByteCount; + char Data[1]; +} __attribute__((packed)) ECHO_RSP; + typedef struct smb_com_logoff_andx_req { struct smb_hdr hdr; /* wct = 2 */ __u8 AndXCommand; diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 7f2988bb8929..982895fa7615 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -63,6 +63,7 @@ extern char *cifs_compose_mount_options(const char *sb_mountdata, /* extern void renew_parental_timestamps(struct dentry *direntry);*/ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server); +extern void DeleteMidQEntry(struct mid_q_entry *midEntry); extern int cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf, mid_callback_t *callback, void *cbdata); @@ -358,6 +359,7 @@ extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, const __u64 len, struct file_lock *, const __u16 lock_type, const bool waitFlag); extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon); +extern int CIFSSMBEcho(struct TCP_Server_Info *server); extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses); extern struct cifsSesInfo *sesInfoAlloc(void); diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 3652cc60314c..54b9f5d8d1db 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -706,6 +706,53 @@ CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon) return rc; } +/* + * This is a no-op for now. We're not really interested in the reply, but + * rather in the fact that the server sent one and that server->lstrp + * gets updated. + * + * FIXME: maybe we should consider checking that the reply matches request? 
+ */ +static void +cifs_echo_callback(struct mid_q_entry *mid) +{ + struct TCP_Server_Info *server = mid->callback_data; + + DeleteMidQEntry(mid); + atomic_dec(&server->inFlight); + wake_up(&server->request_q); +} + +int +CIFSSMBEcho(struct TCP_Server_Info *server) +{ + ECHO_REQ *smb; + int rc = 0; + + cFYI(1, "In echo request"); + + rc = small_smb_init(SMB_COM_ECHO, 0, NULL, (void **)&smb); + if (rc) + return rc; + + /* set up echo request */ + smb->hdr.Tid = cpu_to_le16(0xffff); + smb->hdr.WordCount = cpu_to_le16(1); + smb->EchoCount = cpu_to_le16(1); + smb->ByteCount = cpu_to_le16(1); + smb->Data[0] = 'a'; + smb->hdr.smb_buf_length += 3; + + rc = cifs_call_async(server, (struct smb_hdr *)smb, + cifs_echo_callback, server); + if (rc) + cFYI(1, "Echo request failed: %d", rc); + + cifs_small_buf_release(smb); + + return rc; +} + int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses) { diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 166b65a3763e..a0cef4960516 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -78,7 +78,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) return temp; } -static void +void DeleteMidQEntry(struct mid_q_entry *midEntry) { #ifdef CONFIG_CIFS_STATS2 -- cgit v1.2.2 From c74093b694998d30105d9904686da5e3576497c4 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:23 -0500 Subject: cifs: set up recurring workqueue job to do SMB echo requests Reviewed-by: Suresh Jayaraman Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsglob.h | 1 + fs/cifs/connect.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) (limited to 'fs') diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 4de737575959..9c728dd5b146 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -218,6 +218,7 @@ struct TCP_Server_Info { bool sec_kerberosu2u; /* supports U2U Kerberos */ bool sec_ntlmssp; /* supports NTLMSSP */ bool session_estab; /* mark when very first sess is established */ + struct delayed_work echo; /* echo ping workqueue job */ #ifdef CONFIG_CIFS_FSCACHE struct fscache_cookie *fscache; /* client index cache cookie */ #endif diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index aa66de1db5f5..f38ca084c9d2 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -52,6 +52,9 @@ #define CIFS_PORT 445 #define RFC1001_PORT 139 +/* SMB echo "timeout" -- FIXME: tunable? 
*/ +#define SMB_ECHO_INTERVAL (60 * HZ) + extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24); @@ -333,6 +336,26 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) } +static void +cifs_echo_request(struct work_struct *work) +{ + int rc; + struct TCP_Server_Info *server = container_of(work, + struct TCP_Server_Info, echo.work); + + /* no need to ping if we got a response recently */ + if (time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) + goto requeue_echo; + + rc = CIFSSMBEcho(server); + if (rc) + cFYI(1, "Unable to send echo request to server: %s", + server->hostname); + +requeue_echo: + queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL); +} + static int cifs_demultiplex_thread(struct TCP_Server_Info *server) { @@ -1571,6 +1594,8 @@ cifs_put_tcp_session(struct TCP_Server_Info *server) list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); + cancel_delayed_work_sync(&server->echo); + spin_lock(&GlobalMid_Lock); server->tcpStatus = CifsExiting; spin_unlock(&GlobalMid_Lock); @@ -1662,6 +1687,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) tcp_ses->sequence_number = 0; INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); INIT_LIST_HEAD(&tcp_ses->smb_ses_list); + INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); /* * at this point we are the only ones with the pointer @@ -1710,6 +1736,9 @@ cifs_get_tcp_session(struct smb_vol *volume_info) cifs_fscache_get_client_cookie(tcp_ses); + /* queue echo request delayed work */ + queue_delayed_work(system_nrt_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL); + return tcp_ses; out_err_crypto_release: -- cgit v1.2.2 From fda3594362184383e73f0a2a5fa5b38ac0e04fd8 Mon Sep 17 00:00:00 2001 From: Steve French Date: Thu, 20 Jan 2011 18:06:34 +0000 Subject: [CIFS] cifs: reconnect unresponsive servers If the server isn't responding to echoes, we don't want to leave tasks hung waiting for it to reply. At that point, we'll want to reconnect so that soft mounts can return an error to userspace quickly. If the client hasn't received a reply after a specified number of echo intervals, assume that the transport is down and attempt to reconnect the socket. The number of echo_intervals to wait before attempting to reconnect is tunable via a module parameter. Setting it to 0, means that the client will never attempt to reconnect. The default is 5. Signed-off-by: Jeff Layton --- fs/cifs/cifsfs.c | 6 +++++- fs/cifs/cifsglob.h | 3 +++ fs/cifs/connect.c | 21 +++++++++++++++++---- 3 files changed, 25 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index d9f652a522a6..99d777a03dd0 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -77,7 +77,11 @@ unsigned int cifs_max_pending = CIFS_MAX_REQ; module_param(cifs_max_pending, int, 0); MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. " "Default: 50 Range: 2 to 256"); - +unsigned short echo_retries = 5; +module_param(echo_retries, ushort, 0644); +MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and " + "reconnecting server. Default: 5. 
0 means " + "never reconnect."); extern mempool_t *cifs_sm_req_poolp; extern mempool_t *cifs_req_poolp; extern mempool_t *cifs_mid_poolp; diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 9c728dd5b146..7040abc638fa 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -804,6 +804,9 @@ GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */ GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */ GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/ +/* reconnect after this many failed echo attempts */ +GLOBAL_EXTERN unsigned short echo_retries; + void cifs_oplock_break(struct work_struct *work); void cifs_oplock_break_get(struct cifsFileInfo *cfile); void cifs_oplock_break_put(struct cifsFileInfo *cfile); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index f38ca084c9d2..f5d7b59a3553 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -186,6 +186,7 @@ cifs_reconnect(struct TCP_Server_Info *server) kfree(server->session_key.response); server->session_key.response = NULL; server->session_key.len = 0; + server->lstrp = jiffies; mutex_unlock(&server->srv_mutex); /* mark submitted MIDs for retry and issue callback */ @@ -420,7 +421,20 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server) smb_msg.msg_control = NULL; smb_msg.msg_controllen = 0; pdu_length = 4; /* enough to get RFC1001 header */ + incomplete_rcv: + if (echo_retries > 0 && + time_after(jiffies, server->lstrp + + (echo_retries * SMB_ECHO_INTERVAL))) { + cERROR(1, "Server %s has not responded in %d seconds. " + "Reconnecting...", server->hostname, + (echo_retries * SMB_ECHO_INTERVAL / HZ)); + cifs_reconnect(server); + csocket = server->ssocket; + wake_up(&server->response_q); + continue; + } + length = kernel_recvmsg(csocket, &smb_msg, &iov, 1, pdu_length, 0 /* BB other flags? */); @@ -581,6 +595,8 @@ incomplete_rcv: } mid_entry = NULL; + server->lstrp = jiffies; + spin_lock(&GlobalMid_Lock); list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); @@ -629,10 +645,6 @@ multi_t2_fnd: #ifdef CONFIG_CIFS_STATS2 mid_entry->when_received = jiffies; #endif - /* so we do not time out requests to server - which is still responding (since server could - be busy but not dead) */ - server->lstrp = jiffies; break; } mid_entry = NULL; @@ -1685,6 +1697,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL); tcp_ses->session_estab = false; tcp_ses->sequence_number = 0; + tcp_ses->lstrp = jiffies; INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); INIT_LIST_HEAD(&tcp_ses->smb_ses_list); INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); -- cgit v1.2.2 From 7749981ec31aa40e28a1ef5687e46bc1aa278fae Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:23 -0500 Subject: cifs: remove code for setting timeouts on requests Since we don't time out individual requests anymore, remove the code that we used to use for setting timeouts on different requests. 
Reviewed-by: Pavel Shilovsky Reviewed-by: Suresh Jayaraman Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsglob.h | 9 +++------ fs/cifs/cifssmb.c | 8 ++++---- fs/cifs/connect.c | 2 +- fs/cifs/file.c | 44 +++++++------------------------------------- fs/cifs/sess.c | 2 +- fs/cifs/transport.c | 2 +- 6 files changed, 17 insertions(+), 50 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 7040abc638fa..571132c95231 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -636,12 +636,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param, #define CIFS_IOVEC 4 /* array of response buffers */ /* Type of Request to SendReceive2 */ -#define CIFS_STD_OP 0 /* normal request timeout */ -#define CIFS_LONG_OP 1 /* long op (up to 45 sec, oplock time) */ -#define CIFS_VLONG_OP 2 /* sloow op - can take up to 180 seconds */ -#define CIFS_BLOCKING_OP 4 /* operation can block */ -#define CIFS_ASYNC_OP 8 /* do not wait for response */ -#define CIFS_TIMEOUT_MASK 0x00F /* only one of 5 above set in req */ +#define CIFS_BLOCKING_OP 1 /* operation can block */ +#define CIFS_ASYNC_OP 2 /* do not wait for response */ +#define CIFS_TIMEOUT_MASK 0x003 /* only one of above set in req */ #define CIFS_LOG_ERROR 0x010 /* log NT STATUS if non-zero */ #define CIFS_LARGE_BUF_OP 0x020 /* large request buffer */ #define CIFS_NO_RESP 0x040 /* no response buffer required */ diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 54b9f5d8d1db..37113450757b 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1240,7 +1240,7 @@ OldOpenRetry: pSMB->ByteCount = cpu_to_le16(count); /* long_op set to 1 to allow for oplock break timeouts */ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, - (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP); + (struct smb_hdr *)pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_opens); if (rc) { cFYI(1, "Error in Open = %d", rc); @@ -1353,7 +1353,7 @@ openRetry: pSMB->ByteCount = cpu_to_le16(count); /* long_op set to 1 to allow for oplock break timeouts */ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB, - (struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP); + (struct smb_hdr *)pSMBr, &bytes_returned, 0); cifs_stats_inc(&tcon->num_opens); if (rc) { cFYI(1, "Error in Open = %d", rc); @@ -1435,7 +1435,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid, iov[0].iov_base = (char *)pSMB; iov[0].iov_len = pSMB->hdr.smb_buf_length + 4; rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */, - &resp_buf_type, CIFS_STD_OP | CIFS_LOG_ERROR); + &resp_buf_type, CIFS_LOG_ERROR); cifs_stats_inc(&tcon->num_reads); pSMBr = (READ_RSP *)iov[0].iov_base; if (rc) { @@ -3136,7 +3136,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, iov[0].iov_len = pSMB->hdr.smb_buf_length + 4; rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type, - CIFS_STD_OP); + 0); cifs_stats_inc(&tcon->num_acl_get); if (rc) { cFYI(1, "Send error in QuerySecDesc = %d", rc); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index f5d7b59a3553..8d4657596301 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3022,7 +3022,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, - CIFS_STD_OP); + 0); /* above now done in SendReceive */ if ((rc == 0) && (tcon != NULL)) { diff --git a/fs/cifs/file.c b/fs/cifs/file.c index cfa2e5ebcafe..bd2a028af833 100644 --- 
a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -839,29 +839,6 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) return rc; } -/* - * Set the timeout on write requests past EOF. For some servers (Windows) - * these calls can be very long. - * - * If we're writing >10M past the EOF we give a 180s timeout. Anything less - * than that gets a 45s timeout. Writes not past EOF get 15s timeouts. - * The 10M cutoff is totally arbitrary. A better scheme for this would be - * welcome if someone wants to suggest one. - * - * We may be able to do a better job with this if there were some way to - * declare that a file should be sparse. - */ -static int -cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset) -{ - if (offset <= cifsi->server_eof) - return CIFS_STD_OP; - else if (offset > (cifsi->server_eof + (10 * 1024 * 1024))) - return CIFS_VLONG_OP; - else - return CIFS_LONG_OP; -} - /* update the file size (if needed) after a write */ static void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, @@ -882,7 +859,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data, unsigned int total_written; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *pTcon; - int xid, long_op; + int xid; struct cifsFileInfo *open_file; struct cifsInodeInfo *cifsi = CIFS_I(inode); @@ -903,7 +880,6 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data, xid = GetXid(); - long_op = cifs_write_timeout(cifsi, *poffset); for (total_written = 0; write_size > total_written; total_written += bytes_written) { rc = -EAGAIN; @@ -931,7 +907,7 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data, min_t(const int, cifs_sb->wsize, write_size - total_written), *poffset, &bytes_written, - NULL, write_data + total_written, long_op); + NULL, write_data + total_written, 0); } if (rc || (bytes_written == 0)) { if (total_written) @@ -944,8 +920,6 @@ ssize_t cifs_user_write(struct file *file, const char __user *write_data, cifs_update_eof(cifsi, *poffset, bytes_written); *poffset += bytes_written; } - long_op = CIFS_STD_OP; /* subsequent writes fast - - 15 seconds is plenty */ } cifs_stats_bytes_written(pTcon, total_written); @@ -974,7 +948,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, unsigned int total_written; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *pTcon; - int xid, long_op; + int xid; struct dentry *dentry = open_file->dentry; struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode); @@ -987,7 +961,6 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, xid = GetXid(); - long_op = cifs_write_timeout(cifsi, *poffset); for (total_written = 0; write_size > total_written; total_written += bytes_written) { rc = -EAGAIN; @@ -1017,7 +990,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len, *poffset, &bytes_written, - iov, 1, long_op); + iov, 1, 0); } else rc = CIFSSMBWrite(xid, pTcon, open_file->netfid, @@ -1025,7 +998,7 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, write_size - total_written), *poffset, &bytes_written, write_data + total_written, - NULL, long_op); + NULL, 0); } if (rc || (bytes_written == 0)) { if (total_written) @@ -1038,8 +1011,6 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, cifs_update_eof(cifsi, *poffset, bytes_written); *poffset += bytes_written; } - long_op = CIFS_STD_OP; /* subsequent writes fast - - 15 seconds is plenty */ } cifs_stats_bytes_written(pTcon, total_written); @@ -1239,7 +1210,7 @@ static 
int cifs_writepages(struct address_space *mapping, struct pagevec pvec; int rc = 0; int scanned = 0; - int xid, long_op; + int xid; cifs_sb = CIFS_SB(mapping->host->i_sb); @@ -1384,11 +1355,10 @@ retry_write: cERROR(1, "No writable handles for inode"); rc = -EBADF; } else { - long_op = cifs_write_timeout(cifsi, offset); rc = CIFSSMBWrite2(xid, tcon, open_file->netfid, bytes_to_write, offset, &bytes_written, iov, n_iov, - long_op); + 0); cifsFileInfo_put(open_file); } diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index eb746486e49e..1cffd82c4f13 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -879,7 +879,7 @@ ssetup_ntlmssp_authenticate: BCC_LE(smb_buf) = cpu_to_le16(count); rc = SendReceive2(xid, ses, iov, 3 /* num_iovecs */, &resp_buf_type, - CIFS_STD_OP /* not long */ | CIFS_LOG_ERROR); + CIFS_LOG_ERROR); /* SMB request buf freed in SendReceive2 */ pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base; diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index a0cef4960516..fe92c4cb75f5 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -798,7 +798,7 @@ send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon, pSMB->hdr.Mid = GetNextMid(ses->server); return SendReceive(xid, ses, in_buf, out_buf, - &bytes_returned, CIFS_STD_OP); + &bytes_returned, 0); } int -- cgit v1.2.2 From 76dcc26f1d7f1c98c3f595379dcd9562f01bf38d Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 11 Jan 2011 07:24:24 -0500 Subject: cifs: mangle existing header for SMB_COM_NT_CANCEL The NT_CANCEL command looks just like the original command, except for a few small differences. The send_nt_cancel function however currently takes a tcon, which we don't have in SendReceive and SendReceive2. Instead of "respinning" the entire header for an NT_CANCEL, just mangle the existing header by replacing just the fields we need. This means we don't need a tcon and allows us to call it from other places. Reviewed-by: Pavel Shilovsky Reviewed-by: Suresh Jayaraman Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/transport.c | 63 ++++++++++++++++++++++++++++++++--------------------- 1 file changed, 38 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index fe92c4cb75f5..c8e2808cd5e6 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -464,6 +464,43 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) return rc; } +/* + * An NT cancel request header looks just like the original request except: + * + * The Command is SMB_COM_NT_CANCEL + * The WordCount is zeroed out + * The ByteCount is zeroed out + * + * This function mangles an existing request buffer into a + * SMB_COM_NT_CANCEL request and then sends it. 
+ */ +static int +send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf, + struct mid_q_entry *mid) +{ + int rc = 0; + + /* -4 for RFC1001 length and +2 for BCC field */ + in_buf->smb_buf_length = sizeof(struct smb_hdr) - 4 + 2; + in_buf->Command = SMB_COM_NT_CANCEL; + in_buf->WordCount = 0; + BCC_LE(in_buf) = 0; + + mutex_lock(&server->srv_mutex); + rc = cifs_sign_smb(in_buf, server, &mid->sequence_number); + if (rc) { + mutex_unlock(&server->srv_mutex); + return rc; + } + rc = smb_send(server, in_buf, in_buf->smb_buf_length); + mutex_unlock(&server->srv_mutex); + + cFYI(1, "issued NT_CANCEL for mid %u, rc = %d", + in_buf->Mid, rc); + + return rc; +} + int SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, struct kvec *iov, int n_vec, int *pRespBufType /* ret */, @@ -753,29 +790,6 @@ out: return rc; } -/* Send an NT_CANCEL SMB to cause the POSIX blocking lock to return. */ - -static int -send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf, - struct mid_q_entry *midQ) -{ - int rc = 0; - struct cifsSesInfo *ses = tcon->ses; - __u16 mid = in_buf->Mid; - - header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0); - in_buf->Mid = mid; - mutex_lock(&ses->server->srv_mutex); - rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number); - if (rc) { - mutex_unlock(&ses->server->srv_mutex); - return rc; - } - rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length); - mutex_unlock(&ses->server->srv_mutex); - return rc; -} - /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows blocking lock to return. */ @@ -890,8 +904,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, if (in_buf->Command == SMB_COM_TRANSACTION2) { /* POSIX lock. We send a NT_CANCEL SMB to cause the blocking lock to return. */ - - rc = send_nt_cancel(tcon, in_buf, midQ); + rc = send_nt_cancel(ses->server, in_buf, midQ); if (rc) { delete_mid(midQ); return rc; -- cgit v1.2.2 From 4f8ba8a0c095933dd54a2c281750c8a85b329b26 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Sun, 21 Nov 2010 22:36:12 +0300 Subject: CIFS: Make cifsFileInfo_put work with strict cache mode On strict cache mode when we close the last file handle of the inode we should set invalid_mapping flag on this inode to prevent data coherency problem when we open it again but it has been modified on the server. 
Reviewed-by: Jeff Layton Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/cifs_fs_sb.h | 1 + fs/cifs/file.c | 8 ++++++++ 2 files changed, 9 insertions(+) (limited to 'fs') diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h index 7852cd677051..ac51cd2d33ae 100644 --- a/fs/cifs/cifs_fs_sb.h +++ b/fs/cifs/cifs_fs_sb.h @@ -40,6 +40,7 @@ #define CIFS_MOUNT_FSCACHE 0x8000 /* local caching enabled */ #define CIFS_MOUNT_MF_SYMLINKS 0x10000 /* Minshall+French Symlinks enabled */ #define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */ +#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */ struct cifs_sb_info { struct rb_root tlink_tree; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index bd2a028af833..1b26c2717599 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -287,6 +287,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) struct inode *inode = cifs_file->dentry->d_inode; struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink); struct cifsInodeInfo *cifsi = CIFS_I(inode); + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifsLockInfo *li, *tmp; spin_lock(&cifs_file_list_lock); @@ -302,6 +303,13 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) if (list_empty(&cifsi->openFileList)) { cFYI(1, "closing last open instance for inode %p", cifs_file->dentry->d_inode); + + /* in strict cache mode we need invalidate mapping on the last + close because it may cause a error when we open this file + again and get at least level II oplock */ + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) + CIFS_I(inode)->invalid_mapping = true; + cifs_set_oplock_level(cifsi, 0); } spin_unlock(&cifs_file_list_lock); -- cgit v1.2.2 From 8be7e6ba142423e6ad98fed293c96f196f685229 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Sun, 12 Dec 2010 13:11:13 +0300 Subject: CIFS: Implement cifs_strict_fsync Invalidate inode mapping if we don't have at least Level II oplock in cifs_strict_fsync. Also remove filemap_write_and_wait call from cifs_fsync because it is previously called from vfs_fsync_range. Add file operations' structures for strict cache mode. 
Reviewed-by: Jeff Layton Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 38 ++++++++++++++++++++++++++++++++++++++ fs/cifs/cifsfs.h | 8 ++++++-- fs/cifs/file.c | 36 ++++++++++++++++++++++++++++-------- fs/cifs/inode.c | 8 ++++++-- 4 files changed, 78 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 99d777a03dd0..f6093e401c5a 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -733,6 +733,25 @@ const struct file_operations cifs_file_ops = { .setlease = cifs_setlease, }; +const struct file_operations cifs_file_strict_ops = { + .read = do_sync_read, + .write = do_sync_write, + .aio_read = generic_file_aio_read, + .aio_write = cifs_file_aio_write, + .open = cifs_open, + .release = cifs_close, + .lock = cifs_lock, + .fsync = cifs_strict_fsync, + .flush = cifs_flush, + .mmap = cifs_file_mmap, + .splice_read = generic_file_splice_read, + .llseek = cifs_llseek, +#ifdef CONFIG_CIFS_POSIX + .unlocked_ioctl = cifs_ioctl, +#endif /* CONFIG_CIFS_POSIX */ + .setlease = cifs_setlease, +}; + const struct file_operations cifs_file_direct_ops = { /* no aio, no readv - BB reevaluate whether they can be done with directio, no cache */ @@ -751,6 +770,7 @@ const struct file_operations cifs_file_direct_ops = { .llseek = cifs_llseek, .setlease = cifs_setlease, }; + const struct file_operations cifs_file_nobrl_ops = { .read = do_sync_read, .write = do_sync_write, @@ -769,6 +789,24 @@ const struct file_operations cifs_file_nobrl_ops = { .setlease = cifs_setlease, }; +const struct file_operations cifs_file_strict_nobrl_ops = { + .read = do_sync_read, + .write = do_sync_write, + .aio_read = generic_file_aio_read, + .aio_write = cifs_file_aio_write, + .open = cifs_open, + .release = cifs_close, + .fsync = cifs_strict_fsync, + .flush = cifs_flush, + .mmap = cifs_file_mmap, + .splice_read = generic_file_splice_read, + .llseek = cifs_llseek, +#ifdef CONFIG_CIFS_POSIX + .unlocked_ioctl = cifs_ioctl, +#endif /* CONFIG_CIFS_POSIX */ + .setlease = cifs_setlease, +}; + const struct file_operations cifs_file_direct_nobrl_ops = { /* no mmap, no aio, no readv - BB reevaluate whether they can be done with directio, no cache */ diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 4739a531cded..10c4303c282d 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -61,6 +61,7 @@ extern int cifs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); extern int cifs_revalidate_file(struct file *filp); extern int cifs_revalidate_dentry(struct dentry *); +extern void cifs_invalidate_mapping(struct inode *inode); extern int cifs_getattr(struct vfsmount *, struct dentry *, struct kstat *); extern int cifs_setattr(struct dentry *, struct iattr *); @@ -72,8 +73,10 @@ extern const struct inode_operations cifs_dfs_referral_inode_operations; /* Functions related to files and directories */ extern const struct file_operations cifs_file_ops; extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */ -extern const struct file_operations cifs_file_nobrl_ops; -extern const struct file_operations cifs_file_direct_nobrl_ops; /* no brlocks */ +extern const struct file_operations cifs_file_strict_ops; /* if strictio mnt */ +extern const struct file_operations cifs_file_nobrl_ops; /* no brlocks */ +extern const struct file_operations cifs_file_direct_nobrl_ops; +extern const struct file_operations cifs_file_strict_nobrl_ops; extern int cifs_open(struct inode *inode, struct file *file); extern int cifs_close(struct inode 
*inode, struct file *file); extern int cifs_closedir(struct inode *inode, struct file *file); @@ -83,6 +86,7 @@ extern ssize_t cifs_user_write(struct file *file, const char __user *write_data, size_t write_size, loff_t *poffset); extern int cifs_lock(struct file *, int, struct file_lock *); extern int cifs_fsync(struct file *, int); +extern int cifs_strict_fsync(struct file *, int); extern int cifs_flush(struct file *, fl_owner_t id); extern int cifs_file_mmap(struct file * , struct vm_area_struct *); extern const struct file_operations cifs_dir_ops; diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 1b26c2717599..5790fab7349b 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1528,27 +1528,47 @@ static int cifs_write_end(struct file *file, struct address_space *mapping, return rc; } -int cifs_fsync(struct file *file, int datasync) +int cifs_strict_fsync(struct file *file, int datasync) { int xid; int rc = 0; struct cifsTconInfo *tcon; struct cifsFileInfo *smbfile = file->private_data; struct inode *inode = file->f_path.dentry->d_inode; + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); xid = GetXid(); cFYI(1, "Sync file - name: %s datasync: 0x%x", file->f_path.dentry->d_name.name, datasync); - rc = filemap_write_and_wait(inode->i_mapping); - if (rc == 0) { - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); + if (!CIFS_I(inode)->clientCanCacheRead) + cifs_invalidate_mapping(inode); - tcon = tlink_tcon(smbfile->tlink); - if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) - rc = CIFSSMBFlush(xid, tcon, smbfile->netfid); - } + tcon = tlink_tcon(smbfile->tlink); + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) + rc = CIFSSMBFlush(xid, tcon, smbfile->netfid); + + FreeXid(xid); + return rc; +} + +int cifs_fsync(struct file *file, int datasync) +{ + int xid; + int rc = 0; + struct cifsTconInfo *tcon; + struct cifsFileInfo *smbfile = file->private_data; + struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); + + xid = GetXid(); + + cFYI(1, "Sync file - name: %s datasync: 0x%x", + file->f_path.dentry->d_name.name, datasync); + + tcon = tlink_tcon(smbfile->tlink); + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) + rc = CIFSSMBFlush(xid, tcon, smbfile->netfid); FreeXid(xid); return rc; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 6c9ee8014ff0..8852470b4fbb 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -44,13 +44,17 @@ static void cifs_set_ops(struct inode *inode) inode->i_fop = &cifs_file_direct_nobrl_ops; else inode->i_fop = &cifs_file_direct_ops; + } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) { + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) + inode->i_fop = &cifs_file_strict_nobrl_ops; + else + inode->i_fop = &cifs_file_strict_ops; } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) inode->i_fop = &cifs_file_nobrl_ops; else { /* not direct, send byte range locks */ inode->i_fop = &cifs_file_ops; } - /* check if server can support readpages */ if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf < PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE) @@ -1679,7 +1683,7 @@ cifs_inode_needs_reval(struct inode *inode) /* * Zap the cache. Called when invalid_mapping flag is set. */ -static void +void cifs_invalidate_mapping(struct inode *inode) { int rc; -- cgit v1.2.2 From 7a6a19b17ab9103ec708c18befd28f2a3908d4c1 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Tue, 14 Dec 2010 11:29:51 +0300 Subject: CIFS: Implement cifs_file_strict_mmap (try #2) Invalidate inode mapping if we don't have at least Level II oplock. 
Reviewed-by: Jeff Layton Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 4 ++-- fs/cifs/cifsfs.h | 1 + fs/cifs/file.c | 15 +++++++++++++++ 3 files changed, 18 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index f6093e401c5a..e24d966fb214 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -743,7 +743,7 @@ const struct file_operations cifs_file_strict_ops = { .lock = cifs_lock, .fsync = cifs_strict_fsync, .flush = cifs_flush, - .mmap = cifs_file_mmap, + .mmap = cifs_file_strict_mmap, .splice_read = generic_file_splice_read, .llseek = cifs_llseek, #ifdef CONFIG_CIFS_POSIX @@ -798,7 +798,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = { .release = cifs_close, .fsync = cifs_strict_fsync, .flush = cifs_flush, - .mmap = cifs_file_mmap, + .mmap = cifs_file_strict_mmap, .splice_read = generic_file_splice_read, .llseek = cifs_llseek, #ifdef CONFIG_CIFS_POSIX diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 10c4303c282d..710072e36912 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -89,6 +89,7 @@ extern int cifs_fsync(struct file *, int); extern int cifs_strict_fsync(struct file *, int); extern int cifs_flush(struct file *, fl_owner_t id); extern int cifs_file_mmap(struct file * , struct vm_area_struct *); +extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *); extern const struct file_operations cifs_dir_ops; extern int cifs_dir_open(struct inode *inode, struct file *file); extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 5790fab7349b..0b32377ef8b7 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1769,6 +1769,21 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, return total_read; } +int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma) +{ + int rc, xid; + struct inode *inode = file->f_path.dentry->d_inode; + + xid = GetXid(); + + if (!CIFS_I(inode)->clientCanCacheRead) + cifs_invalidate_mapping(inode); + + rc = generic_file_mmap(file, vma); + FreeXid(xid); + return rc; +} + int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) { int rc, xid; -- cgit v1.2.2 From a70307eeeb25b89f6b2baf3cf3f0cef83c96ba12 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Tue, 14 Dec 2010 11:50:41 +0300 Subject: CIFS: Implement cifs_strict_readv (try #4) Read from the cache if we have at least Level II oplock - otherwise read from the server. Add cifs_user_readv to let the client read into iovec buffers. 
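Reading into an iovec means the copy loop can no longer just advance a single user pointer; it has to remember how many bytes of the vector it has already filled. The two helpers the new code leans on are iov_length(), which returns the total capacity of the vector, and memcpy_toiovecend(), which copies kernel data into the vector starting at a given byte offset. A rough sketch of that pattern follows; it is illustrative only, and kbuf, got and rsize are placeholder names rather than cifs symbols:

        size_t want = iov_length(iov, nr_segs); /* bytes the caller asked for */
        size_t done = 0;

        while (done < want) {
                /* ... receive up to rsize bytes from the server into kbuf,
                 *     setting got to the number actually received
                 *     (kbuf/got/rsize are placeholders for this sketch) ... */
                if (memcpy_toiovecend(iov, kbuf, done, got))
                        return -EFAULT; /* fault while copying to userspace */
                done += got;
        }

The running offset handed to memcpy_toiovecend() is what replaces the current_offset pointer arithmetic of the old cifs_user_read().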
Reviewed-by: Jeff Layton Signed-off-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 4 +- fs/cifs/cifsfs.h | 4 +- fs/cifs/file.c | 116 +++++++++++++++++++++++++++++++++++++------------------ 3 files changed, 84 insertions(+), 40 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index e24d966fb214..a8323f1dc1c4 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -736,7 +736,7 @@ const struct file_operations cifs_file_ops = { const struct file_operations cifs_file_strict_ops = { .read = do_sync_read, .write = do_sync_write, - .aio_read = generic_file_aio_read, + .aio_read = cifs_strict_readv, .aio_write = cifs_file_aio_write, .open = cifs_open, .release = cifs_close, @@ -792,7 +792,7 @@ const struct file_operations cifs_file_nobrl_ops = { const struct file_operations cifs_file_strict_nobrl_ops = { .read = do_sync_read, .write = do_sync_write, - .aio_read = generic_file_aio_read, + .aio_read = cifs_strict_readv, .aio_write = cifs_file_aio_write, .open = cifs_open, .release = cifs_close, diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 710072e36912..f23206d46531 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -81,7 +81,9 @@ extern int cifs_open(struct inode *inode, struct file *file); extern int cifs_close(struct inode *inode, struct file *file); extern int cifs_closedir(struct inode *inode, struct file *file); extern ssize_t cifs_user_read(struct file *file, char __user *read_data, - size_t read_size, loff_t *poffset); + size_t read_size, loff_t *poffset); +extern ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos); extern ssize_t cifs_user_write(struct file *file, const char __user *write_data, size_t write_size, loff_t *poffset); extern int cifs_lock(struct file *, int, struct file_lock *); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 0b32377ef8b7..d7d65a70678e 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1619,42 +1619,42 @@ int cifs_flush(struct file *file, fl_owner_t id) return rc; } -ssize_t cifs_user_read(struct file *file, char __user *read_data, - size_t read_size, loff_t *poffset) +static ssize_t +cifs_iovec_read(struct file *file, const struct iovec *iov, + unsigned long nr_segs, loff_t *poffset) { - int rc = -EACCES; - unsigned int bytes_read = 0; - unsigned int total_read = 0; - unsigned int current_read_size; + int rc; + int xid; + unsigned int total_read, bytes_read = 0; + size_t len, cur_len; + int iov_offset = 0; struct cifs_sb_info *cifs_sb; struct cifsTconInfo *pTcon; - int xid; struct cifsFileInfo *open_file; - char *smb_read_data; - char __user *current_offset; struct smb_com_read_rsp *pSMBr; + char *read_data; + + if (!nr_segs) + return 0; + + len = iov_length(iov, nr_segs); + if (!len) + return 0; xid = GetXid(); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); - if (file->private_data == NULL) { - rc = -EBADF; - FreeXid(xid); - return rc; - } open_file = file->private_data; pTcon = tlink_tcon(open_file->tlink); if ((file->f_flags & O_ACCMODE) == O_WRONLY) cFYI(1, "attempting read on write only file instance"); - for (total_read = 0, current_offset = read_data; - read_size > total_read; - total_read += bytes_read, current_offset += bytes_read) { - current_read_size = min_t(const int, read_size - total_read, - cifs_sb->rsize); + for (total_read = 0; total_read < len; total_read += bytes_read) { + cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize); rc = -EAGAIN; - smb_read_data = NULL; + read_data = NULL; + while (rc == 
-EAGAIN) { int buf_type = CIFS_NO_BUFFER; if (open_file->invalidHandle) { @@ -1662,27 +1662,25 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data, if (rc != 0) break; } - rc = CIFSSMBRead(xid, pTcon, - open_file->netfid, - current_read_size, *poffset, - &bytes_read, &smb_read_data, - &buf_type); - pSMBr = (struct smb_com_read_rsp *)smb_read_data; - if (smb_read_data) { - if (copy_to_user(current_offset, - smb_read_data + - 4 /* RFC1001 length field */ + - le16_to_cpu(pSMBr->DataOffset), - bytes_read)) + rc = CIFSSMBRead(xid, pTcon, open_file->netfid, + cur_len, *poffset, &bytes_read, + &read_data, &buf_type); + pSMBr = (struct smb_com_read_rsp *)read_data; + if (read_data) { + char *data_offset = read_data + 4 + + le16_to_cpu(pSMBr->DataOffset); + if (memcpy_toiovecend(iov, data_offset, + iov_offset, bytes_read)) rc = -EFAULT; - if (buf_type == CIFS_SMALL_BUFFER) - cifs_small_buf_release(smb_read_data); + cifs_small_buf_release(read_data); else if (buf_type == CIFS_LARGE_BUFFER) - cifs_buf_release(smb_read_data); - smb_read_data = NULL; + cifs_buf_release(read_data); + read_data = NULL; + iov_offset += bytes_read; } } + if (rc || (bytes_read == 0)) { if (total_read) { break; @@ -1695,13 +1693,57 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data, *poffset += bytes_read; } } + FreeXid(xid); return total_read; } +ssize_t cifs_user_read(struct file *file, char __user *read_data, + size_t read_size, loff_t *poffset) +{ + struct iovec iov; + iov.iov_base = read_data; + iov.iov_len = read_size; + + return cifs_iovec_read(file, &iov, 1, poffset); +} + +static ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) +{ + ssize_t read; + + read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos); + if (read > 0) + iocb->ki_pos = pos; + + return read; +} + +ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) +{ + struct inode *inode; + + inode = iocb->ki_filp->f_path.dentry->d_inode; + + if (CIFS_I(inode)->clientCanCacheRead) + return generic_file_aio_read(iocb, iov, nr_segs, pos); + + /* + * In strict cache mode we need to read from the server all the time + * if we don't have level II oplock because the server can delay mtime + * change - so we can't make a decision about inode invalidating. + * And we can also fail with pagereading if there are mandatory locks + * on pages affected by this read but not on the region from pos to + * pos+len-1. + */ + + return cifs_user_readv(iocb, iov, nr_segs, pos); +} static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, - loff_t *poffset) + loff_t *poffset) { int rc = -EACCES; unsigned int bytes_read = 0; -- cgit v1.2.2 From c3dccf48174e50668b7c63544ac8c60c07a45978 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 20 Jan 2011 13:36:50 -0500 Subject: cifs: TCP_Server_Info diet Remove fields that are completely unused, and rearrange struct according to recommendations by "pahole". Before: /* size: 1112, cachelines: 18, members: 49 */ /* sum members: 1086, holes: 8, sum holes: 26 */ /* bit holes: 1, sum bit holes: 7 bits */ /* last cacheline: 24 bytes */ After: /* size: 1072, cachelines: 17, members: 42 */ /* sum members: 1065, holes: 3, sum holes: 7 */ /* last cacheline: 48 bytes */ ...savings of 40 bytes per struct on x86_64. 21 bytes by field removal, and 19 by reorganizing to eliminate holes. 
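The hole accounting above is ordinary C alignment at work: each member is placed at the next offset that is a multiple of its own alignment, so a lone bool or char sitting in front of a pointer wastes the bytes in between. A minimal sketch of what that means on x86_64; the struct names are invented for illustration and have nothing to do with TCP_Server_Info:

        struct before {                 /* illustrative only */
                char  flag;             /* offset 0, size 1 */
                                        /* 7-byte hole: the pointer below must
                                         * start on an 8-byte boundary */
                void *ptr;              /* offset 8, size 8 */
                char  flag2;            /* offset 16, size 1 */
                                        /* 7 bytes of tail padding */
        };                              /* sizeof == 24 */

        struct after {                  /* same members, reordered */
                void *ptr;              /* offset 0, size 8 */
                char  flag;             /* offset 8 */
                char  flag2;            /* offset 9 */
                                        /* 6 bytes of tail padding */
        };                              /* sizeof == 16 */

Grouping the bools and other narrow members together, as the patch does, is what lets pahole report fewer and smaller holes without dropping any state.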
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsglob.h | 26 +++++++++----------------- fs/cifs/cifssmb.c | 2 -- 2 files changed, 9 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 571132c95231..36f097e1ee74 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -161,6 +161,7 @@ struct TCP_Server_Info { int srv_count; /* reference counter */ /* 15 character server name + 0x20 16th byte indicating type = srv */ char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; + enum statusEnum tcpStatus; /* what we think the status is */ char *hostname; /* hostname portion of UNC string */ struct socket *ssocket; struct sockaddr_storage dstaddr; @@ -168,25 +169,16 @@ struct TCP_Server_Info { wait_queue_head_t response_q; wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/ struct list_head pending_mid_q; - void *Server_NlsInfo; /* BB - placeholder for future NLS info */ - unsigned short server_codepage; /* codepage for the server */ - enum protocolEnum protocolType; - char versionMajor; - char versionMinor; - bool svlocal:1; /* local server or remote */ bool noblocksnd; /* use blocking sendmsg */ bool noautotune; /* do not autotune send buf sizes */ bool tcp_nodelay; atomic_t inFlight; /* number of requests on the wire to server */ -#ifdef CONFIG_CIFS_STATS2 - atomic_t inSend; /* requests trying to send */ - atomic_t num_waiters; /* blocked waiting to get in sendrecv */ -#endif - enum statusEnum tcpStatus; /* what we think the status is */ struct mutex srv_mutex; struct task_struct *tsk; char server_GUID[16]; char secMode; + bool session_estab; /* mark when very first sess is established */ + u16 dialect; /* dialect index that server chose */ enum securityEnum secType; unsigned int maxReq; /* Clients should submit no more */ /* than maxReq distinct unanswered SMBs to the server when using */ @@ -199,8 +191,6 @@ struct TCP_Server_Info { unsigned int max_vcs; /* maximum number of smb sessions, at least those that can be specified uniquely with vcnumbers */ - char sessid[4]; /* unique token id for this session */ - /* (returned on Negotiate */ int capabilities; /* allow selective disabling of caps by smb sess */ int timeAdj; /* Adjust for difference in server time zone in sec */ __u16 CurrentMid; /* multiplex id - rotating counter */ @@ -210,18 +200,20 @@ struct TCP_Server_Info { __u32 sequence_number; /* for signing, protected by srv_mutex */ struct session_key session_key; unsigned long lstrp; /* when we got last response from this server */ - u16 dialect; /* dialect index that server chose */ struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */ /* extended security flavors that server supports */ + bool sec_ntlmssp; /* supports NTLMSSP */ + bool sec_kerberosu2u; /* supports U2U Kerberos */ bool sec_kerberos; /* supports plain Kerberos */ bool sec_mskerberos; /* supports legacy MS Kerberos */ - bool sec_kerberosu2u; /* supports U2U Kerberos */ - bool sec_ntlmssp; /* supports NTLMSSP */ - bool session_estab; /* mark when very first sess is established */ struct delayed_work echo; /* echo ping workqueue job */ #ifdef CONFIG_CIFS_FSCACHE struct fscache_cookie *fscache; /* client index cache cookie */ #endif +#ifdef CONFIG_CIFS_STATS2 + atomic_t inSend; /* requests trying to send */ + atomic_t num_waiters; /* blocked waiting to get in sendrecv */ +#endif }; /* diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 37113450757b..5b1f6637f161 100644 --- a/fs/cifs/cifssmb.c +++ 
b/fs/cifs/cifssmb.c @@ -452,7 +452,6 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize), (__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs); - GETU32(server->sessid) = le32_to_cpu(rsp->SessionKey); /* even though we do not use raw we might as well set this accurately, in case we ever find a need for it */ if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) { @@ -566,7 +565,6 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) (__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); cFYI(DBG2, "Max buf = %d", ses->server->maxBuf); - GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey); server->capabilities = le32_to_cpu(pSMBr->Capabilities); server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone); server->timeAdj *= 60; -- cgit v1.2.2 From aae62fdb6b9a6605abdea7370c4a0e005e6c1cd7 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 20 Jan 2011 13:36:50 -0500 Subject: cifs: move time field in cifsInodeInfo ...and remove length qualifiers from bools. Before: /* size: 1176, cachelines: 19, members: 13 */ /* sum members: 1165, holes: 2, sum holes: 11 */ /* bit holes: 1, sum bit holes: 4 bits */ /* last cacheline: 24 bytes */ After: /* size: 1168, cachelines: 19, members: 13 */ /* last cacheline: 16 bytes */ ...savings of 8 bytes per inode. Signed-off-by: Jeff Layton Reviewed-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/cifsglob.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 36f097e1ee74..5bfb75346cb0 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -439,11 +439,11 @@ struct cifsInodeInfo { /* BB add in lists for dirty pages i.e. write caching info for oplock */ struct list_head openFileList; __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */ - unsigned long time; /* jiffies of last update/check of inode */ - bool clientCanCacheRead:1; /* read oplock */ - bool clientCanCacheAll:1; /* read and writebehind oplock */ - bool delete_pending:1; /* DELETE_ON_CLOSE is set */ - bool invalid_mapping:1; /* pagecache is invalid */ + bool clientCanCacheRead; /* read oplock */ + bool clientCanCacheAll; /* read and writebehind oplock */ + bool delete_pending; /* DELETE_ON_CLOSE is set */ + bool invalid_mapping; /* pagecache is invalid */ + unsigned long time; /* jiffies of last update of inode */ u64 server_eof; /* current file size on server */ u64 uniqueid; /* server inode number */ u64 createtime; /* creation time on server */ -- cgit v1.2.2 From 690c522fa5a62825af880775e3ef1e55755667b2 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 20 Jan 2011 13:36:51 -0500 Subject: cifs: use get/put_unaligned functions to access ByteCount It's possible that when we access the ByteCount that the alignment will be off. Most CPUs deal with that transparently, but there's usually some performance impact. Some CPUs raise an exception on unaligned accesses. Fix this by accessing the byte count using the get_unaligned and put_unaligned inlined functions. While we're at it, fix the types of some of the variables that end up getting returns from these functions. 
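ByteCount sits just past a variable-length block of parameter words (2 * WordCount bytes), so its address is frequently odd and a plain 16-bit load through a cast pointer is an unaligned access. A minimal sketch of the before/after pattern; bcc_ptr and count are placeholders for the computed field address and the value being stored, not names from the patch:

        __u16 bcc;

        /* old: the compiler assumes natural alignment for the load,
         * which ByteCount often does not have */
        bcc = le16_to_cpu(*(__le16 *)bcc_ptr);

        /* new: the asm/unaligned.h helpers are safe at any offset;
         * the _le16 variants also fold in the endian conversion */
        bcc = get_unaligned_le16(bcc_ptr);
        put_unaligned_le16(count, bcc_ptr);     /* count: placeholder value */

On x86 the generated code is usually identical; on strict-alignment architectures the helpers fall back to byte-wise accesses instead of faulting.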
Acked-by: Pavel Shilovsky Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifspdu.h | 47 +++++++++++++++++++++++++++++++++++++++++++---- fs/cifs/cifssmb.c | 14 +++++--------- fs/cifs/connect.c | 10 +++++----- fs/cifs/netmisc.c | 4 ++-- fs/cifs/sess.c | 13 ++++++------- fs/cifs/transport.c | 9 ++++----- 6 files changed, 65 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index ea205b4fcad2..b5c8cc5d7a7f 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h @@ -23,6 +23,7 @@ #define _CIFSPDU_H #include +#include #include "smbfsctl.h" #ifdef CONFIG_CIFS_WEAK_PW_HASH @@ -426,11 +427,49 @@ struct smb_hdr { __u16 Mid; __u8 WordCount; } __attribute__((packed)); -/* given a pointer to an smb_hdr retrieve the value of byte count */ -#define BCC(smb_var) (*(__u16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount))) -#define BCC_LE(smb_var) (*(__le16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount))) + +/* given a pointer to an smb_hdr retrieve a char pointer to the byte count */ +#define BCC(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + \ + (2 * (smb_var)->WordCount)) + /* given a pointer to an smb_hdr retrieve the pointer to the byte area */ -#define pByteArea(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount) + 2) +#define pByteArea(smb_var) (BCC(smb_var) + 2) + +/* get the converted ByteCount for a SMB packet and return it */ +static inline __u16 +get_bcc(struct smb_hdr *hdr) +{ + __u16 *bc_ptr = (__u16 *)BCC(hdr); + + return get_unaligned(bc_ptr); +} + +/* get the unconverted ByteCount for a SMB packet and return it */ +static inline __u16 +get_bcc_le(struct smb_hdr *hdr) +{ + __le16 *bc_ptr = (__le16 *)BCC(hdr); + + return get_unaligned_le16(bc_ptr); +} + +/* set the ByteCount for a SMB packet in host-byte order */ +static inline void +put_bcc(__u16 count, struct smb_hdr *hdr) +{ + __u16 *bc_ptr = (__u16 *)BCC(hdr); + + put_unaligned(count, bc_ptr); +} + +/* set the ByteCount for a SMB packet in little-endian */ +static inline void +put_bcc_le(__u16 count, struct smb_hdr *hdr) +{ + __le16 *bc_ptr = (__le16 *)BCC(hdr); + + put_unaligned_le16(count, bc_ptr); +} /* * Computer Name Length (since Netbios name was length 16 with last byte 0x20) diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 5b1f6637f161..39cec0d9cd1b 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -333,7 +333,6 @@ static int validate_t2(struct smb_t2_rsp *pSMB) { int rc = -EINVAL; int total_size; - char *pBCC; /* check for plausible wct, bcc and t2 data and parm sizes */ /* check for parm and data offset going beyond end of smb */ @@ -346,13 +345,9 @@ static int validate_t2(struct smb_t2_rsp *pSMB) if (total_size < 512) { total_size += le16_to_cpu(pSMB->t2_rsp.DataCount); - /* BCC le converted in SendReceive */ - pBCC = (pSMB->hdr.WordCount * 2) + - sizeof(struct smb_hdr) + - (char *)pSMB; - if ((total_size <= (*(u16 *)pBCC)) && - (total_size < - CIFSMaxBufSize+MAX_CIFS_HDR_SIZE)) { + if (total_size <= get_bcc(&pSMB->hdr) && + total_size < + CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { return 0; } } @@ -362,6 +357,7 @@ static int validate_t2(struct smb_t2_rsp *pSMB) sizeof(struct smb_t2_rsp) + 16); return rc; } + int CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) { @@ -5609,7 +5605,7 @@ QAllEAsRetry: } /* make sure list_len doesn't go past end of SMB */ - end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr); + end_of_smb = 
(char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr); if ((char *)ea_response_data + list_len > end_of_smb) { cFYI(1, "EA list appears to go beyond SMB"); rc = -EIO; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 8d4657596301..ca20e813275d 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -318,9 +318,9 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); total_in_buf += total_in_buf2; pSMBt->t2_rsp.DataCount = cpu_to_le16(total_in_buf); - byte_count = le16_to_cpu(BCC_LE(pTargetSMB)); + byte_count = get_bcc_le(pTargetSMB); byte_count += total_in_buf2; - BCC_LE(pTargetSMB) = cpu_to_le16(byte_count); + put_bcc_le(byte_count, pTargetSMB); byte_count = pTargetSMB->smb_buf_length; byte_count += total_in_buf2; @@ -2937,8 +2937,8 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, TCONX_RSP *pSMBr; unsigned char *bcc_ptr; int rc = 0; - int length, bytes_left; - __u16 count; + int length; + __u16 bytes_left, count; if (ses == NULL) return -EIO; @@ -3032,7 +3032,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, tcon->need_reconnect = false; tcon->tid = smb_buffer_response->Tid; bcc_ptr = pByteArea(smb_buffer_response); - bytes_left = BCC(smb_buffer_response); + bytes_left = get_bcc(smb_buffer_response); length = strnlen(bcc_ptr, bytes_left - 2); if (smb_buffer->Flags2 & SMBFLG2_UNICODE) is_unicode = true; diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index 6783ce6cdc89..8d9189f64477 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -916,14 +916,14 @@ unsigned int smbCalcSize(struct smb_hdr *ptr) { return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) + - 2 /* size of the bcc field */ + BCC(ptr)); + 2 /* size of the bcc field */ + get_bcc(ptr)); } unsigned int smbCalcSize_LE(struct smb_hdr *ptr) { return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) + - 2 /* size of the bcc field */ + le16_to_cpu(BCC_LE(ptr))); + 2 /* size of the bcc field */ + get_bcc_le(ptr)); } /* The following are taken from fs/ntfs/util.c */ diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 1cffd82c4f13..1adc9625a344 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -277,7 +277,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, } static void -decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, +decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses, const struct nls_table *nls_cp) { int len; @@ -323,7 +323,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, return; } -static int decode_ascii_ssetup(char **pbcc_area, int bleft, +static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses, const struct nls_table *nls_cp) { @@ -575,12 +575,11 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, char *str_area; SESSION_SETUP_ANDX *pSMB; __u32 capabilities; - int count; + __u16 count; int resp_buf_type; struct kvec iov[3]; enum securityEnum type; - __u16 action; - int bytes_remaining; + __u16 action, bytes_remaining; struct key *spnego_key = NULL; __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */ u16 blob_len; @@ -876,7 +875,7 @@ ssetup_ntlmssp_authenticate: count = iov[1].iov_len + iov[2].iov_len; smb_buf->smb_buf_length += count; - BCC_LE(smb_buf) = cpu_to_le16(count); + put_bcc_le(count, smb_buf); rc = SendReceive2(xid, ses, iov, 3 /* num_iovecs */, &resp_buf_type, CIFS_LOG_ERROR); @@ -910,7 +909,7 @@ ssetup_ntlmssp_authenticate: cFYI(1, "UID 
= %d ", ses->Suid); /* response can have either 3 or 4 word count - Samba sends 3 */ /* and lanman response is 3 */ - bytes_remaining = BCC(smb_buf); + bytes_remaining = get_bcc(smb_buf); bcc_ptr = pByteArea(smb_buf); if (smb_buf->WordCount == 4) { diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index c8e2808cd5e6..c1ccca1a933f 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -484,7 +484,7 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf, in_buf->smb_buf_length = sizeof(struct smb_hdr) - 4 + 2; in_buf->Command = SMB_COM_NT_CANCEL; in_buf->WordCount = 0; - BCC_LE(in_buf) = 0; + put_bcc_le(0, in_buf); mutex_lock(&server->srv_mutex); rc = cifs_sign_smb(in_buf, server, &mid->sequence_number); @@ -632,8 +632,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, if (receive_len >= sizeof(struct smb_hdr) - 4 /* do not count RFC1001 header */ + (2 * midQ->resp_buf->WordCount) + 2 /* bcc */ ) - BCC(midQ->resp_buf) = - le16_to_cpu(BCC_LE(midQ->resp_buf)); + put_bcc(get_bcc_le(midQ->resp_buf), midQ->resp_buf); if ((flags & CIFS_NO_RESP) == 0) midQ->resp_buf = NULL; /* mark it so buf will not be freed by @@ -776,7 +775,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, if (receive_len >= sizeof(struct smb_hdr) - 4 /* do not count RFC1001 header */ + (2 * out_buf->WordCount) + 2 /* bcc */ ) - BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); + put_bcc(get_bcc_le(midQ->resp_buf), midQ->resp_buf); } else { rc = -EIO; cERROR(1, "Bad MID state?"); @@ -977,7 +976,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, if (receive_len >= sizeof(struct smb_hdr) - 4 /* do not count RFC1001 header */ + (2 * out_buf->WordCount) + 2 /* bcc */ ) - BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf)); + put_bcc(get_bcc_le(out_buf), out_buf); out: delete_mid(midQ); -- cgit v1.2.2 From 12df83c9b901cfe8ca7a66fbe0effc6d873cbbb9 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 20 Jan 2011 13:36:51 -0500 Subject: cifs: clean up unaligned accesses in validate_t2 ...and clean up function to reduce indentation. 
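The indentation cleanup is the usual kernel idiom of inverting the tests: instead of nesting each check inside the previous one, every check jumps to a single error label the moment it fails, so the success path stays flat. A generic sketch of the shape, with a hypothetical struct and limits rather than the actual cifs code:

        struct hdr { unsigned int wct, parm_off, data_off, total; }; /* hypothetical */

        static int validate_hdr(const struct hdr *h, unsigned int buf_limit)
        {
                if (h->wct < 10)
                        goto bad;
                if (h->parm_off > 1024 || h->data_off > 1024)
                        goto bad;
                if (h->total >= buf_limit)
                        goto bad;
                return 0;
        bad:
                return -EINVAL;
        }

A single exit point also means any diagnostic dump (like the cifs_dump_mem() call here) only has to be written once.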
Signed-off-by: Jeff Layton Acked-by: Pavel Shilovsky Reviewed-by: Shirish Pargaonkar Signed-off-by: Steve French --- fs/cifs/cifssmb.c | 44 +++++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 39cec0d9cd1b..675041a6949c 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -331,31 +331,33 @@ smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon, static int validate_t2(struct smb_t2_rsp *pSMB) { - int rc = -EINVAL; - int total_size; + unsigned int total_size; + + /* check for plausible wct */ + if (pSMB->hdr.WordCount < 10) + goto vt2_err; - /* check for plausible wct, bcc and t2 data and parm sizes */ /* check for parm and data offset going beyond end of smb */ - if (pSMB->hdr.WordCount >= 10) { - if ((le16_to_cpu(pSMB->t2_rsp.ParameterOffset) <= 1024) && - (le16_to_cpu(pSMB->t2_rsp.DataOffset) <= 1024)) { - /* check that bcc is at least as big as parms + data */ - /* check that bcc is less than negotiated smb buffer */ - total_size = le16_to_cpu(pSMB->t2_rsp.ParameterCount); - if (total_size < 512) { - total_size += - le16_to_cpu(pSMB->t2_rsp.DataCount); - if (total_size <= get_bcc(&pSMB->hdr) && - total_size < - CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { - return 0; - } - } - } - } + if (get_unaligned_le16(&pSMB->t2_rsp.ParameterOffset) > 1024 || + get_unaligned_le16(&pSMB->t2_rsp.DataOffset) > 1024) + goto vt2_err; + + /* check that bcc is at least as big as parms + data */ + /* check that bcc is less than negotiated smb buffer */ + total_size = get_unaligned_le16(&pSMB->t2_rsp.ParameterCount); + if (total_size >= 512) + goto vt2_err; + + total_size += get_unaligned_le16(&pSMB->t2_rsp.DataCount); + if (total_size > get_bcc(&pSMB->hdr) || + total_size >= CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) + goto vt2_err; + + return 0; +vt2_err: cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB, sizeof(struct smb_t2_rsp) + 16); - return rc; + return -EINVAL; } int -- cgit v1.2.2 From 26ec254869c0158ea8db6de83b7644e2d93cac2a Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 20 Jan 2011 13:36:51 -0500 Subject: cifs: fix unaligned access in check2ndT2 and coalesce_t2 Signed-off-by: Jeff Layton Acked-by: Pavel Shilovsky Reviewed-by: Shirish Pargaonkar Signed-off-by: Steve French --- fs/cifs/connect.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index ca20e813275d..18d3c7724d6e 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -232,9 +232,8 @@ cifs_reconnect(struct TCP_Server_Info *server) static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize) { struct smb_t2_rsp *pSMBt; - int total_data_size; - int data_in_this_rsp; int remaining; + __u16 total_data_size, data_in_this_rsp; if (pSMB->Command != SMB_COM_TRANSACTION2) return 0; @@ -248,8 +247,8 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize) pSMBt = (struct smb_t2_rsp *)pSMB; - total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount); - data_in_this_rsp = le16_to_cpu(pSMBt->t2_rsp.DataCount); + total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); + data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); remaining = total_data_size - data_in_this_rsp; @@ -275,21 +274,18 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) { struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond; struct smb_t2_rsp *pSMBt = 
(struct smb_t2_rsp *)pTargetSMB; - int total_data_size; - int total_in_buf; - int remaining; - int total_in_buf2; char *data_area_of_target; char *data_area_of_buf2; - __u16 byte_count; + int remaining; + __u16 byte_count, total_data_size, total_in_buf, total_in_buf2; - total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount); + total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); - if (total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) { + if (total_data_size != + get_unaligned_le16(&pSMB2->t2_rsp.TotalDataCount)) cFYI(1, "total data size of primary and secondary t2 differ"); - } - total_in_buf = le16_to_cpu(pSMBt->t2_rsp.DataCount); + total_in_buf = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); remaining = total_data_size - total_in_buf; @@ -299,25 +295,25 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) if (remaining == 0) /* nothing to do, ignore */ return 0; - total_in_buf2 = le16_to_cpu(pSMB2->t2_rsp.DataCount); + total_in_buf2 = get_unaligned_le16(&pSMB2->t2_rsp.DataCount); if (remaining < total_in_buf2) { cFYI(1, "transact2 2nd response contains too much data"); } /* find end of first SMB data area */ data_area_of_target = (char *)&pSMBt->hdr.Protocol + - le16_to_cpu(pSMBt->t2_rsp.DataOffset); + get_unaligned_le16(&pSMBt->t2_rsp.DataOffset); /* validate target area */ - data_area_of_buf2 = (char *) &pSMB2->hdr.Protocol + - le16_to_cpu(pSMB2->t2_rsp.DataOffset); + data_area_of_buf2 = (char *)&pSMB2->hdr.Protocol + + get_unaligned_le16(&pSMB2->t2_rsp.DataOffset); data_area_of_target += total_in_buf; /* copy second buffer into end of first buffer */ memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2); total_in_buf += total_in_buf2; - pSMBt->t2_rsp.DataCount = cpu_to_le16(total_in_buf); + put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount); byte_count = get_bcc_le(pTargetSMB); byte_count += total_in_buf2; put_bcc_le(byte_count, pTargetSMB); @@ -334,7 +330,6 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB) return 0; /* we are done */ } else /* more responses to go */ return 1; - } static void -- cgit v1.2.2 From ba2dbf30df210b519bdd8d34ac2ecaaeeef34c44 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 20 Jan 2011 13:36:51 -0500 Subject: cifs: clean up unaligned accesses in cifs_unicode.c Make sure we use get/put_unaligned routines when accessing wide character strings. 
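The UCS-2/UTF-16LE strings handled here are arrays of 16-bit little-endian code units that can start at an odd offset within an SMB buffer, so each unit is fetched with get_unaligned_le16() instead of being dereferenced as a __le16. A rough sketch of the resulting loop shape (simplified; the real helpers in the patch below also perform codepage conversion):

	#include <linux/types.h>
	#include <asm/unaligned.h>

	static int count_ucs2_units(const __le16 *from, int maxwords)
	{
		int i;
		__u16 ftmp;

		for (i = 0; i < maxwords; i++) {
			ftmp = get_unaligned_le16(&from[i]);	/* safe unaligned LE read */
			if (ftmp == 0)
				break;				/* NUL terminator found */
		}
		return i;	/* code units before the terminator */
	}
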
Signed-off-by: Jeff Layton Acked-by: Pavel Shilovsky Reviewed-by: Shirish Pargaonkar Signed-off-by: Steve French --- fs/cifs/cifs_unicode.c | 51 +++++++++++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 23 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 430f510a1720..5f6e71857be6 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -44,10 +44,14 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes, int charlen, outlen = 0; int maxwords = maxbytes / 2; char tmp[NLS_MAX_CHARSET_SIZE]; + __u16 ftmp; - for (i = 0; i < maxwords && from[i]; i++) { - charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp, - NLS_MAX_CHARSET_SIZE); + for (i = 0; i < maxwords; i++) { + ftmp = get_unaligned_le16(&from[i]); + if (ftmp == 0) + break; + + charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE); if (charlen > 0) outlen += charlen; else @@ -58,9 +62,9 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes, } /* - * cifs_mapchar - convert a little-endian char to proper char in codepage + * cifs_mapchar - convert a host-endian char to proper char in codepage * @target - where converted character should be copied - * @src_char - 2 byte little-endian source character + * @src_char - 2 byte host-endian source character * @cp - codepage to which character should be converted * @mapchar - should character be mapped according to mapchars mount option? * @@ -69,7 +73,7 @@ cifs_ucs2_bytes(const __le16 *from, int maxbytes, * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE). */ static int -cifs_mapchar(char *target, const __le16 src_char, const struct nls_table *cp, +cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp, bool mapchar) { int len = 1; @@ -82,7 +86,7 @@ cifs_mapchar(char *target, const __le16 src_char, const struct nls_table *cp, * build_path_from_dentry are modified, as they use slash as * separator. 
*/ - switch (le16_to_cpu(src_char)) { + switch (src_char) { case UNI_COLON: *target = ':'; break; @@ -109,8 +113,7 @@ out: return len; cp_convert: - len = cp->uni2char(le16_to_cpu(src_char), target, - NLS_MAX_CHARSET_SIZE); + len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE); if (len <= 0) { *target = '?'; len = 1; @@ -149,6 +152,7 @@ cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen, int nullsize = nls_nullsize(codepage); int fromwords = fromlen / 2; char tmp[NLS_MAX_CHARSET_SIZE]; + __u16 ftmp; /* * because the chars can be of varying widths, we need to take care @@ -158,19 +162,23 @@ cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen, */ safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize); - for (i = 0; i < fromwords && from[i]; i++) { + for (i = 0; i < fromwords; i++) { + ftmp = get_unaligned_le16(&from[i]); + if (ftmp == 0) + break; + /* * check to see if converting this character might make the * conversion bleed into the null terminator */ if (outlen >= safelen) { - charlen = cifs_mapchar(tmp, from[i], codepage, mapchar); + charlen = cifs_mapchar(tmp, ftmp, codepage, mapchar); if ((outlen + charlen) > (tolen - nullsize)) break; } /* put converted char into 'to' buffer */ - charlen = cifs_mapchar(&to[outlen], from[i], codepage, mapchar); + charlen = cifs_mapchar(&to[outlen], ftmp, codepage, mapchar); outlen += charlen; } @@ -193,24 +201,21 @@ cifs_strtoUCS(__le16 *to, const char *from, int len, { int charlen; int i; - wchar_t *wchar_to = (wchar_t *)to; /* needed to quiet sparse */ + wchar_t wchar_to; /* needed to quiet sparse */ for (i = 0; len && *from; i++, from += charlen, len -= charlen) { - - /* works for 2.4.0 kernel or later */ - charlen = codepage->char2uni(from, len, &wchar_to[i]); + charlen = codepage->char2uni(from, len, &wchar_to); if (charlen < 1) { - cERROR(1, "strtoUCS: char2uni of %d returned %d", - (int)*from, charlen); + cERROR(1, "strtoUCS: char2uni of 0x%x returned %d", + *from, charlen); /* A question mark */ - to[i] = cpu_to_le16(0x003f); + wchar_to = 0x003f; charlen = 1; - } else - to[i] = cpu_to_le16(wchar_to[i]); - + } + put_unaligned_le16(wchar_to, &to[i]); } - to[i] = 0; + put_unaligned_le16(0, &to[i]); return i; } -- cgit v1.2.2 From 84cdf74e8096a10dd6acbb870dd404b92f07a756 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 20 Jan 2011 13:36:51 -0500 Subject: cifs: fix unaligned accesses in cifsConvertToUCS Move cifsConvertToUCS to cifs_unicode.c where all of the other unicode related functions live. Have it store mapped characters in 'temp' and then use put_unaligned_le16 to copy it to the target buffer. Also fix the comments to match kernel coding style. Signed-off-by: Jeff Layton Acked-by: Pavel Shilovsky Reviewed-by: Shirish Pargaonkar Signed-off-by: Steve French --- fs/cifs/cifs_unicode.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++ fs/cifs/misc.c | 71 ---------------------------------------------- 2 files changed, 76 insertions(+), 71 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c index 5f6e71857be6..fc0fd4fde306 100644 --- a/fs/cifs/cifs_unicode.c +++ b/fs/cifs/cifs_unicode.c @@ -257,3 +257,79 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode, return dst; } +/* + * Convert 16 bit Unicode pathname to wire format from string in current code + * page. Conversion may involve remapping up the six characters that are + * only legal in POSIX-like OS (if they are present in the string). 
Path + * names are little endian 16 bit Unicode on the wire + */ +int +cifsConvertToUCS(__le16 *target, const char *source, int maxlen, + const struct nls_table *cp, int mapChars) +{ + int i, j, charlen; + int len_remaining = maxlen; + char src_char; + __u16 temp; + + if (!mapChars) + return cifs_strtoUCS(target, source, PATH_MAX, cp); + + for (i = 0, j = 0; i < maxlen; j++) { + src_char = source[i]; + switch (src_char) { + case 0: + put_unaligned_le16(0, &target[j]); + goto ctoUCS_out; + case ':': + temp = UNI_COLON; + break; + case '*': + temp = UNI_ASTERIK; + break; + case '?': + temp = UNI_QUESTION; + break; + case '<': + temp = UNI_LESSTHAN; + break; + case '>': + temp = UNI_GRTRTHAN; + break; + case '|': + temp = UNI_PIPE; + break; + /* + * FIXME: We can not handle remapping backslash (UNI_SLASH) + * until all the calls to build_path_from_dentry are modified, + * as they use backslash as separator. + */ + default: + charlen = cp->char2uni(source+i, len_remaining, + &temp); + /* + * if no match, use question mark, which at least in + * some cases serves as wild card + */ + if (charlen < 1) { + temp = 0x003f; + charlen = 1; + } + len_remaining -= charlen; + /* + * character may take more than one byte in the source + * string, but will take exactly two bytes in the + * target string + */ + i += charlen; + continue; + } + put_unaligned_le16(temp, &target[j]); + i++; /* move to next char in source string */ + len_remaining--; + } + +ctoUCS_out: + return i; +} + diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 09bfcf08a90f..a09e077ba925 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -637,77 +637,6 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length) return; } -/* Convert 16 bit Unicode pathname to wire format from string in current code - page. Conversion may involve remapping up the seven characters that are - only legal in POSIX-like OS (if they are present in the string). 
Path - names are little endian 16 bit Unicode on the wire */ -int -cifsConvertToUCS(__le16 *target, const char *source, int maxlen, - const struct nls_table *cp, int mapChars) -{ - int i, j, charlen; - int len_remaining = maxlen; - char src_char; - __u16 temp; - - if (!mapChars) - return cifs_strtoUCS(target, source, PATH_MAX, cp); - - for (i = 0, j = 0; i < maxlen; j++) { - src_char = source[i]; - switch (src_char) { - case 0: - target[j] = 0; - goto ctoUCS_out; - case ':': - target[j] = cpu_to_le16(UNI_COLON); - break; - case '*': - target[j] = cpu_to_le16(UNI_ASTERIK); - break; - case '?': - target[j] = cpu_to_le16(UNI_QUESTION); - break; - case '<': - target[j] = cpu_to_le16(UNI_LESSTHAN); - break; - case '>': - target[j] = cpu_to_le16(UNI_GRTRTHAN); - break; - case '|': - target[j] = cpu_to_le16(UNI_PIPE); - break; - /* BB We can not handle remapping slash until - all the calls to build_path_from_dentry - are modified, as they use slash as separator BB */ - /* case '\\': - target[j] = cpu_to_le16(UNI_SLASH); - break;*/ - default: - charlen = cp->char2uni(source+i, - len_remaining, &temp); - /* if no match, use question mark, which - at least in some cases servers as wild card */ - if (charlen < 1) { - target[j] = cpu_to_le16(0x003f); - charlen = 1; - } else - target[j] = cpu_to_le16(temp); - len_remaining -= charlen; - /* character may take more than one byte in the - the source string, but will take exactly two - bytes in the target string */ - i += charlen; - continue; - } - i++; /* move to next char in source string */ - len_remaining--; - } - -ctoUCS_out: - return i; -} - void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb) { -- cgit v1.2.2 From 28e58ee8ce1f0e69c207f747b7b9054b071e328d Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 20 Jan 2011 16:21:59 -0800 Subject: Fix broken "pipe: use event aware wakeups" optimization Commit e462c448fdc8 ("pipe: use event aware wakeups") optimized the pipe event wakeup calls to avoid wakeups if the events do not match the requested set. However, the optimization was buggy, in that it didn't actually use the correct sets for the events: when we make room for more data to be written, the pipe poll() routine will return both the POLLOUT _and_ POLLWRNORM bits. Similarly for read. And most critically, when a pipe is released, that will potentially result in POLLHUP|POLLERR (depending on whether it was the last reader or writer), not just the regular POLLIN|POLLOUT. This bug showed itself as a hung gnome-screensaver-dialog process, stuck forever (or at least until it was poked by a signal or by being traced) in a poll() system call. Cc: Davide Libenzi Cc: David S. Miller Cc: Eric Dumazet Cc: Jens Axboe Cc: Andrew Morton Signed-off-by: Linus Torvalds --- fs/pipe.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/pipe.c b/fs/pipe.c index 89e9e19b1b2e..da42f7db50de 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -441,7 +441,7 @@ redo: break; } if (do_wakeup) { - wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT); + wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } pipe_wait(pipe); @@ -450,7 +450,7 @@ redo: /* Signal writers asynchronously that there is more room. 
*/ if (do_wakeup) { - wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT); + wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } if (ret > 0) @@ -612,7 +612,7 @@ redo2: break; } if (do_wakeup) { - wake_up_interruptible_sync_poll(&pipe->wait, POLLIN); + wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); do_wakeup = 0; } @@ -623,7 +623,7 @@ redo2: out: mutex_unlock(&inode->i_mutex); if (do_wakeup) { - wake_up_interruptible_sync_poll(&pipe->wait, POLLIN); + wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); } if (ret > 0) @@ -715,7 +715,7 @@ pipe_release(struct inode *inode, int decr, int decw) if (!pipe->readers && !pipe->writers) { free_pipe_info(inode); } else { - wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT); + wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP); kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); } -- cgit v1.2.2 From 6a108a14fa356ef607be308b68337939e56ea94e Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Thu, 20 Jan 2011 14:44:16 -0800 Subject: kconfig: rename CONFIG_EMBEDDED to CONFIG_EXPERT The meaning of CONFIG_EMBEDDED has long since been obsoleted; the option is used to configure any non-standard kernel with a much larger scope than only small devices. This patch renames the option to CONFIG_EXPERT in init/Kconfig and fixes references to the option throughout the kernel. A new CONFIG_EMBEDDED option is added that automatically selects CONFIG_EXPERT when enabled and can be used in the future to isolate options that should only be considered for embedded systems (RISC architectures, SLOB, etc). Calling the option "EXPERT" more accurately represents its intention: only expert users who understand the impact of the configuration changes they are making should enable it. Reviewed-by: Ingo Molnar Acked-by: David Woodhouse Signed-off-by: David Rientjes Cc: Greg KH Cc: "David S. Miller" Cc: Jens Axboe Cc: Arnd Bergmann Cc: Robin Holt Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/Kconfig | 2 +- fs/proc/Kconfig | 6 +++--- fs/sysfs/Kconfig | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/Kconfig b/fs/Kconfig index 9a7921ae4763..3db9caa57edc 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -50,7 +50,7 @@ config EXPORTFS tristate config FILE_LOCKING - bool "Enable POSIX file locking API" if EMBEDDED + bool "Enable POSIX file locking API" if EXPERT default y help This option enables standard file locking support, required diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig index 6a0068841d96..15af6222f8a4 100644 --- a/fs/proc/Kconfig +++ b/fs/proc/Kconfig @@ -1,5 +1,5 @@ config PROC_FS - bool "/proc file system support" if EMBEDDED + bool "/proc file system support" if EXPERT default y help This is a virtual file system providing information about the status @@ -40,7 +40,7 @@ config PROC_VMCORE Exports the dump image of crashed kernel in ELF format. 
config PROC_SYSCTL - bool "Sysctl support (/proc/sys)" if EMBEDDED + bool "Sysctl support (/proc/sys)" if EXPERT depends on PROC_FS select SYSCTL default y @@ -61,7 +61,7 @@ config PROC_SYSCTL config PROC_PAGE_MONITOR default y depends on PROC_FS && MMU - bool "Enable /proc page monitoring" if EMBEDDED + bool "Enable /proc page monitoring" if EXPERT help Various /proc files exist to monitor process memory utilization: /proc/pid/smaps, /proc/pid/clear_refs, /proc/pid/pagemap, diff --git a/fs/sysfs/Kconfig b/fs/sysfs/Kconfig index f4b67588b9d6..8c41feacbac5 100644 --- a/fs/sysfs/Kconfig +++ b/fs/sysfs/Kconfig @@ -1,5 +1,5 @@ config SYSFS - bool "sysfs file system support" if EMBEDDED + bool "sysfs file system support" if EXPERT default y help The sysfs filesystem is a virtual filesystem that the kernel uses to -- cgit v1.2.2 From 20d9600cb407b0b55fef6ee814b60345c6f58264 Mon Sep 17 00:00:00 2001 From: David Dillow Date: Thu, 20 Jan 2011 14:44:22 -0800 Subject: fs/direct-io.c: don't try to allocate more than BIO_MAX_PAGES in a bio When using devices that support max_segments > BIO_MAX_PAGES (256), direct IO tries to allocate a bio with more pages than allowed, which leads to an oops in dio_bio_alloc(). Clamp the request to the supported maximum, and change dio_bio_alloc() to reflect that bio_alloc() will always return a bio when called with __GFP_WAIT and a valid number of vectors. [akpm@linux-foundation.org: remove redundant BUG_ON()] Signed-off-by: David Dillow Reviewed-by: Jeff Moyer Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/direct-io.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/direct-io.c b/fs/direct-io.c index 85882f6ba5f7..b044705eedd4 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -325,12 +325,16 @@ void dio_end_io(struct bio *bio, int error) } EXPORT_SYMBOL_GPL(dio_end_io); -static int +static void dio_bio_alloc(struct dio *dio, struct block_device *bdev, sector_t first_sector, int nr_vecs) { struct bio *bio; + /* + * bio_alloc() is guaranteed to return a bio when called with + * __GFP_WAIT and we request a valid number of vectors. + */ bio = bio_alloc(GFP_KERNEL, nr_vecs); bio->bi_bdev = bdev; @@ -342,7 +346,6 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev, dio->bio = bio; dio->logical_offset_in_bio = dio->cur_page_fs_offset; - return 0; } /* @@ -583,8 +586,9 @@ static int dio_new_bio(struct dio *dio, sector_t start_sector) goto out; sector = start_sector << (dio->blkbits - 9); nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev)); + nr_pages = min(nr_pages, BIO_MAX_PAGES); BUG_ON(nr_pages <= 0); - ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages); + dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages); dio->boundary = 0; out: return ret; -- cgit v1.2.2 From 99d86c8f1b7101d7c55dbf644b32bb1f0d7eb303 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 20 Jan 2011 21:19:25 -0500 Subject: cifs: fix up CIFSSMBEcho for unaligned access Make sure that CIFSSMBEcho can handle unaligned fields. Also fix a minor bug that causes this warning: fs/cifs/cifssmb.c: In function 'CIFSSMBEcho': fs/cifs/cifssmb.c:740: warning: large integer implicitly truncated to unsigned type ...WordCount is u8, not __le16, so no need to convert it. This patch should apply cleanly on top of the rest of the patchset to clean up unaligned access. 
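The truncation warning came from treating an 8-bit field as 16-bit: WordCount is a single byte on the wire and takes a plain assignment, while EchoCount and the byte count are 16-bit little-endian values that may be unaligned and therefore go through put_unaligned_le16()/put_bcc_le(). A small illustration of the distinction, using a hypothetical header (not the real smb_hdr):

	#include <linux/types.h>
	#include <asm/unaligned.h>

	struct echo_hdr {
		__u8   word_count;	/* one byte: no endian conversion needed */
		__le16 echo_count;	/* 16-bit LE, possibly unaligned */
	} __attribute__((packed));

	static void fill_echo(struct echo_hdr *hdr)
	{
		hdr->word_count = 1;				/* plain byte store */
		put_unaligned_le16(1, &hdr->echo_count);	/* LE store, no alignment assumed */
	}
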
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifssmb.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 675041a6949c..3106f5e5c633 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -733,9 +733,9 @@ CIFSSMBEcho(struct TCP_Server_Info *server) /* set up echo request */ smb->hdr.Tid = cpu_to_le16(0xffff); - smb->hdr.WordCount = cpu_to_le16(1); - smb->EchoCount = cpu_to_le16(1); - smb->ByteCount = cpu_to_le16(1); + smb->hdr.WordCount = 1; + put_unaligned_le16(1, &smb->EchoCount); + put_bcc_le(1, &smb->hdr); smb->Data[0] = 'a'; smb->hdr.smb_buf_length += 3; -- cgit v1.2.2 From 0ca7a5b9ac5d301845dd6382ff25a699b6263a81 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Fri, 21 Jan 2011 16:40:31 +0900 Subject: nilfs2: fix crash after one superblock became unavailable Fixes the following kernel oops in nilfs_setup_super() which could arise if one of two super-blocks is unavailable. > BUG: unable to handle kernel NULL pointer dereference at (null) > Pid: 3529, comm: mount.nilfs2 Not tainted 2.6.37 #1 / > EIP: 0060:[] EFLAGS: 00010202 CPU: 3 > EIP is at memcpy+0xc/0x1b > Call Trace: > [] ? nilfs_setup_super+0x6c/0xa5 [nilfs2] > [] ? nilfs_get_root_dentry+0x81/0xcb [nilfs2] > [] ? nilfs_mount+0x4f9/0x62c [nilfs2] > [] ? kstrdup+0x36/0x3f > [] ? nilfs_mount+0x0/0x62c [nilfs2] > [] ? vfs_kern_mount+0x4d/0x12c > [] ? get_fs_type+0x76/0x8f > [] ? do_kern_mount+0x33/0xbf > [] ? do_mount+0x2ed/0x714 > [] ? copy_mount_options+0x28/0xfc > [] ? sys_mount+0x72/0xaf > [] ? syscall_call+0x7/0xb Reported-by: Wakko Warner Signed-off-by: Ryusuke Konishi Tested-by: Wakko Warner Cc: stable [2.6.37, 2.6.36] LKML-Reference: <20110121024918.GA29598@animx.eu.org> --- fs/nilfs2/super.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 0994f6a76c07..58fd707174e1 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -704,7 +704,8 @@ skip_mount_setup: sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS); /* synchronize sbp[1] with sbp[0] */ - memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); + if (sbp[1]) + memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); return nilfs_commit_super(sbi, NILFS_SB_COMMIT_ALL); } -- cgit v1.2.2 From ff5fdb61493d95332945630fcae249f896098652 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 22 Jan 2011 20:16:06 -0800 Subject: fs: fix new dcache.c kernel-doc warnings Fix new fs/dcache.c kernel-doc warnings: Warning(fs/dcache.c:184): No description found for parameter 'dentry' Warning(fs/dcache.c:296): No description found for parameter 'parent' Warning(fs/dcache.c:1985): No description found for parameter 'dparent' Warning(fs/dcache.c:1985): Excess function parameter 'parent' description in 'd_validate' Signed-off-by: Randy Dunlap Cc: Alexander Viro Cc: Nick Piggin Signed-off-by: Linus Torvalds --- fs/dcache.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/dcache.c b/fs/dcache.c index 9f493ee4dcba..2a6bd9a4ae97 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -176,6 +176,7 @@ static void d_free(struct dentry *dentry) /** * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups + * @dentry: the target dentry * After this call, in-progress rcu-walk path lookup will fail. This * should be called after unhashing, and after changing d_inode (if * the dentry has not already been unhashed). 
@@ -281,6 +282,7 @@ static void dentry_lru_move_tail(struct dentry *dentry) /** * d_kill - kill dentry and return parent * @dentry: dentry to kill + * @parent: parent dentry * * The dentry must already be unhashed and removed from the LRU. * @@ -1973,7 +1975,7 @@ out: /** * d_validate - verify dentry provided from insecure source (deprecated) * @dentry: The dentry alleged to be valid child of @dparent - * @parent: The parent dentry (known to be valid) + * @dparent: The parent dentry (known to be valid) * * An insecure source has sent us a dentry, here we verify it and dget() it. * This is used by ncpfs in its readdir implementation. -- cgit v1.2.2 From 3f391c79b0686ce183668c6e2b7d02f3e716766c Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sat, 22 Jan 2011 21:07:16 +0100 Subject: CIFS: Remove pointless variable assignment in cifs_dfs_do_automount() In fs/cifs/cifs_dfs_ref.c::cifs_dfs_do_automount() we have this code: ... mnt = ERR_PTR(-EINVAL); if (IS_ERR(tlink)) { mnt = ERR_CAST(tlink); goto free_full_path; } ses = tlink_tcon(tlink)->ses; rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls, &num_referrals, &referrals, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); cifs_put_tlink(tlink); mnt = ERR_PTR(-ENOENT); ... The assignment of 'mnt = ERR_PTR(-EINVAL);' is completely pointless. If we take the 'if (IS_ERR(tlink))' branch we'll set 'mnt' again and we'll also do so if we do not take the branch. There is no way we'll ever use 'mnt' with the assigned 'ERR_PTR(-EINVAL)' value, so we may as well just remove the pointless assignment. Signed-off-by: Jesper Juhl Signed-off-by: Steve French --- fs/cifs/cifs_dfs_ref.c | 1 - 1 file changed, 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 7ed36536e754..f1c68629f277 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -297,7 +297,6 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) cifs_sb = CIFS_SB(mntpt->d_inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); - mnt = ERR_PTR(-EINVAL); if (IS_ERR(tlink)) { mnt = ERR_CAST(tlink); goto free_full_path; -- cgit v1.2.2 From f1d0c998653f1eeec60ee6420e550135b62dbab4 Mon Sep 17 00:00:00 2001 From: Rob Landley Date: Sat, 22 Jan 2011 15:44:05 -0600 Subject: Make CIFS mount work in a container. Teach cifs about network namespaces, so mounting uses adresses/routing visible from the container rather than from init context. A container is a chroot on steroids that changes more than just the root filesystem the new processes see. One thing containers can isolate is "network namespaces", meaning each container can have its own set of ethernet interfaces, each with its own own IP address and routing to the outside world. And if you open a socket in _userspace_ from processes within such a container, this works fine. But sockets opened from within the kernel still use a single global networking context in a lot of places, meaning the new socket's address and routing are correct for PID 1 on the host, but are _not_ what userspace processes in the container get to use. So when you mount a network filesystem from within in a container, the mount code in the CIFS driver uses the host's networking context and not the container's networking context, so it gets the wrong address, uses the wrong routing, and may even try to go out an interface that the container can't even access... Bad stuff. 
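The fix that follows boils down to three steps: capture the mounting process's network namespace when the server structure is created, open the kernel socket inside that namespace instead of the global one, and drop the reference on teardown. In outline, using the helpers the patch itself introduces:

	/* at TCP_Server_Info creation: pin the mount process's netns */
	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));

	/* when connecting: create the socket in that namespace, not init_net */
	rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
			   IPPROTO_TCP, &socket, 1);

	/* when the server is torn down: release the namespace reference */
	put_net(cifs_net_ns(server));
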
This patch copies the mount process's network context into the CIFS structure that stores the rest of the server information for that mount point, and changes the socket open code to use the saved network context instead of the global network context. I.E. "when you attempt to use these addresses, do so relative to THIS set of network interfaces and routing rules, not the old global context from back before we supported containers". The big long HOWTO sets up a test environment on the assumption you've never used ocntainers before. It basically says: 1) configure and build a new kernel that has container support 2) build a new root filesystem that includes the userspace container control package (LXC) 3) package/run them under KVM (so you don't have to mess up your host system in order to play with containers). 4) set up some containers under the KVM system 5) set up contradictory routing in the KVM system and the container so that the host and the container see different things for the same address 6) try to mount a CIFS share from both contexts so you can both force it to work and force it to fail. For a long drawn out test reproduction sequence, see: http://landley.livejournal.com/47024.html http://landley.livejournal.com/47205.html http://landley.livejournal.com/47476.html Signed-off-by: Rob Landley Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsglob.h | 33 +++++++++++++++++++++++++++++++++ fs/cifs/connect.c | 12 ++++++++++-- 2 files changed, 43 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 5bfb75346cb0..edd5b29b53c9 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -166,6 +166,9 @@ struct TCP_Server_Info { struct socket *ssocket; struct sockaddr_storage dstaddr; struct sockaddr_storage srcaddr; /* locally bind to this IP */ +#ifdef CONFIG_NET_NS + struct net *net; +#endif wait_queue_head_t response_q; wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/ struct list_head pending_mid_q; @@ -216,6 +219,36 @@ struct TCP_Server_Info { #endif }; +/* + * Macros to allow the TCP_Server_Info->net field and related code to drop out + * when CONFIG_NET_NS isn't set. + */ + +#ifdef CONFIG_NET_NS + +static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv) +{ + return srv->net; +} + +static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net) +{ + srv->net = net; +} + +#else + +static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv) +{ + return &init_net; +} + +static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net) +{ +} + +#endif + /* * Session structure. 
One of these for each uid session with a particular host */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 18d3c7724d6e..0cc3b81c2e84 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1568,6 +1568,9 @@ cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol) spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { + if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns)) + continue; + if (!match_address(server, addr, (struct sockaddr *)&vol->srcaddr)) continue; @@ -1598,6 +1601,8 @@ cifs_put_tcp_session(struct TCP_Server_Info *server) return; } + put_net(cifs_net_ns(server)); + list_del_init(&server->tcp_ses_list); spin_unlock(&cifs_tcp_ses_lock); @@ -1672,6 +1677,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) goto out_err; } + cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns)); tcp_ses->hostname = extract_hostname(volume_info->UNC); if (IS_ERR(tcp_ses->hostname)) { rc = PTR_ERR(tcp_ses->hostname); @@ -1752,6 +1758,8 @@ cifs_get_tcp_session(struct smb_vol *volume_info) out_err_crypto_release: cifs_crypto_shash_release(tcp_ses); + put_net(cifs_net_ns(tcp_ses)); + out_err: if (tcp_ses) { if (!IS_ERR(tcp_ses->hostname)) @@ -2263,8 +2271,8 @@ generic_ip_connect(struct TCP_Server_Info *server) } if (socket == NULL) { - rc = sock_create_kern(sfamily, SOCK_STREAM, - IPPROTO_TCP, &socket); + rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM, + IPPROTO_TCP, &socket, 1); if (rc < 0) { cERROR(1, "Error %d creating socket", rc); server->ssocket = NULL; -- cgit v1.2.2 From d66bbd441c08fe00ed2add1cf70cb243ebc2b27e Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 21 Jan 2011 21:16:46 -0800 Subject: ceph: avoid picking MDS that is not active Ignore replication or auth frag data if it indicates an MDS that is not active. This can happen if the MDS shuts down and the client has stale data about the namespace distribution across the MDS cluster. If that's the case, fall back to directing the request based on the auth cap (which should always be accurate). Signed-off-by: Sage Weil --- fs/ceph/mds_client.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 509339ceef72..a6949cc7c69a 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -693,9 +693,11 @@ static int __choose_mds(struct ceph_mds_client *mdsc, dout("choose_mds %p %llx.%llx " "frag %u mds%d (%d/%d)\n", inode, ceph_vinop(inode), - frag.frag, frag.mds, + frag.frag, mds, (int)r, frag.ndist); - return mds; + if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= + CEPH_MDS_STATE_ACTIVE) + return mds; } /* since this file/dir wasn't known to be @@ -708,7 +710,9 @@ static int __choose_mds(struct ceph_mds_client *mdsc, dout("choose_mds %p %llx.%llx " "frag %u mds%d (auth)\n", inode, ceph_vinop(inode), frag.frag, mds); - return mds; + if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= + CEPH_MDS_STATE_ACTIVE) + return mds; } } } -- cgit v1.2.2 From 93c100c0b423266c0ee28497e90fdf27c05e6b8e Mon Sep 17 00:00:00 2001 From: Steve French Date: Tue, 25 Jan 2011 19:28:43 +0000 Subject: [CIFS] Replace cifs md5 hashing functions with kernel crypto APIs Replace remaining use of md5 hash functions local to cifs module with kernel crypto APIs. Remove header and source file containing those local functions. 
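The kernel's synchronous hash (shash) interface replaces the open-coded MD5: allocate an "md5" transform, attach it to a descriptor sized via crypto_shash_descsize(), then run init/update/final (or the combined digest helper). A condensed sketch of that flow with error handling trimmed; the symlink_hash() added in the patch below is the authoritative version:

	#include <crypto/hash.h>
	#include <linux/err.h>
	#include <linux/slab.h>

	static int md5_digest(const u8 *data, unsigned int len, u8 *out /* 16 bytes */)
	{
		struct crypto_shash *tfm;
		struct shash_desc *desc;
		int rc;

		tfm = crypto_alloc_shash("md5", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
		if (!desc) {
			crypto_free_shash(tfm);
			return -ENOMEM;
		}
		desc->tfm = tfm;

		rc = crypto_shash_digest(desc, data, len, out);	/* init + update + final */

		kfree(desc);
		crypto_free_shash(tfm);
		return rc;
	}
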
Signed-off-by: Shirish Pargaonkar Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/Makefile | 2 +- fs/cifs/cifsencrypt.c | 1 - fs/cifs/link.c | 59 ++++++-- fs/cifs/md5.c | 366 -------------------------------------------------- fs/cifs/md5.h | 38 ------ fs/cifs/smbencrypt.c | 1 - 6 files changed, 51 insertions(+), 416 deletions(-) delete mode 100644 fs/cifs/md5.c delete mode 100644 fs/cifs/md5.h (limited to 'fs') diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile index 43b19dd39191..e1322296cb69 100644 --- a/fs/cifs/Makefile +++ b/fs/cifs/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_CIFS) += cifs.o cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \ link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o \ - md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o \ + md4.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o \ readdir.o ioctl.o sess.o export.o cifs-$(CONFIG_CIFS_ACL) += cifsacl.o diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 66f3d50d0676..35bf329c90e1 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -24,7 +24,6 @@ #include "cifspdu.h" #include "cifsglob.h" #include "cifs_debug.h" -#include "md5.h" #include "cifs_unicode.h" #include "cifsproto.h" #include "ntlmssp.h" diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 306769de2fb5..d3444ea6ac71 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -28,7 +28,6 @@ #include "cifsproto.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" -#include "md5.h" #define CIFS_MF_SYMLINK_LEN_OFFSET (4+1) #define CIFS_MF_SYMLINK_MD5_OFFSET (CIFS_MF_SYMLINK_LEN_OFFSET+(4+1)) @@ -46,6 +45,45 @@ md5_hash[8], md5_hash[9], md5_hash[10], md5_hash[11],\ md5_hash[12], md5_hash[13], md5_hash[14], md5_hash[15] +static int +symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash) +{ + int rc; + unsigned int size; + struct crypto_shash *md5; + struct sdesc *sdescmd5; + + md5 = crypto_alloc_shash("md5", 0, 0); + if (!md5 || IS_ERR(md5)) { + rc = PTR_ERR(md5); + cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc); + return rc; + } + size = sizeof(struct shash_desc) + crypto_shash_descsize(md5); + sdescmd5 = kmalloc(size, GFP_KERNEL); + if (!sdescmd5) { + rc = -ENOMEM; + cERROR(1, "%s: Memory allocation failure\n", __func__); + goto symlink_hash_err; + } + sdescmd5->shash.tfm = md5; + sdescmd5->shash.flags = 0x0; + + rc = crypto_shash_init(&sdescmd5->shash); + if (rc) { + cERROR(1, "%s: Could not init md5 shash\n", __func__); + goto symlink_hash_err; + } + crypto_shash_update(&sdescmd5->shash, link_str, link_len); + rc = crypto_shash_final(&sdescmd5->shash, md5_hash); + +symlink_hash_err: + crypto_free_shash(md5); + kfree(sdescmd5); + + return rc; +} + static int CIFSParseMFSymlink(const u8 *buf, unsigned int buf_len, @@ -56,7 +94,6 @@ CIFSParseMFSymlink(const u8 *buf, unsigned int link_len; const char *md5_str1; const char *link_str; - struct MD5Context md5_ctx; u8 md5_hash[16]; char md5_str2[34]; @@ -70,9 +107,11 @@ CIFSParseMFSymlink(const u8 *buf, if (rc != 1) return -EINVAL; - cifs_MD5_init(&md5_ctx); - cifs_MD5_update(&md5_ctx, (const u8 *)link_str, link_len); - cifs_MD5_final(md5_hash, &md5_ctx); + rc = symlink_hash(link_len, link_str, md5_hash); + if (rc) { + cFYI(1, "%s: MD5 hash failure: %d\n", __func__, rc); + return rc; + } snprintf(md5_str2, sizeof(md5_str2), CIFS_MF_SYMLINK_MD5_FORMAT, @@ -94,9 +133,9 @@ CIFSParseMFSymlink(const u8 *buf, static int CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str) { + int rc; unsigned int 
link_len; unsigned int ofs; - struct MD5Context md5_ctx; u8 md5_hash[16]; if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE) @@ -107,9 +146,11 @@ CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str) if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN) return -ENAMETOOLONG; - cifs_MD5_init(&md5_ctx); - cifs_MD5_update(&md5_ctx, (const u8 *)link_str, link_len); - cifs_MD5_final(md5_hash, &md5_ctx); + rc = symlink_hash(link_len, link_str, md5_hash); + if (rc) { + cFYI(1, "%s: MD5 hash failure: %d\n", __func__, rc); + return rc; + } snprintf(buf, buf_len, CIFS_MF_SYMLINK_LEN_FORMAT CIFS_MF_SYMLINK_MD5_FORMAT, diff --git a/fs/cifs/md5.c b/fs/cifs/md5.c deleted file mode 100644 index 98b66a54c319..000000000000 --- a/fs/cifs/md5.c +++ /dev/null @@ -1,366 +0,0 @@ -/* - * This code implements the MD5 message-digest algorithm. - * The algorithm is due to Ron Rivest. This code was - * written by Colin Plumb in 1993, no copyright is claimed. - * This code is in the public domain; do with it what you wish. - * - * Equivalent code is available from RSA Data Security, Inc. - * This code has been tested against that, and is equivalent, - * except that you don't need to include two pages of legalese - * with every copy. - * - * To compute the message digest of a chunk of bytes, declare an - * MD5Context structure, pass it to cifs_MD5_init, call cifs_MD5_update as - * needed on buffers full of bytes, and then call cifs_MD5_final, which - * will fill a supplied 16-byte array with the digest. - */ - -/* This code slightly modified to fit into Samba by - abartlet@samba.org Jun 2001 - and to fit the cifs vfs by - Steve French sfrench@us.ibm.com */ - -#include -#include "md5.h" - -static void MD5Transform(__u32 buf[4], __u32 const in[16]); - -/* - * Note: this code is harmless on little-endian machines. - */ -static void -byteReverse(unsigned char *buf, unsigned longs) -{ - __u32 t; - do { - t = (__u32) ((unsigned) buf[3] << 8 | buf[2]) << 16 | - ((unsigned) buf[1] << 8 | buf[0]); - *(__u32 *) buf = t; - buf += 4; - } while (--longs); -} - -/* - * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious - * initialization constants. - */ -void -cifs_MD5_init(struct MD5Context *ctx) -{ - ctx->buf[0] = 0x67452301; - ctx->buf[1] = 0xefcdab89; - ctx->buf[2] = 0x98badcfe; - ctx->buf[3] = 0x10325476; - - ctx->bits[0] = 0; - ctx->bits[1] = 0; -} - -/* - * Update context to reflect the concatenation of another buffer full - * of bytes. - */ -void -cifs_MD5_update(struct MD5Context *ctx, unsigned char const *buf, unsigned len) -{ - register __u32 t; - - /* Update bitcount */ - - t = ctx->bits[0]; - if ((ctx->bits[0] = t + ((__u32) len << 3)) < t) - ctx->bits[1]++; /* Carry from low to high */ - ctx->bits[1] += len >> 29; - - t = (t >> 3) & 0x3f; /* Bytes already in shsInfo->data */ - - /* Handle any leading odd-sized chunks */ - - if (t) { - unsigned char *p = (unsigned char *) ctx->in + t; - - t = 64 - t; - if (len < t) { - memmove(p, buf, len); - return; - } - memmove(p, buf, t); - byteReverse(ctx->in, 16); - MD5Transform(ctx->buf, (__u32 *) ctx->in); - buf += t; - len -= t; - } - /* Process data in 64-byte chunks */ - - while (len >= 64) { - memmove(ctx->in, buf, 64); - byteReverse(ctx->in, 16); - MD5Transform(ctx->buf, (__u32 *) ctx->in); - buf += 64; - len -= 64; - } - - /* Handle any remaining bytes of data. 
*/ - - memmove(ctx->in, buf, len); -} - -/* - * Final wrapup - pad to 64-byte boundary with the bit pattern - * 1 0* (64-bit count of bits processed, MSB-first) - */ -void -cifs_MD5_final(unsigned char digest[16], struct MD5Context *ctx) -{ - unsigned int count; - unsigned char *p; - - /* Compute number of bytes mod 64 */ - count = (ctx->bits[0] >> 3) & 0x3F; - - /* Set the first char of padding to 0x80. This is safe since there is - always at least one byte free */ - p = ctx->in + count; - *p++ = 0x80; - - /* Bytes of padding needed to make 64 bytes */ - count = 64 - 1 - count; - - /* Pad out to 56 mod 64 */ - if (count < 8) { - /* Two lots of padding: Pad the first block to 64 bytes */ - memset(p, 0, count); - byteReverse(ctx->in, 16); - MD5Transform(ctx->buf, (__u32 *) ctx->in); - - /* Now fill the next block with 56 bytes */ - memset(ctx->in, 0, 56); - } else { - /* Pad block to 56 bytes */ - memset(p, 0, count - 8); - } - byteReverse(ctx->in, 14); - - /* Append length in bits and transform */ - ((__u32 *) ctx->in)[14] = ctx->bits[0]; - ((__u32 *) ctx->in)[15] = ctx->bits[1]; - - MD5Transform(ctx->buf, (__u32 *) ctx->in); - byteReverse((unsigned char *) ctx->buf, 4); - memmove(digest, ctx->buf, 16); - memset(ctx, 0, sizeof(*ctx)); /* In case it's sensitive */ -} - -/* The four core functions - F1 is optimized somewhat */ - -/* #define F1(x, y, z) (x & y | ~x & z) */ -#define F1(x, y, z) (z ^ (x & (y ^ z))) -#define F2(x, y, z) F1(z, x, y) -#define F3(x, y, z) (x ^ y ^ z) -#define F4(x, y, z) (y ^ (x | ~z)) - -/* This is the central step in the MD5 algorithm. */ -#define MD5STEP(f, w, x, y, z, data, s) \ - (w += f(x, y, z) + data, w = w<>(32-s), w += x) - -/* - * The core of the MD5 algorithm, this alters an existing MD5 hash to - * reflect the addition of 16 longwords of new data. cifs_MD5_update blocks - * the data and converts bytes into longwords for this routine. 
- */ -static void -MD5Transform(__u32 buf[4], __u32 const in[16]) -{ - register __u32 a, b, c, d; - - a = buf[0]; - b = buf[1]; - c = buf[2]; - d = buf[3]; - - MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); - MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); - MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); - MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); - MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); - MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); - MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); - MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); - MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); - MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); - MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); - MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); - MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); - MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); - MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); - MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); - - MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); - MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); - MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); - MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); - MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); - MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); - MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); - MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); - MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); - MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); - MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); - MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20); - MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); - MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); - MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); - MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); - - MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); - MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); - MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16); - MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); - MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); - MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); - MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16); - MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); - MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); - MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); - MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); - MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); - MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); - MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); - MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); - MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); - - MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); - MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); - MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); - MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); - MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); - MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); - MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); - MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21); - MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); - MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); - MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); - MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); - MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); - MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); - MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); - MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); - - buf[0] += a; - buf[1] += b; - buf[2] += c; - buf[3] += d; -} - -#if 0 /* currently unused */ 
-/*********************************************************************** - the rfc 2104 version of hmac_md5 initialisation. -***********************************************************************/ -static void -hmac_md5_init_rfc2104(unsigned char *key, int key_len, - struct HMACMD5Context *ctx) -{ - int i; - - /* if key is longer than 64 bytes reset it to key=MD5(key) */ - if (key_len > 64) { - unsigned char tk[16]; - struct MD5Context tctx; - - cifs_MD5_init(&tctx); - cifs_MD5_update(&tctx, key, key_len); - cifs_MD5_final(tk, &tctx); - - key = tk; - key_len = 16; - } - - /* start out by storing key in pads */ - memset(ctx->k_ipad, 0, sizeof(ctx->k_ipad)); - memset(ctx->k_opad, 0, sizeof(ctx->k_opad)); - memcpy(ctx->k_ipad, key, key_len); - memcpy(ctx->k_opad, key, key_len); - - /* XOR key with ipad and opad values */ - for (i = 0; i < 64; i++) { - ctx->k_ipad[i] ^= 0x36; - ctx->k_opad[i] ^= 0x5c; - } - - cifs_MD5_init(&ctx->ctx); - cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64); -} -#endif - -/*********************************************************************** - the microsoft version of hmac_md5 initialisation. -***********************************************************************/ -void -hmac_md5_init_limK_to_64(const unsigned char *key, int key_len, - struct HMACMD5Context *ctx) -{ - int i; - - /* if key is longer than 64 bytes truncate it */ - if (key_len > 64) - key_len = 64; - - /* start out by storing key in pads */ - memset(ctx->k_ipad, 0, sizeof(ctx->k_ipad)); - memset(ctx->k_opad, 0, sizeof(ctx->k_opad)); - memcpy(ctx->k_ipad, key, key_len); - memcpy(ctx->k_opad, key, key_len); - - /* XOR key with ipad and opad values */ - for (i = 0; i < 64; i++) { - ctx->k_ipad[i] ^= 0x36; - ctx->k_opad[i] ^= 0x5c; - } - - cifs_MD5_init(&ctx->ctx); - cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64); -} - -/*********************************************************************** - update hmac_md5 "inner" buffer -***********************************************************************/ -void -hmac_md5_update(const unsigned char *text, int text_len, - struct HMACMD5Context *ctx) -{ - cifs_MD5_update(&ctx->ctx, text, text_len); /* then text of datagram */ -} - -/*********************************************************************** - finish off hmac_md5 "inner" buffer and generate outer one. -***********************************************************************/ -void -hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx) -{ - struct MD5Context ctx_o; - - cifs_MD5_final(digest, &ctx->ctx); - - cifs_MD5_init(&ctx_o); - cifs_MD5_update(&ctx_o, ctx->k_opad, 64); - cifs_MD5_update(&ctx_o, digest, 16); - cifs_MD5_final(digest, &ctx_o); -} - -/*********************************************************** - single function to calculate an HMAC MD5 digest from data. - use the microsoft hmacmd5 init method because the key is 16 bytes. 
-************************************************************/ -#if 0 /* currently unused */ -static void -hmac_md5(unsigned char key[16], unsigned char *data, int data_len, - unsigned char *digest) -{ - struct HMACMD5Context ctx; - hmac_md5_init_limK_to_64(key, 16, &ctx); - if (data_len != 0) - hmac_md5_update(data, data_len, &ctx); - - hmac_md5_final(digest, &ctx); -} -#endif diff --git a/fs/cifs/md5.h b/fs/cifs/md5.h deleted file mode 100644 index 6fba8cb402fd..000000000000 --- a/fs/cifs/md5.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef MD5_H -#define MD5_H -#ifndef HEADER_MD5_H -/* Try to avoid clashes with OpenSSL */ -#define HEADER_MD5_H -#endif - -struct MD5Context { - __u32 buf[4]; - __u32 bits[2]; - unsigned char in[64]; -}; -#endif /* !MD5_H */ - -#ifndef _HMAC_MD5_H -struct HMACMD5Context { - struct MD5Context ctx; - unsigned char k_ipad[65]; - unsigned char k_opad[65]; -}; -#endif /* _HMAC_MD5_H */ - -void cifs_MD5_init(struct MD5Context *context); -void cifs_MD5_update(struct MD5Context *context, unsigned char const *buf, - unsigned len); -void cifs_MD5_final(unsigned char digest[16], struct MD5Context *context); - -/* The following definitions come from lib/hmacmd5.c */ - -/* void hmac_md5_init_rfc2104(unsigned char *key, int key_len, - struct HMACMD5Context *ctx);*/ -void hmac_md5_init_limK_to_64(const unsigned char *key, int key_len, - struct HMACMD5Context *ctx); -void hmac_md5_update(const unsigned char *text, int text_len, - struct HMACMD5Context *ctx); -void hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx); -/* void hmac_md5(unsigned char key[16], unsigned char *data, int data_len, - unsigned char *digest);*/ diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c index 192ea51af20f..30135005e4f3 100644 --- a/fs/cifs/smbencrypt.c +++ b/fs/cifs/smbencrypt.c @@ -32,7 +32,6 @@ #include "cifs_unicode.h" #include "cifspdu.h" #include "cifsglob.h" -#include "md5.h" #include "cifs_debug.h" #include "cifsencrypt.h" -- cgit v1.2.2 From 72432ffcf555decbbae47f1be338e1d2f210aa69 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Mon, 24 Jan 2011 14:16:35 -0500 Subject: CIFS: Implement cifs_strict_writev (try #4) If we don't have Exclusive oplock we write a data to the server. Also set invalidate_mapping flag on the inode if we wrote something to the server. Add cifs_iovec_write to let the client write iovec buffers through CIFSSMBWrite2. 
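In strict cache mode the write path only trusts the page cache when the client holds an exclusive oplock; otherwise the data is pushed to the server and the cached mapping is invalidated. On the wire, each write is capped at the negotiated wsize, so the iovec data is staged through a temporary page array sized for one chunk at a time. The chunk arithmetic of the patch's get_numpages() is equivalent to the following compact restatement (illustrative only, not the code actually merged):

	#include <linux/kernel.h>
	#include <linux/pagemap.h>

	static size_t pages_for_chunk(size_t wsize, size_t len, size_t *chunk)
	{
		size_t clen = min(len, wsize);		/* one SMB write sends at most wsize bytes */
		size_t npages = DIV_ROUND_UP(clen, PAGE_CACHE_SIZE);	/* round a partial page up */

		if (chunk)
			*chunk = clen;
		return npages;
	}
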
Signed-off-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsfs.c | 15 ++-- fs/cifs/cifsfs.h | 4 +- fs/cifs/cifsproto.h | 2 + fs/cifs/file.c | 202 +++++++++++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 217 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index a8323f1dc1c4..f2970136d17d 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -600,10 +600,17 @@ static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, { struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; ssize_t written; + int rc; written = generic_file_aio_write(iocb, iov, nr_segs, pos); - if (!CIFS_I(inode)->clientCanCacheAll) - filemap_fdatawrite(inode->i_mapping); + + if (CIFS_I(inode)->clientCanCacheAll) + return written; + + rc = filemap_fdatawrite(inode->i_mapping); + if (rc) + cFYI(1, "cifs_file_aio_write: %d rc on %p inode", rc, inode); + return written; } @@ -737,7 +744,7 @@ const struct file_operations cifs_file_strict_ops = { .read = do_sync_read, .write = do_sync_write, .aio_read = cifs_strict_readv, - .aio_write = cifs_file_aio_write, + .aio_write = cifs_strict_writev, .open = cifs_open, .release = cifs_close, .lock = cifs_lock, @@ -793,7 +800,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = { .read = do_sync_read, .write = do_sync_write, .aio_read = cifs_strict_readv, - .aio_write = cifs_file_aio_write, + .aio_write = cifs_strict_writev, .open = cifs_open, .release = cifs_close, .fsync = cifs_strict_fsync, diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index f23206d46531..14789a97304e 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -85,7 +85,9 @@ extern ssize_t cifs_user_read(struct file *file, char __user *read_data, extern ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos); extern ssize_t cifs_user_write(struct file *file, const char __user *write_data, - size_t write_size, loff_t *poffset); + size_t write_size, loff_t *poffset); +extern ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos); extern int cifs_lock(struct file *, int, struct file_lock *); extern int cifs_fsync(struct file *, int); extern int cifs_strict_fsync(struct file *, int); diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 982895fa7615..35c989f4924f 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -85,6 +85,8 @@ extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length); extern bool is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *); extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); +extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, + unsigned int bytes_written); extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool); extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool); extern unsigned int smbCalcSize(struct smb_hdr *ptr); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index d7d65a70678e..0de17c1db608 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -848,7 +848,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) } /* update the file size (if needed) after a write */ -static void +void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, unsigned int bytes_written) { @@ -1619,6 +1619,206 @@ int cifs_flush(struct file *file, fl_owner_t id) return rc; } +static int +cifs_write_allocate_pages(struct page 
**pages, unsigned long num_pages) +{ + int rc = 0; + unsigned long i; + + for (i = 0; i < num_pages; i++) { + pages[i] = alloc_page(__GFP_HIGHMEM); + if (!pages[i]) { + /* + * save number of pages we have already allocated and + * return with ENOMEM error + */ + num_pages = i; + rc = -ENOMEM; + goto error; + } + } + + return rc; + +error: + for (i = 0; i < num_pages; i++) + put_page(pages[i]); + return rc; +} + +static inline +size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len) +{ + size_t num_pages; + size_t clen; + + clen = min_t(const size_t, len, wsize); + num_pages = clen / PAGE_CACHE_SIZE; + if (clen % PAGE_CACHE_SIZE) + num_pages++; + + if (cur_len) + *cur_len = clen; + + return num_pages; +} + +static ssize_t +cifs_iovec_write(struct file *file, const struct iovec *iov, + unsigned long nr_segs, loff_t *poffset) +{ + size_t total_written = 0, written = 0; + unsigned long num_pages, npages; + size_t copied, len, cur_len, i; + struct kvec *to_send; + struct page **pages; + struct iov_iter it; + struct inode *inode; + struct cifsFileInfo *open_file; + struct cifsTconInfo *pTcon; + struct cifs_sb_info *cifs_sb; + int xid, rc; + + len = iov_length(iov, nr_segs); + if (!len) + return 0; + + rc = generic_write_checks(file, poffset, &len, 0); + if (rc) + return rc; + + cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); + num_pages = get_numpages(cifs_sb->wsize, len, &cur_len); + + pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL); + if (!pages) + return -ENOMEM; + + to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL); + if (!to_send) { + kfree(pages); + return -ENOMEM; + } + + rc = cifs_write_allocate_pages(pages, num_pages); + if (rc) { + kfree(pages); + kfree(to_send); + return rc; + } + + xid = GetXid(); + open_file = file->private_data; + pTcon = tlink_tcon(open_file->tlink); + inode = file->f_path.dentry->d_inode; + + iov_iter_init(&it, iov, nr_segs, len, 0); + npages = num_pages; + + do { + size_t save_len = cur_len; + for (i = 0; i < npages; i++) { + copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE); + copied = iov_iter_copy_from_user(pages[i], &it, 0, + copied); + cur_len -= copied; + iov_iter_advance(&it, copied); + to_send[i+1].iov_base = kmap(pages[i]); + to_send[i+1].iov_len = copied; + } + + cur_len = save_len - cur_len; + + do { + if (open_file->invalidHandle) { + rc = cifs_reopen_file(open_file, false); + if (rc != 0) + break; + } + rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, + cur_len, *poffset, &written, + to_send, npages, 0); + } while (rc == -EAGAIN); + + for (i = 0; i < npages; i++) + kunmap(pages[i]); + + if (written) { + len -= written; + total_written += written; + cifs_update_eof(CIFS_I(inode), *poffset, written); + *poffset += written; + } else if (rc < 0) { + if (!total_written) + total_written = rc; + break; + } + + /* get length and number of kvecs of the next write */ + npages = get_numpages(cifs_sb->wsize, len, &cur_len); + } while (len > 0); + + if (total_written > 0) { + spin_lock(&inode->i_lock); + if (*poffset > inode->i_size) + i_size_write(inode, *poffset); + spin_unlock(&inode->i_lock); + } + + cifs_stats_bytes_written(pTcon, total_written); + mark_inode_dirty_sync(inode); + + for (i = 0; i < num_pages; i++) + put_page(pages[i]); + kfree(to_send); + kfree(pages); + FreeXid(xid); + return total_written; +} + +static ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) +{ + ssize_t written; + struct inode *inode; + + inode = 
iocb->ki_filp->f_path.dentry->d_inode; + + /* + * BB - optimize the way when signing is disabled. We can drop this + * extra memory-to-memory copying and use iovec buffers for constructing + * write request. + */ + + written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos); + if (written > 0) { + CIFS_I(inode)->invalid_mapping = true; + iocb->ki_pos = pos; + } + + return written; +} + +ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) +{ + struct inode *inode; + + inode = iocb->ki_filp->f_path.dentry->d_inode; + + if (CIFS_I(inode)->clientCanCacheAll) + return generic_file_aio_write(iocb, iov, nr_segs, pos); + + /* + * In strict cache mode we need to write the data to the server exactly + * from the pos to pos+len-1 rather than flush all affected pages + * because it may cause a error with mandatory locks on these pages but + * not on the region from pos to ppos+len-1. + */ + + return cifs_user_writev(iocb, iov, nr_segs, pos); +} + static ssize_t cifs_iovec_read(struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *poffset) -- cgit v1.2.2 From d39454ffe4a3c85428483b8a8a8e5e797b6363d5 Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Mon, 24 Jan 2011 14:16:35 -0500 Subject: CIFS: Add strictcache mount option Use for switching on strict cache mode. In this mode the client reads from the cache all the time it has Oplock Level II, otherwise - read from the server. As for write - the client stores a data in the cache in Exclusive Oplock case, otherwise - write directly to the server. Signed-off-by: Pavel Shilovsky Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/README | 5 +++++ fs/cifs/connect.c | 5 +++++ 2 files changed, 10 insertions(+) (limited to 'fs') diff --git a/fs/cifs/README b/fs/cifs/README index 46af99ab3614..fe1683590828 100644 --- a/fs/cifs/README +++ b/fs/cifs/README @@ -452,6 +452,11 @@ A partial list of the supported mount options follows: if oplock (caching token) is granted and held. Note that direct allows write operations larger than page size to be sent to the server. + strictcache Use for switching on strict cache mode. In this mode the + client read from the cache all the time it has Oplock Level II, + otherwise - read from the server. All written data are stored + in the cache, but if the client doesn't have Exclusive Oplock, + it writes the data to the server. acl Allow setfacl and getfacl to manage posix ACLs if server supports them. (default) noacl Do not allow setfacl and getfacl calls on this mount diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 0cc3b81c2e84..47034af67b09 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -87,6 +87,7 @@ struct smb_vol { bool no_xattr:1; /* set if xattr (EA) support should be disabled*/ bool server_ino:1; /* use inode numbers from server ie UniqueId */ bool direct_io:1; + bool strict_io:1; /* strict cache behavior */ bool remap:1; /* set to remap seven reserved chars in filenames */ bool posix_paths:1; /* unset to not ask for posix pathnames. */ bool no_linux_ext:1; @@ -1344,6 +1345,8 @@ cifs_parse_mount_options(char *options, const char *devname, vol->direct_io = 1; } else if (strnicmp(data, "forcedirectio", 13) == 0) { vol->direct_io = 1; + } else if (strnicmp(data, "strictcache", 11) == 0) { + vol->strict_io = 1; } else if (strnicmp(data, "noac", 4) == 0) { printk(KERN_WARNING "CIFS: Mount option noac not " "supported. 
Instead set " @@ -2584,6 +2587,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info, if (pvolume_info->multiuser) cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_NO_PERM); + if (pvolume_info->strict_io) + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO; if (pvolume_info->direct_io) { cFYI(1, "mounting share using direct i/o"); cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO; -- cgit v1.2.2 From ad3d2eedf0ed3611f5f86b9e4d0d15cc76c63465 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Mon, 17 Jan 2011 18:41:50 +0000 Subject: NFS4: Avoid potential NULL pointer dereference in decode_and_add_ds(). On Mon, 17 Jan 2011, Mi Jinlong wrote: > > > Jesper Juhl: > > strrchr() can return NULL if nothing is found. If this happens we'll > > dereference a NULL pointer in > > fs/nfs/nfs4filelayoutdev.c::decode_and_add_ds(). > > > > I tried to find some other code that guarantees that this can never > > happen but I was unsuccessful. So, unless someone else can point to some > > code that ensures this can never be a problem, I believe this patch is > > needed. > > > > While I was changing this code I also noticed that all the dprintk() > > statements, except one, start with "%s:". The one missing the ":" I added > > it to. > > Maybe another one also should be changed at decode_and_add_ds() at line 243: > > 243 printk("%s Decoded address and port %s\n", __func__, buf); > Missed that one. Thanks. Signed-off-by: Jesper Juhl Signed-off-by: Trond Myklebust --- fs/nfs/nfs4filelayoutdev.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index 51fe64ace55a..f5c9b125e8cc 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c @@ -214,7 +214,7 @@ decode_and_add_ds(__be32 **pp, struct inode *inode) /* ipv6 length plus port is legal */ if (rlen > INET6_ADDRSTRLEN + 8) { - dprintk("%s Invalid address, length %d\n", __func__, + dprintk("%s: Invalid address, length %d\n", __func__, rlen); goto out_err; } @@ -225,6 +225,11 @@ decode_and_add_ds(__be32 **pp, struct inode *inode) /* replace the port dots with dashes for the in4_pton() delimiter*/ for (i = 0; i < 2; i++) { char *res = strrchr(buf, '.'); + if (!res) { + dprintk("%s: Failed finding expected dots in port\n", + __func__); + goto out_free; + } *res = '-'; } @@ -240,7 +245,7 @@ decode_and_add_ds(__be32 **pp, struct inode *inode) port = htons((tmp[0] << 8) | (tmp[1])); ds = nfs4_pnfs_ds_add(inode, ip_addr, port); - dprintk("%s Decoded address and port %s\n", __func__, buf); + dprintk("%s: Decoded address and port %s\n", __func__, buf); out_free: kfree(buf); out_err: -- cgit v1.2.2 From 839f7ad6932d95f4d5ae7267b95c574714ff3d5b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 21 Jan 2011 15:54:57 +0000 Subject: NFS: Fix "kernel BUG at fs/aio.c:554!" Nick Piggin reports: > I'm getting use after frees in aio code in NFS > > [ 2703.396766] Call Trace: > [ 2703.396858] [] ? native_sched_clock+0x27/0x80 > [ 2703.396959] [] ? put_lock_stats+0xe/0x40 > [ 2703.397058] [] ? lock_release_holdtime+0xa8/0x140 > [ 2703.397159] [] lock_acquire+0x95/0x1b0 > [ 2703.397260] [] ? aio_put_req+0x2b/0x60 > [ 2703.397361] [] ? get_parent_ip+0x11/0x50 > [ 2703.397464] [] _raw_spin_lock_irq+0x41/0x80 > [ 2703.397564] [] ? 
aio_put_req+0x2b/0x60 > [ 2703.397662] [] aio_put_req+0x2b/0x60 > [ 2703.397761] [] do_io_submit+0x2be/0x7c0 > [ 2703.397895] [] sys_io_submit+0xb/0x10 > [ 2703.397995] [] system_call_fastpath+0x16/0x1b > > Adding some tracing, it is due to nfs completing the request then > returning something other than -EIOCBQUEUED, so aio.c > also completes the request. To address this, prevent the NFS direct I/O engine from completing async iocbs when the forward path returns an error without starting any I/O. This fix appears to survive ^C during both "xfstest no. 208" and "fsx -Z." It's likely this bug has existed for a very long while, as we are seeing very similar symptoms in OEL 5. Copying stable. Cc: Stable Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/direct.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) (limited to 'fs') diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index e6ace0d93c71..9943a75bb6d1 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -407,15 +407,18 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, pos += vec->iov_len; } + /* + * If no bytes were started, return the error, and let the + * generic layer handle the completion. + */ + if (requested_bytes == 0) { + nfs_direct_req_release(dreq); + return result < 0 ? result : -EIO; + } + if (put_dreq(dreq)) nfs_direct_complete(dreq); - - if (requested_bytes != 0) - return 0; - - if (result < 0) - return result; - return -EIO; + return 0; } static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov, @@ -841,15 +844,18 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, pos += vec->iov_len; } + /* + * If no bytes were started, return the error, and let the + * generic layer handle the completion. + */ + if (requested_bytes == 0) { + nfs_direct_req_release(dreq); + return result < 0 ? result : -EIO; + } + if (put_dreq(dreq)) nfs_direct_write_complete(dreq, dreq->inode); - - if (requested_bytes != 0) - return 0; - - if (result < 0) - return result; - return -EIO; + return 0; } static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov, -- cgit v1.2.2 From ee5dc7732bd557bae6d10873a0aac606d2c551fb Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 21 Jan 2011 03:05:18 +0000 Subject: NFS: Fix "kernel BUG at fs/nfs/nfs3xdr.c:1338!" Milan Broz reports: > on today Linus' tree I get OOps if using nfs. > > server (2.6.36) exports dir: > /dir 172.16.1.0/24(rw,async,all_squash,no_subtree_check,anonuid=500,anongid=500) > > on client it is mounted in fstab > server:/dir /mnt/tst nfs rw,soft 0 0 > > and these commands OOpses it (simplified from a configure script): > > cd /dir > touch x > install x y > > [ 105.327701] ------------[ cut here ]------------ > [ 105.327979] kernel BUG at fs/nfs/nfs3xdr.c:1338! 
> [ 105.328075] invalid opcode: 0000 [#1] PREEMPT SMP > [ 105.328223] last sysfs file: /sys/devices/virtual/bdi/0:16/uevent > [ 105.328349] Modules linked in: usbcore dm_mod > [ 105.328553] > [ 105.328678] Pid: 3710, comm: install Not tainted 2.6.37+ #423 440BX Desktop Reference Platform/VMware Virtual Platform > [ 105.328853] EIP: 0060:[] EFLAGS: 00010282 CPU: 0 > [ 105.329152] EIP is at nfs3_xdr_enc_setacl3args+0x61/0x98 > [ 105.329249] EAX: ffffffea EBX: ce941d98 ECX: 00000000 EDX: 00000004 > [ 105.329340] ESI: ce941cd0 EDI: 000000a4 EBP: ce941cc0 ESP: ce941cb4 > [ 105.329431] DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 > [ 105.329525] Process install (pid: 3710, ti=ce940000 task=ced36f20 task.ti=ce940000) > [ 105.336600] Stack: > [ 105.336693] ce941cd0 ce9dc000 00000000 ce941cf8 c12ecd02 c12f43e0 c116c00b cf754158 > [ 105.336982] ce9dc004 cf754284 ce9dc004 cf7ffee8 ceff9978 ce9dc000 cf7ffee8 ce9dc000 > [ 105.337182] ce9dc000 ce941d14 c12e698d cf75412c ce941d98 cf7ffee8 cf7fff20 00000000 > [ 105.337405] Call Trace: > [ 105.337695] [] rpcauth_wrap_req+0x75/0x7f > [ 105.337806] [] ? xdr_encode_opaque+0x12/0x15 > [ 105.337898] [] ? nfs3_xdr_enc_setacl3args+0x0/0x98 > [ 105.337988] [] call_transmit+0x17e/0x1e8 > [ 105.338072] [] __rpc_execute+0x6d/0x1a6 > [ 105.338155] [] rpc_execute+0x34/0x37 > [ 105.338235] [] rpc_run_task+0xb5/0xbd > [ 105.338316] [] rpc_call_sync+0x3d/0x58 > [ 105.338402] [] nfs3_proc_setacls+0x18e/0x24f > [ 105.338493] [] ? __kmalloc+0x148/0x1c4 > [ 105.338579] [] ? posix_acl_alloc+0x12/0x22 > [ 105.338665] [] nfs3_proc_setacl+0xa0/0xca > [ 105.338748] [] nfs3_setxattr+0x62/0x88 > [ 105.338834] [] ? sub_preempt_count+0x7c/0x89 > [ 105.338926] [] ? nfs3_setxattr+0x0/0x88 > [ 105.339026] [] __vfs_setxattr_noperm+0x26/0x95 > [ 105.339114] [] vfs_setxattr+0x5b/0x76 > [ 105.339211] [] setxattr+0x9d/0xc3 > [ 105.339298] [] ? handle_pte_fault+0x258/0x5cb > [ 105.339428] [] ? __free_pages+0x1a/0x23 > [ 105.339517] [] ? up_read+0x16/0x2c > [ 105.339599] [] ? fget+0x0/0xa3 > [ 105.339677] [] ? fget+0x0/0xa3 > [ 105.339760] [] ? get_parent_ip+0xb/0x31 > [ 105.339843] [] ? sub_preempt_count+0x7c/0x89 > [ 105.339931] [] sys_fsetxattr+0x51/0x79 > [ 105.340014] [] sysenter_do_call+0x12/0x32 > [ 105.340133] Code: 2e 76 18 00 58 31 d2 8b 7f 28 f6 43 04 01 74 03 8b 53 08 6a 00 8b 46 04 6a 01 8b 0b 52 89 fa e8 85 10 f8 ff 83 c4 0c 85 c0 79 04 <0f> 0b eb fe 31 c9 f6 43 04 04 74 03 8b 4b 0c 68 00 10 00 00 8d > [ 105.350321] EIP: [] nfs3_xdr_enc_setacl3args+0x61/0x98 SS:ESP 0068:ce941cb4 > [ 105.364385] ---[ end trace 01fcfe7f0f7f6e4a ]--- nfs3_xdr_enc_setacl3args() is not properly setting up the target buffer before nfsacl_encode() attempts to encode the ACL. Introduced by commit d9c407b1 "NFS: Introduce new-style XDR encoding functions for NFSv3." Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/nfs3xdr.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 01c5e8b1941d..183c6b123d0f 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -1328,10 +1328,13 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req, encode_nfs_fh3(xdr, NFS_FH(args->inode)); encode_uint32(xdr, args->mask); + + base = req->rq_slen; if (args->npages != 0) xdr_write_pages(xdr, args->pages, 0, args->len); + else + xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE); - base = req->rq_slen; error = nfsacl_encode(xdr->buf, base, args->inode, (args->mask & NFS_ACL) ? 
args->acl_access : NULL, 1, 0); -- cgit v1.2.2 From 731f3f482ad3b2c58a1af2d0a9a634a82803706a Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 21 Jan 2011 03:05:28 +0000 Subject: NFS: nfsacl_{encode,decode} should return signed integer Clean up. The nfsacl_encode() and nfsacl_decode() functions return negative errno values, and each call site verifies that the returned value is not negative. Change the synopsis of both of these functions to reflect this usage. Document the synopsis and return values. Reported-by: Trond Myklebust Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs_common/nfsacl.c | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c index fc1c52571c03..a3e78bd18679 100644 --- a/fs/nfs_common/nfsacl.c +++ b/fs/nfs_common/nfsacl.c @@ -72,9 +72,20 @@ xdr_nfsace_encode(struct xdr_array2_desc *desc, void *elem) return 0; } -unsigned int -nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, - struct posix_acl *acl, int encode_entries, int typeflag) +/** + * nfsacl_encode - Encode an NFSv3 ACL + * + * @buf: destination xdr_buf to contain XDR encoded ACL + * @base: byte offset in xdr_buf where XDR'd ACL begins + * @inode: inode of file whose ACL this is + * @acl: posix_acl to encode + * @encode_entries: whether to encode ACEs as well + * @typeflag: ACL type: NFS_ACL_DEFAULT or zero + * + * Returns size of encoded ACL in bytes or a negative errno value. + */ +int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, + struct posix_acl *acl, int encode_entries, int typeflag) { int entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0; struct nfsacl_encode_desc nfsacl_desc = { @@ -224,9 +235,18 @@ posix_acl_from_nfsacl(struct posix_acl *acl) return 0; } -unsigned int -nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt, - struct posix_acl **pacl) +/** + * nfsacl_decode - Decode an NFSv3 ACL + * + * @buf: xdr_buf containing XDR'd ACL data to decode + * @base: byte offset in xdr_buf where XDR'd ACL begins + * @aclcnt: count of ACEs in decoded posix_acl + * @pacl: buffer in which to place decoded posix_acl + * + * Returns the length of the decoded ACL in bytes, or a negative errno value. + */ +int nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt, + struct posix_acl **pacl) { struct nfsacl_decode_desc nfsacl_desc = { .desc = { -- cgit v1.2.2 From f61f6da0d53842e849bab7f69e1431bd3de1136d Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 21 Jan 2011 03:05:38 +0000 Subject: NFS: Prevent memory allocation failure in nfsacl_encode() nfsacl_encode() allocates memory in certain cases. This of course is not guaranteed to work. Since commit 9f06c719 "SUNRPC: New xdr_streams XDR encoder API", the kernel's XDR encoders can't return a result indicating possibly a failure, so a memory allocation failure in nfsacl_encode() has become fatal (ie, the XDR code Oopses) in some cases. However, the allocated memory is a tiny fixed amount, on the order of 40-50 bytes. We can easily use a stack-allocated buffer for this, with only a wee bit of nose-holding. 
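A minimal userspace sketch of that stack-buffer trick, assuming a simplified ACL layout; the struct and field names here are illustrative only, while the actual patch (quoted below) does the same thing with struct nfsacl_simple_acl and the new posix_acl_init() helper:

        #include <stdio.h>
        #include <string.h>

        struct ace { int tag, perm, id; };
        struct acl { int count; struct ace entries[0]; };  /* GNU C trailing array */

        /* Wrapper that reserves room for the 4-entry fake ACL on the stack. */
        struct simple_acl {
                struct acl acl;
                struct ace entries[4];
        };

        int main(void)
        {
                struct simple_acl buf;
                struct acl *acl = &buf.acl;

                memset(&buf, 0, sizeof(buf));
                acl->count = 4;                 /* no allocation, so this cannot fail */
                acl->entries[0].tag = 1;        /* ACL_USER_OBJ, ACL_GROUP_OBJ, ... */
                printf("%d entries in %zu bytes of stack\n", acl->count, sizeof(buf));
                return 0;
        }
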
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/nfs3acl.c | 4 ++-- fs/nfs_common/nfsacl.c | 22 +++++++++++++++------- fs/posix_acl.c | 17 +++++++++++++---- 3 files changed, 30 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 9f88c5f4c7e2..274342771655 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -311,8 +311,8 @@ static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, if (!nfs_server_capable(inode, NFS_CAP_ACLS)) goto out; - /* We are doing this here, because XDR marshalling can only - return -ENOMEM. */ + /* We are doing this here because XDR marshalling does not + * return any results, it BUGs. */ status = -ENOSPC; if (acl != NULL && acl->a_count > NFS_ACL_MAX_ENTRIES) goto out; diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c index a3e78bd18679..84c27d69d421 100644 --- a/fs/nfs_common/nfsacl.c +++ b/fs/nfs_common/nfsacl.c @@ -42,6 +42,11 @@ struct nfsacl_encode_desc { gid_t gid; }; +struct nfsacl_simple_acl { + struct posix_acl acl; + struct posix_acl_entry ace[4]; +}; + static int xdr_nfsace_encode(struct xdr_array2_desc *desc, void *elem) { @@ -99,17 +104,22 @@ int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, .uid = inode->i_uid, .gid = inode->i_gid, }; + struct nfsacl_simple_acl aclbuf; int err; - struct posix_acl *acl2 = NULL; if (entries > NFS_ACL_MAX_ENTRIES || xdr_encode_word(buf, base, entries)) return -EINVAL; if (encode_entries && acl && acl->a_count == 3) { - /* Fake up an ACL_MASK entry. */ - acl2 = posix_acl_alloc(4, GFP_KERNEL); - if (!acl2) - return -ENOMEM; + struct posix_acl *acl2 = &aclbuf.acl; + + /* Avoid the use of posix_acl_alloc(). nfsacl_encode() is + * invoked in contexts where a memory allocation failure is + * fatal. Fortunately this fake ACL is small enough to + * construct on the stack. */ + memset(acl2, 0, sizeof(acl2)); + posix_acl_init(acl2, 4); + /* Insert entries in canonical order: other orders seem to confuse Solaris VxFS. */ acl2->a_entries[0] = acl->a_entries[0]; /* ACL_USER_OBJ */ @@ -120,8 +130,6 @@ int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode, nfsacl_desc.acl = acl2; } err = xdr_encode_array2(buf, base + 4, &nfsacl_desc.desc); - if (acl2) - posix_acl_release(acl2); if (!err) err = 8 + nfsacl_desc.desc.elem_size * nfsacl_desc.desc.array_len; diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 39df95a0ec25..b1cf6bf4b41d 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -22,6 +22,7 @@ #include +EXPORT_SYMBOL(posix_acl_init); EXPORT_SYMBOL(posix_acl_alloc); EXPORT_SYMBOL(posix_acl_clone); EXPORT_SYMBOL(posix_acl_valid); @@ -31,6 +32,16 @@ EXPORT_SYMBOL(posix_acl_create_masq); EXPORT_SYMBOL(posix_acl_chmod_masq); EXPORT_SYMBOL(posix_acl_permission); +/* + * Init a fresh posix_acl + */ +void +posix_acl_init(struct posix_acl *acl, int count) +{ + atomic_set(&acl->a_refcount, 1); + acl->a_count = count; +} + /* * Allocate a new ACL with the specified number of entries. 
*/ @@ -40,10 +51,8 @@ posix_acl_alloc(int count, gfp_t flags) const size_t size = sizeof(struct posix_acl) + count * sizeof(struct posix_acl_entry); struct posix_acl *acl = kmalloc(size, flags); - if (acl) { - atomic_set(&acl->a_refcount, 1); - acl->a_count = count; - } + if (acl) + posix_acl_init(acl, count); return acl; } -- cgit v1.2.2 From 80c30e8de4f81851b1f712bcc596e11d53bc76f1 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 24 Jan 2011 20:50:26 +0000 Subject: NLM: Fix "kernel BUG at fs/lockd/host.c:417!" or ".../host.c:283!" Nick Bowler reports: > We were just having some NFS server troubles, and my client machine > running 2.6.38-rc1+ (specifically, commit 2b1caf6ed7b888c95) crashed > hard (syslog output appended to this mail). > > I'm not sure what the exact timeline was or how to reproduce this, > but the server was rebooted during all this. Since I've never seen > this happen before, it is possibly a regression from previous kernel > releases. However, I recently updated my nfs-utils (on the client) to > version 1.2.3, so that might be related as well. [ BUG output redacted ] When done searching, the for_each_host loop in next_host_state() falls through and returns the final host on the host chain without bumping it's reference count. Since the host's ref count is only one at that point, releasing the host in nlm_host_rebooted() attempts to destroy the host prematurely, and therefore hits a BUG(). Likely, the original intent of the for_each_host behavior in next_host_state() was to handle the case when the host chain is empty. Searching the chain and finding no suitable host to return needs to be handled as well. Defensively restructure next_host_state() always to return NULL when the loop falls through. Introduced by commit b10e30f6 "lockd: reorganize nlm_host_rebooted". Cc: J. Bruce Fields Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/lockd/host.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 5f1bcb2f06f3..b7c99bfb3da6 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c @@ -520,7 +520,7 @@ static struct nlm_host *next_host_state(struct hlist_head *cache, struct nsm_handle *nsm, const struct nlm_reboot *info) { - struct nlm_host *host = NULL; + struct nlm_host *host; struct hlist_head *chain; struct hlist_node *pos; @@ -532,12 +532,13 @@ static struct nlm_host *next_host_state(struct hlist_head *cache, host->h_state++; nlm_get_host(host); - goto out; + mutex_unlock(&nlm_host_mutex); + return host; } } -out: + mutex_unlock(&nlm_host_mutex); - return host; + return NULL; } /** -- cgit v1.2.2 From 778be232a207e79088ba70d832ac25dfea6fbf1a Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Tue, 25 Jan 2011 15:38:01 +0000 Subject: NFS do not find client in NFSv4 pg_authenticate The information required to find the nfs_client cooresponding to the incoming back channel request is contained in the NFS layer. Perform minimal checking in the RPC layer pg_authenticate method, and push more detailed checking into the NFS layer where the nfs_client can be found. 
Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/callback.c | 109 +++++++++++++------------------------------------ fs/nfs/callback.h | 4 +- fs/nfs/callback_proc.c | 10 +---- fs/nfs/callback_xdr.c | 5 +-- fs/nfs/client.c | 15 +++---- fs/nfs/internal.h | 3 +- fs/nfs/nfs4state.c | 6 --- 7 files changed, 41 insertions(+), 111 deletions(-) (limited to 'fs') diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 199016528fcb..e3d294269058 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -134,33 +134,6 @@ out_err: } #if defined(CONFIG_NFS_V4_1) -/* - * * CB_SEQUENCE operations will fail until the callback sessionid is set. - * */ -int nfs4_set_callback_sessionid(struct nfs_client *clp) -{ - struct svc_serv *serv = clp->cl_rpcclient->cl_xprt->bc_serv; - struct nfs4_sessionid *bc_sid; - - if (!serv->sv_bc_xprt) - return -EINVAL; - - /* on success freed in xprt_free */ - bc_sid = kmalloc(sizeof(struct nfs4_sessionid), GFP_KERNEL); - if (!bc_sid) - return -ENOMEM; - memcpy(bc_sid->data, &clp->cl_session->sess_id.data, - NFS4_MAX_SESSIONID_LEN); - spin_lock_bh(&serv->sv_cb_lock); - serv->sv_bc_xprt->xpt_bc_sid = bc_sid; - spin_unlock_bh(&serv->sv_cb_lock); - dprintk("%s set xpt_bc_sid=%u:%u:%u:%u for sv_bc_xprt %p\n", __func__, - ((u32 *)bc_sid->data)[0], ((u32 *)bc_sid->data)[1], - ((u32 *)bc_sid->data)[2], ((u32 *)bc_sid->data)[3], - serv->sv_bc_xprt); - return 0; -} - /* * The callback service for NFSv4.1 callbacks */ @@ -266,10 +239,6 @@ static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt, struct nfs_callback_data *cb_info) { } -int nfs4_set_callback_sessionid(struct nfs_client *clp) -{ - return 0; -} #endif /* CONFIG_NFS_V4_1 */ /* @@ -359,78 +328,58 @@ void nfs_callback_down(int minorversion) mutex_unlock(&nfs_callback_mutex); } -static int check_gss_callback_principal(struct nfs_client *clp, - struct svc_rqst *rqstp) +/* Boolean check of RPC_AUTH_GSS principal */ +int +check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp) { struct rpc_clnt *r = clp->cl_rpcclient; char *p = svc_gss_principal(rqstp); + if (rqstp->rq_authop->flavour != RPC_AUTH_GSS) + return 1; + /* No RPC_AUTH_GSS on NFSv4.1 back channel yet */ if (clp->cl_minorversion != 0) - return SVC_DROP; + return 0; /* * It might just be a normal user principal, in which case * userspace won't bother to tell us the name at all. */ if (p == NULL) - return SVC_DENIED; + return 0; /* Expect a GSS_C_NT_HOSTBASED_NAME like "nfs@serverhostname" */ if (memcmp(p, "nfs@", 4) != 0) - return SVC_DENIED; + return 0; p += 4; if (strcmp(p, r->cl_server) != 0) - return SVC_DENIED; - return SVC_OK; + return 0; + return 1; } -/* pg_authenticate method helper */ -static struct nfs_client *nfs_cb_find_client(struct svc_rqst *rqstp) -{ - struct nfs4_sessionid *sessionid = bc_xprt_sid(rqstp); - int is_cb_compound = rqstp->rq_proc == CB_COMPOUND ? 1 : 0; - - dprintk("--> %s rq_proc %d\n", __func__, rqstp->rq_proc); - if (svc_is_backchannel(rqstp)) - /* Sessionid (usually) set after CB_NULL ping */ - return nfs4_find_client_sessionid(svc_addr(rqstp), sessionid, - is_cb_compound); - else - /* No callback identifier in pg_authenticate */ - return nfs4_find_client_no_ident(svc_addr(rqstp)); -} - -/* pg_authenticate method for nfsv4 callback threads. */ +/* + * pg_authenticate method for nfsv4 callback threads. + * + * The authflavor has been negotiated, so an incorrect flavor is a server + * bug. Drop packets with incorrect authflavor. 
+ * + * All other checking done after NFS decoding where the nfs_client can be + * found in nfs4_callback_compound + */ static int nfs_callback_authenticate(struct svc_rqst *rqstp) { - struct nfs_client *clp; - RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); - int ret = SVC_OK; - - /* Don't talk to strangers */ - clp = nfs_cb_find_client(rqstp); - if (clp == NULL) - return SVC_DROP; - - dprintk("%s: %s NFSv4 callback!\n", __func__, - svc_print_addr(rqstp, buf, sizeof(buf))); - switch (rqstp->rq_authop->flavour) { - case RPC_AUTH_NULL: - if (rqstp->rq_proc != CB_NULL) - ret = SVC_DENIED; - break; - case RPC_AUTH_UNIX: - break; - case RPC_AUTH_GSS: - ret = check_gss_callback_principal(clp, rqstp); - break; - default: - ret = SVC_DENIED; + case RPC_AUTH_NULL: + if (rqstp->rq_proc != CB_NULL) + return SVC_DROP; + break; + case RPC_AUTH_GSS: + /* No RPC_AUTH_GSS support yet in NFSv4.1 */ + if (svc_is_backchannel(rqstp)) + return SVC_DROP; } - nfs_put_client(clp); - return ret; + return SVC_OK; } /* diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index d3b44f9bd747..46d93ce7311b 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -7,6 +7,7 @@ */ #ifndef __LINUX_FS_NFS_CALLBACK_H #define __LINUX_FS_NFS_CALLBACK_H +#include #define NFS4_CALLBACK 0x40000000 #define NFS4_CALLBACK_XDRSIZE 2048 @@ -37,7 +38,6 @@ enum nfs4_callback_opnum { struct cb_process_state { __be32 drc_status; struct nfs_client *clp; - struct nfs4_sessionid *svc_sid; /* v4.1 callback service sessionid */ }; struct cb_compound_hdr_arg { @@ -168,7 +168,7 @@ extern unsigned nfs4_callback_layoutrecall( extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses); extern void nfs4_cb_take_slot(struct nfs_client *clp); #endif /* CONFIG_NFS_V4_1 */ - +extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *); extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res, struct cb_process_state *cps); diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 4bb91cb2620d..829f406e91dd 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -373,17 +373,11 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, { struct nfs_client *clp; int i; - __be32 status; + __be32 status = htonl(NFS4ERR_BADSESSION); cps->clp = NULL; - status = htonl(NFS4ERR_BADSESSION); - /* Incoming session must match the callback session */ - if (memcmp(&args->csa_sessionid, cps->svc_sid, NFS4_MAX_SESSIONID_LEN)) - goto out; - - clp = nfs4_find_client_sessionid(args->csa_addr, - &args->csa_sessionid, 1); + clp = nfs4_find_client_sessionid(args->csa_addr, &args->csa_sessionid); if (clp == NULL) goto out; diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 23112c263f81..14e0f9371d14 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -794,10 +794,9 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r if (hdr_arg.minorversion == 0) { cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident); - if (!cps.clp) + if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) return rpc_drop_reply; - } else - cps.svc_sid = bc_xprt_sid(rqstp); + } hdr_res.taglen = hdr_arg.taglen; hdr_res.tag = hdr_arg.tag; diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 192f2f860265..bd3ca32879e7 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1206,16 +1206,11 @@ nfs4_find_client_ident(int cb_ident) * For CB_COMPOUND calls, find a client by IP address, protocol version, * minorversion, and sessionID * - * CREATE_SESSION triggers a CB_NULL 
ping from servers. The callback service - * sessionid can only be set after the CREATE_SESSION return, so a CB_NULL - * can arrive before the callback sessionid is set. For CB_NULL calls, - * find a client by IP address protocol version, and minorversion. - * * Returns NULL if no such client */ struct nfs_client * nfs4_find_client_sessionid(const struct sockaddr *addr, - struct nfs4_sessionid *sid, int is_cb_compound) + struct nfs4_sessionid *sid) { struct nfs_client *clp; @@ -1227,9 +1222,9 @@ nfs4_find_client_sessionid(const struct sockaddr *addr, if (!nfs4_has_session(clp)) continue; - /* Match sessionid unless cb_null call*/ - if (is_cb_compound && (memcmp(clp->cl_session->sess_id.data, - sid->data, NFS4_MAX_SESSIONID_LEN) != 0)) + /* Match sessionid*/ + if (memcmp(clp->cl_session->sess_id.data, + sid->data, NFS4_MAX_SESSIONID_LEN) != 0) continue; atomic_inc(&clp->cl_count); @@ -1244,7 +1239,7 @@ nfs4_find_client_sessionid(const struct sockaddr *addr, struct nfs_client * nfs4_find_client_sessionid(const struct sockaddr *addr, - struct nfs4_sessionid *sid, int is_cb_compound) + struct nfs4_sessionid *sid) { return NULL; } diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 4644f04b4b46..cf9fdbdabc67 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -133,8 +133,7 @@ extern void nfs_put_client(struct nfs_client *); extern struct nfs_client *nfs4_find_client_no_ident(const struct sockaddr *); extern struct nfs_client *nfs4_find_client_ident(int); extern struct nfs_client * -nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *, - int); +nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *); extern struct nfs_server *nfs_create_server( const struct nfs_parsed_mount_data *, struct nfs_fh *); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 2336d532cf66..e6742b57a04c 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -232,12 +232,6 @@ int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) status = nfs4_proc_create_session(clp); if (status != 0) goto out; - status = nfs4_set_callback_sessionid(clp); - if (status != 0) { - printk(KERN_WARNING "Sessionid not set. No callback service\n"); - nfs_callback_down(1); - status = 0; - } nfs41_setup_state_renewal(clp); nfs_mark_client_ready(clp, NFS_CS_READY); out: -- cgit v1.2.2 From 2c4cdf8f6d3cfb48036400952329555099c8c92c Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Tue, 25 Jan 2011 15:38:02 +0000 Subject: NFS fix cb_sequence error processing Always assign the cb_process_state nfs_client pointer so a processing error in cb_sequence after the nfs_client is found and referenced returns a non-NULL cb_process_state nfs_client and the matching nfs_put_client in nfs4_callback_compound dereferences the client. 
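The fix follows the usual take-a-reference/publish-it pattern: once the lookup has bumped the client's refcount, the pointer must reach the caller even on an error path so the caller's single put balances the get. A toy standalone sketch of that pattern, with invented names rather than the NFS callback code:

        #include <stdio.h>

        struct client { int refcount; };
        static struct client the_client = { .refcount = 1 };

        static struct client *find_client(void) { the_client.refcount++; return &the_client; }
        static void put_client(struct client *c) { if (c) c->refcount--; }

        struct process_state { struct client *clp; };

        static int handle_sequence(struct process_state *ps)
        {
                struct client *clp = find_client();
                int err = -1;           /* pretend a later validation step fails */

                /* ... further checks that may fail would go here ... */

                ps->clp = clp;          /* publish even on error, so the caller can put it */
                return err;
        }

        int main(void)
        {
                struct process_state ps = { 0 };

                handle_sequence(&ps);
                put_client(ps.clp);     /* the caller's unconditional put */
                printf("refcount is back to %d\n", the_client.refcount);
                return 0;
        }
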
Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/callback_proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 829f406e91dd..89587573fe50 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -408,9 +408,9 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; nfs4_cb_take_slot(clp); - cps->clp = clp; /* put in nfs4_callback_compound */ out: + cps->clp = clp; /* put in nfs4_callback_compound */ for (i = 0; i < args->csa_nrclists; i++) kfree(args->csa_rclists[i].rcl_refcalls); kfree(args->csa_rclists); -- cgit v1.2.2 From b2a2897dc4a59684321de425652061c62a0569d0 Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Tue, 25 Jan 2011 15:38:03 +0000 Subject: NFS improve pnfs_put_deviceid_cache debug print What we really want to know is the ref count. Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/pnfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index bc4089769735..1b1bc1a0fb0a 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -951,7 +951,7 @@ pnfs_put_deviceid_cache(struct nfs_client *clp) { struct pnfs_deviceid_cache *local = clp->cl_devid_cache; - dprintk("--> %s cl_devid_cache %p\n", __func__, clp->cl_devid_cache); + dprintk("--> %s ({%d})\n", __func__, atomic_read(&local->dc_ref)); if (atomic_dec_and_lock(&local->dc_ref, &clp->cl_lock)) { int i; /* Verify cache is empty */ -- cgit v1.2.2 From 27dc1cd3ad9300f81e1219e5fc305d91d85353f8 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 25 Jan 2011 15:28:21 -0500 Subject: NFS: nfs_wcc_update_inode() should set nfsi->attr_gencount If the call to nfs_wcc_update_inode() results in an attribute update, we need to ensure that the inode's attr_gencount gets bumped too, otherwise we are not protected against races with other GETATTR calls. 
Signed-off-by: Trond Myklebust --- fs/nfs/inode.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index d8512423ba72..1cc600e77bb4 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -881,9 +881,10 @@ out: return ret; } -static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) +static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) { struct nfs_inode *nfsi = NFS_I(inode); + unsigned long ret = 0; if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE) && (fattr->valid & NFS_ATTR_FATTR_CHANGE) @@ -891,25 +892,32 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) nfsi->change_attr = fattr->change_attr; if (S_ISDIR(inode->i_mode)) nfsi->cache_validity |= NFS_INO_INVALID_DATA; + ret |= NFS_INO_INVALID_ATTR; } /* If we have atomic WCC data, we may update some attributes */ if ((fattr->valid & NFS_ATTR_FATTR_PRECTIME) && (fattr->valid & NFS_ATTR_FATTR_CTIME) - && timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) - memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime)); + && timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) { + memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime)); + ret |= NFS_INO_INVALID_ATTR; + } if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME) && (fattr->valid & NFS_ATTR_FATTR_MTIME) && timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) { - memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime)); - if (S_ISDIR(inode->i_mode)) - nfsi->cache_validity |= NFS_INO_INVALID_DATA; + memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime)); + if (S_ISDIR(inode->i_mode)) + nfsi->cache_validity |= NFS_INO_INVALID_DATA; + ret |= NFS_INO_INVALID_ATTR; } if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE) && (fattr->valid & NFS_ATTR_FATTR_SIZE) && i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size) - && nfsi->npages == 0) - i_size_write(inode, nfs_size_to_loff_t(fattr->size)); + && nfsi->npages == 0) { + i_size_write(inode, nfs_size_to_loff_t(fattr->size)); + ret |= NFS_INO_INVALID_ATTR; + } + return ret; } /** @@ -1223,7 +1231,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) | NFS_INO_REVAL_PAGECACHE); /* Do atomic weak cache consistency updates */ - nfs_wcc_update_inode(inode, fattr); + invalid |= nfs_wcc_update_inode(inode, fattr); /* More cache consistency checks */ if (fattr->valid & NFS_ATTR_FATTR_CHANGE) { -- cgit v1.2.2 From 3689456b4bd36027022b3215eb2acba51cd0e6b5 Mon Sep 17 00:00:00 2001 From: Phillip Lougher Date: Tue, 25 Jan 2011 15:07:34 -0800 Subject: squashfs: fix use of uninitialised variable in zlib & xz decompressors Fix potential use of uninitialised variable caused by recent decompressor code optimisations. In zlib_uncompress (zlib_wrapper.c) we have int zlib_err, zlib_init = 0; ... do { ... if (avail == 0) { offset = 0; put_bh(bh[k++]); continue; } ... zlib_err = zlib_inflate(stream, Z_SYNC_FLUSH); ... } while (zlib_err == Z_OK); If continue is executed (avail == 0) then the while condition will be evaluated testing zlib_err, which is uninitialised first time around the loop. Fix this by getting rid of the 'if (avail == 0)' condition test, this edge condition should not be being handled in the decompressor code, and instead handle it generically in the caller code. Similarly for xz_wrapper.c. 
Incidentally, on most architectures (bar Mips and Parisc), no uninitialised variable warning is generated by gcc, this is because the while condition test on continue is optimised out and not performed (when executing continue zlib_err has not been changed since entering the loop, and logically if the while condition was true previously, then it's still true). Signed-off-by: Phillip Lougher Reported-by: Jesper Juhl Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/squashfs/block.c | 8 ++++++++ fs/squashfs/xz_wrapper.c | 6 ------ fs/squashfs/zlib_wrapper.c | 6 ------ 3 files changed, 8 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 2fb2882f0fa7..8ab48bc2fa7d 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c @@ -63,6 +63,14 @@ static struct buffer_head *get_block_length(struct super_block *sb, *length = (unsigned char) bh->b_data[*offset] | (unsigned char) bh->b_data[*offset + 1] << 8; *offset += 2; + + if (*offset == msblk->devblksize) { + put_bh(bh); + bh = sb_bread(sb, ++(*cur_index)); + if (bh == NULL) + return NULL; + *offset = 0; + } } return bh; diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c index 856756ca5ee4..c4eb40018256 100644 --- a/fs/squashfs/xz_wrapper.c +++ b/fs/squashfs/xz_wrapper.c @@ -95,12 +95,6 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void **buffer, if (!buffer_uptodate(bh[k])) goto release_mutex; - if (avail == 0) { - offset = 0; - put_bh(bh[k++]); - continue; - } - stream->buf.in = bh[k]->b_data + offset; stream->buf.in_size = avail; stream->buf.in_pos = 0; diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c index 818a5e063faf..4661ae2b1cec 100644 --- a/fs/squashfs/zlib_wrapper.c +++ b/fs/squashfs/zlib_wrapper.c @@ -82,12 +82,6 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void **buffer, if (!buffer_uptodate(bh[k])) goto release_mutex; - if (avail == 0) { - offset = 0; - put_bh(bh[k++]); - continue; - } - stream->next_in = bh[k]->b_data + offset; stream->avail_in = avail; offset = 0; -- cgit v1.2.2 From ac751efa6a0d70f2c9daef5c7e3a92270f5c2dff Mon Sep 17 00:00:00 2001 From: Torben Hohn Date: Tue, 25 Jan 2011 15:07:35 -0800 Subject: console: rename acquire/release_console_sem() to console_lock/unlock() The -rt patches change the console_semaphore to console_mutex. As a result, a quite large chunk of the patches changes all acquire/release_console_sem() to acquire/release_console_mutex() This commit makes things use more neutral function names which dont make implications about the underlying lock. The only real change is the return value of console_trylock which is inverted from try_acquire_console_sem() This patch also paves the way to switching console_sem from a semaphore to a mutex. 
[akpm@linux-foundation.org: coding-style fixes] [akpm@linux-foundation.org: make console_trylock return 1 on success, per Geert] Signed-off-by: Torben Hohn Cc: Thomas Gleixner Cc: Greg KH Cc: Ingo Molnar Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/consoles.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/proc/consoles.c b/fs/proc/consoles.c index eafc22ab1fdd..b701eaa482bf 100644 --- a/fs/proc/consoles.c +++ b/fs/proc/consoles.c @@ -67,7 +67,7 @@ static void *c_start(struct seq_file *m, loff_t *pos) struct console *con; loff_t off = 0; - acquire_console_sem(); + console_lock(); for_each_console(con) if (off++ == *pos) break; @@ -84,7 +84,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos) static void c_stop(struct seq_file *m, void *v) { - release_console_sem(); + console_unlock(); } static const struct seq_operations consoles_op = { -- cgit v1.2.2 From c7a360b05b5430ac1d75dc7d53c586ada60a05cb Mon Sep 17 00:00:00 2001 From: Andy Adamson Date: Tue, 25 Jan 2011 19:15:32 -0500 Subject: NFS construct consistent co_ownerid for v4.1 As stated in section 2.4 of RFC 5661, subsequent instances of the client need to present the same co_ownerid. Concatinate the client's IP dot address, host name, and the rpc_auth pseudoflavor to form the co_ownerid. Signed-off-by: Andy Adamson Signed-off-by: Trond Myklebust --- fs/nfs/nfs4proc.c | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 9d992b0346e3..78936a8f40ab 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -50,6 +50,7 @@ #include #include #include +#include #include "nfs4_fs.h" #include "delegation.h" @@ -4572,27 +4573,16 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) *p = htonl((u32)clp->cl_boot_time.tv_nsec); args.verifier = &verifier; - while (1) { - args.id_len = scnprintf(args.id, sizeof(args.id), - "%s/%s %u", - clp->cl_ipaddr, - rpc_peeraddr2str(clp->cl_rpcclient, - RPC_DISPLAY_ADDR), - clp->cl_id_uniquifier); - - status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); - - if (status != -NFS4ERR_CLID_INUSE) - break; - - if (signalled()) - break; - - if (++clp->cl_id_uniquifier == 0) - break; - } + args.id_len = scnprintf(args.id, sizeof(args.id), + "%s/%s.%s/%u", + clp->cl_ipaddr, + init_utsname()->nodename, + init_utsname()->domainname, + clp->cl_rpcclient->cl_auth->au_flavor); - status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags); + status = rpc_call_sync(clp->cl_rpcclient, &msg, 0); + if (!status) + status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags); dprintk("<-- %s status= %d\n", __func__, status); return status; } -- cgit v1.2.2 From 8eb2d829ffea3677c21bd038f19e5d8ca6b43e36 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 14:48:01 +0800 Subject: btrfs: Fix threshold calculation for block groups smaller than 1GB If a block group is smaller than 1GB, the extent entry threadhold calculation will always set the threshold to 0. So as free space gets fragmented, btrfs will switch to use bitmap to manage free space, but then will never switch back to extents due to this bug. 
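The arithmetic behind the bug is plain integer truncation: dividing a sub-1GB size by 1GB yields 0, which zeroes the whole threshold. A standalone demo of the before/after calculation, assuming MAX_CACHE_BYTES_PER_GIG is 32k as the 32k-per-1GB comment in the patch below suggests:

        #include <stdio.h>
        #include <stdint.h>

        #define MAX_CACHE_BYTES_PER_GIG (32ULL * 1024)          /* assumed value */
        #define ONE_GIG                 (1024ULL * 1024 * 1024)

        static uint64_t threshold_old(uint64_t size)
        {
                return MAX_CACHE_BYTES_PER_GIG * (size / ONE_GIG);  /* 0 if size < 1GB */
        }

        static uint64_t threshold_new(uint64_t size)
        {
                if (size < ONE_GIG)
                        return MAX_CACHE_BYTES_PER_GIG;
                return MAX_CACHE_BYTES_PER_GIG * (size / ONE_GIG);
        }

        int main(void)
        {
                uint64_t size = 256ULL * 1024 * 1024;   /* a 256MB block group */

                printf("old threshold: %llu bytes, new threshold: %llu bytes\n",
                       (unsigned long long)threshold_old(size),
                       (unsigned long long)threshold_new(size));
                return 0;
        }
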
Reviewed-by: Josef Bacik Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 60d684266959..42f4015988ec 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1016,14 +1016,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) u64 max_bytes; u64 bitmap_bytes; u64 extent_bytes; + u64 size = block_group->key.offset; /* * The goal is to keep the total amount of memory used per 1gb of space * at or below 32k, so we need to adjust how much memory we allow to be * used by extent based free space tracking */ - max_bytes = MAX_CACHE_BYTES_PER_GIG * - (div64_u64(block_group->key.offset, 1024 * 1024 * 1024)); + if (size < 1024 * 1024 * 1024) + max_bytes = MAX_CACHE_BYTES_PER_GIG; + else + max_bytes = MAX_CACHE_BYTES_PER_GIG * + div64_u64(size, 1024 * 1024 * 1024); /* * we want to account for 1 more bitmap than what we have so we can make -- cgit v1.2.2 From edf6e2d1ddbac7f326b34a27adbca71ece53ccce Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 14:50:07 +0800 Subject: btrfs: Add helper function free_bitmap() Remove some duplicated code. This prepares for the next patch. Reviewed-by: Josef Bacik Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 42f4015988ec..850104f05178 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1175,6 +1175,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group, recalculate_thresholds(block_group); } +static void free_bitmap(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *bitmap_info) +{ + unlink_free_space(block_group, bitmap_info); + kfree(bitmap_info->bitmap); + kfree(bitmap_info); + block_group->total_bitmaps--; + recalculate_thresholds(block_group); +} + static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, struct btrfs_free_space *bitmap_info, u64 *offset, u64 *bytes) @@ -1215,13 +1225,8 @@ again: if (*bytes) { struct rb_node *next = rb_next(&bitmap_info->offset_index); - if (!bitmap_info->bytes) { - unlink_free_space(block_group, bitmap_info); - kfree(bitmap_info->bitmap); - kfree(bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } + if (!bitmap_info->bytes) + free_bitmap(block_group, bitmap_info); /* * no entry after this bitmap, but we still have bytes to @@ -1254,13 +1259,8 @@ again: return -EAGAIN; goto again; - } else if (!bitmap_info->bytes) { - unlink_free_space(block_group, bitmap_info); - kfree(bitmap_info->bitmap); - kfree(bitmap_info); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } + } else if (!bitmap_info->bytes) + free_bitmap(block_group, bitmap_info); return 0; } @@ -1689,13 +1689,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, ret = offset; if (entry->bitmap) { bitmap_clear_bits(block_group, entry, offset, bytes); - if (!entry->bytes) { - unlink_free_space(block_group, entry); - kfree(entry->bitmap); - kfree(entry); - block_group->total_bitmaps--; - recalculate_thresholds(block_group); - } + if (!entry->bytes) + free_bitmap(block_group, entry); } else { unlink_free_space(block_group, entry); entry->offset += bytes; -- cgit v1.2.2 From 
70b7da304f9f9bbf1566085155895e32e775a745 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 14:51:45 +0800 Subject: btrfs: Free fully occupied bitmap in cluster If there's no more free space in a bitmap, we should free it. Reviewed-by: Josef Bacik Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 850104f05178..cb0137e4047f 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1788,6 +1788,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, ret = search_start; bitmap_clear_bits(block_group, entry, ret, bytes); + if (entry->bytes == 0) + free_bitmap(block_group, entry); out: spin_unlock(&cluster->lock); spin_unlock(&block_group->tree_lock); -- cgit v1.2.2 From 5e71b5d5ec07e4b3fb4c78c4e4b108ff667f123f Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 14:55:34 +0800 Subject: btrfs: Update stats when allocating from a cluster When allocating extent entry from a cluster, we should update the free_space and free_extents fields of the block group. Reviewed-by: Josef Bacik Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index cb0137e4047f..2974c4744d5c 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1843,15 +1843,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, entry->offset += bytes; entry->bytes -= bytes; - if (entry->bytes == 0) { + if (entry->bytes == 0) rb_erase(&entry->offset_index, &cluster->root); - kfree(entry); - } break; } out: spin_unlock(&cluster->lock); + if (!ret) + return 0; + + spin_lock(&block_group->tree_lock); + + block_group->free_space -= bytes; + if (entry->bytes == 0) { + block_group->free_extents--; + kfree(entry); + } + + spin_unlock(&block_group->tree_lock); + return ret; } -- cgit v1.2.2 From 120d66eec0dcb966fbd03f743598b2ff2513436b Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 14:56:50 +0800 Subject: btrfs: Add a helper try_merge_free_space() When adding a new extent, we'll firstly see if we can merge this extent to the left or/and right extent. Extract this as a helper try_merge_free_space(). As a side effect, we fix a small bug that if the new extent has non-bitmap left entry but is unmergeble, we'll directly link the extent without trying to drop it into bitmap. This also prepares for the next patch. 
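The merge itself is simple interval coalescing: a new free extent absorbs a neighbour whose range is exactly adjacent (the kernel helper additionally skips bitmap entries). A toy standalone version of that check, with simplified types instead of the btrfs structures:

        #include <stdio.h>
        #include <stdbool.h>

        struct free_space { unsigned long long offset, bytes; };

        static bool try_merge(struct free_space *info,
                              struct free_space *left, struct free_space *right)
        {
                bool merged = false;

                if (right && info->offset + info->bytes == right->offset) {
                        info->bytes += right->bytes;    /* absorb the right neighbour */
                        merged = true;
                }
                if (left && left->offset + left->bytes == info->offset) {
                        info->offset = left->offset;    /* absorb the left neighbour */
                        info->bytes += left->bytes;
                        merged = true;
                }
                return merged;
        }

        int main(void)
        {
                struct free_space left = { 0, 100 }, info = { 100, 50 }, right = { 150, 30 };

                if (try_merge(&info, &left, &right))
                        printf("merged extent: [%llu, %llu)\n",
                               info.offset, info.offset + info.bytes);
                return 0;
        }
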
Reviewed-by: Josef Bacik Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 75 ++++++++++++++++++++++++++------------------- 1 file changed, 43 insertions(+), 32 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 2974c4744d5c..cf67dc3b7bf8 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1363,22 +1363,14 @@ out: return ret; } -int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, - u64 offset, u64 bytes) +bool try_merge_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info) { - struct btrfs_free_space *right_info = NULL; - struct btrfs_free_space *left_info = NULL; - struct btrfs_free_space *info = NULL; - int ret = 0; - - info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); - if (!info) - return -ENOMEM; - - info->offset = offset; - info->bytes = bytes; - - spin_lock(&block_group->tree_lock); + struct btrfs_free_space *left_info; + struct btrfs_free_space *right_info; + bool merged = false; + u64 offset = info->offset; + u64 bytes = info->bytes; /* * first we want to see if there is free space adjacent to the range we @@ -1392,27 +1384,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, else left_info = tree_search_offset(block_group, offset - 1, 0, 0); - /* - * If there was no extent directly to the left or right of this new - * extent then we know we're going to have to allocate a new extent, so - * before we do that see if we need to drop this into a bitmap - */ - if ((!left_info || left_info->bitmap) && - (!right_info || right_info->bitmap)) { - ret = insert_into_bitmap(block_group, info); - - if (ret < 0) { - goto out; - } else if (ret) { - ret = 0; - goto out; - } - } - if (right_info && !right_info->bitmap) { unlink_free_space(block_group, right_info); info->bytes += right_info->bytes; kfree(right_info); + merged = true; } if (left_info && !left_info->bitmap && @@ -1421,8 +1397,43 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, info->offset = left_info->offset; info->bytes += left_info->bytes; kfree(left_info); + merged = true; } + return merged; +} + +int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, + u64 offset, u64 bytes) +{ + struct btrfs_free_space *info; + int ret = 0; + + info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS); + if (!info) + return -ENOMEM; + + info->offset = offset; + info->bytes = bytes; + + spin_lock(&block_group->tree_lock); + + if (try_merge_free_space(block_group, info)) + goto link; + + /* + * There was no extent directly to the left or right of this new + * extent then we know we're going to have to allocate a new extent, so + * before we do that see if we need to drop this into a bitmap + */ + ret = insert_into_bitmap(block_group, info); + if (ret < 0) { + goto out; + } else if (ret) { + ret = 0; + goto out; + } +link: ret = link_free_space(block_group, info); if (ret) kfree(info); -- cgit v1.2.2 From f333adb5d64bc1c4d6099072fc341c3c8f84e0cf Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Tue, 9 Nov 2010 14:57:39 +0800 Subject: btrfs: Check mergeable free space when removing a cluster After returing extents from a cluster to the block group, some extents in the block group may be mergeable. 
Reviewed-by: Josef Bacik Signed-off-by: Li Zefan --- fs/btrfs/free-space-cache.c | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index cf67dc3b7bf8..a5501edc3c9f 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, return entry; } -static void unlink_free_space(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info) +static inline void +__unlink_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info) { rb_erase(&info->offset_index, &block_group->free_space_offset); block_group->free_extents--; +} + +static void unlink_free_space(struct btrfs_block_group_cache *block_group, + struct btrfs_free_space *info) +{ + __unlink_free_space(block_group, info); block_group->free_space -= info->bytes; } @@ -1364,7 +1371,7 @@ out: } bool try_merge_free_space(struct btrfs_block_group_cache *block_group, - struct btrfs_free_space *info) + struct btrfs_free_space *info, bool update_stat) { struct btrfs_free_space *left_info; struct btrfs_free_space *right_info; @@ -1385,7 +1392,10 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, left_info = tree_search_offset(block_group, offset - 1, 0, 0); if (right_info && !right_info->bitmap) { - unlink_free_space(block_group, right_info); + if (update_stat) + unlink_free_space(block_group, right_info); + else + __unlink_free_space(block_group, right_info); info->bytes += right_info->bytes; kfree(right_info); merged = true; @@ -1393,7 +1403,10 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, if (left_info && !left_info->bitmap && left_info->offset + left_info->bytes == offset) { - unlink_free_space(block_group, left_info); + if (update_stat) + unlink_free_space(block_group, left_info); + else + __unlink_free_space(block_group, left_info); info->offset = left_info->offset; info->bytes += left_info->bytes; kfree(left_info); @@ -1418,7 +1431,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, spin_lock(&block_group->tree_lock); - if (try_merge_free_space(block_group, info)) + if (try_merge_free_space(block_group, info, true)) goto link; /* @@ -1636,6 +1649,7 @@ __btrfs_return_cluster_to_free_space( node = rb_next(&entry->offset_index); rb_erase(&entry->offset_index, &cluster->root); BUG_ON(entry->bitmap); + try_merge_free_space(block_group, entry, false); tree_insert_offset(&block_group->free_space_offset, entry->offset, &entry->offset_index, 0); } -- cgit v1.2.2 From 83a4d54840c88a4a45c49670f044b8c7ddeaa8c7 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Mon, 27 Dec 2010 16:19:53 +0800 Subject: Btrfs: Fix memory leak at umount fs_info, which is allocated in open_ctree(), should be freed in close_ctree(). 
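The leak is the classic asymmetric-teardown pattern: the objects hanging off the context are freed, but the context allocation itself is forgotten. A tiny userspace illustration with hypothetical names; running it under valgrind with and without the final free shows the difference:

        #include <stdlib.h>

        struct fs_info {
                void *chunk_root;
                void *dev_root;
        };

        static struct fs_info *open_fs(void)
        {
                struct fs_info *fs = calloc(1, sizeof(*fs));

                fs->chunk_root = malloc(64);
                fs->dev_root = malloc(64);
                return fs;
        }

        static void close_fs(struct fs_info *fs)
        {
                free(fs->chunk_root);
                free(fs->dev_root);
                free(fs);       /* the counterpart of the calloc() that was missing */
        }

        int main(void)
        {
                close_fs(open_fs());
                return 0;
        }
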
Signed-off-by: Li Zefan --- fs/btrfs/disk-io.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index a5d2249e6da5..089871e5cd5a 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2513,6 +2513,8 @@ int close_ctree(struct btrfs_root *root) kfree(fs_info->chunk_root); kfree(fs_info->dev_root); kfree(fs_info->csum_root); + kfree(fs_info); + return 0; } -- cgit v1.2.2 From bdc924bb4cdac92b945945c3149ab8191c92d75d Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Mon, 27 Dec 2010 16:33:15 +0800 Subject: Btrfs: Fix memory leak on finding existing super We missed a memory deallocation in commit 450ba0ea. If an existing super block is found at mount and there is no error condition then the pre-allocated tree_root and fs_info are no not used and are not freeded. Signed-off-by: Ian Kent Signed-off-by: Li Zefan --- fs/btrfs/super.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 61bd79abb805..f50253c2279d 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -654,6 +654,8 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags, } btrfs_close_devices(fs_devices); + kfree(fs_info); + kfree(tree_root); } else { char b[BDEVNAME_SIZE]; -- cgit v1.2.2 From 3f3d0bc0df041236fad4ffa82188a6e4ef9af75e Mon Sep 17 00:00:00 2001 From: Tero Roponen Date: Mon, 27 Dec 2010 16:43:13 +0800 Subject: Btrfs: Free correct pointer after using strsep We must save and free the original kstrdup()'ed pointer because strsep() modifies its first argument. Signed-off-by: Tero Roponen Signed-off-by: Li Zefan --- fs/btrfs/super.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f50253c2279d..78ee681465af 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -277,7 +277,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, struct btrfs_fs_devices **fs_devices) { substring_t args[MAX_OPT_ARGS]; - char *opts, *p; + char *opts, *orig, *p; int error = 0; int intarg; @@ -291,6 +291,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, opts = kstrdup(options, GFP_KERNEL); if (!opts) return -ENOMEM; + orig = opts; while ((p = strsep(&opts, ",")) != NULL) { int token; @@ -326,7 +327,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags, } out_free_opts: - kfree(opts); + kfree(orig); out: /* * If no subvolume name is specified we use the default one. Allocate -- cgit v1.2.2 From d0f69686c2ae775529aadc7a8acc6f13ad41de66 Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Tue, 25 Jan 2011 15:46:17 +0800 Subject: Btrfs: Don't return acl info when mounting with noacl option Steps to reproduce: # mkfs.btrfs /dev/sda2 # mount /dev/sda2 /mnt # touch /mnt/file0 # setfacl -m 'u:root:x,g::x,o::x' /mnt/file0 # umount /mnt # mount /dev/sda2 -o noacl /mnt # getfacl /mnt/file0 ... 
user::rw- user:root:--x group::--x mask::--x other::--x The output should be: user::rw- group::--x other::--x Signed-off-by: Miao Xie Signed-off-by: Li Zefan --- fs/btrfs/acl.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 2222d161c7b6..3c52fc8afe29 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -37,6 +37,9 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) char *value = NULL; struct posix_acl *acl; + if (!IS_POSIXACL(inode)) + return NULL; + acl = get_cached_acl(inode, type); if (acl != ACL_NOT_CACHED) return acl; @@ -82,6 +85,9 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name, struct posix_acl *acl; int ret = 0; + if (!IS_POSIXACL(dentry->d_inode)) + return -EOPNOTSUPP; + acl = btrfs_get_acl(dentry->d_inode, type); if (IS_ERR(acl)) -- cgit v1.2.2 From b897abec032deb7cc3ce67392a1f544ac965ddea Mon Sep 17 00:00:00 2001 From: Miao Xie Date: Wed, 26 Jan 2011 16:19:22 +0800 Subject: Btrfs: Fix memory leak in writepage fixup work fixup, which is allocated when starting page write to fix up the extent without ORDERED bit set, should be freed after this work is done. Signed-off-by: Miao Xie Signed-off-by: Li Zefan --- fs/btrfs/inode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5f9194438f7c..3a6edc4c5642 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1544,6 +1544,7 @@ out: out_page: unlock_page(page); page_cache_release(page); + kfree(fixup); } /* -- cgit v1.2.2 From 4d728ec7aefdca5419d2ebfb28c147e81a4b59f4 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 26 Jan 2011 14:10:43 +0800 Subject: Btrfs: Fix file clone when source offset is not 0 Suppose: - the source extent is: [0, 100] - the src offset is 10 - the clone length is 90 - the dest offset is 0 This statement: new_key.offset = key.offset + destoff - off will produce such an extent for the dest file: [ino, BTRFS_EXTENT_DATA_KEY, -10] , which is obviously wrong. Signed-off-by: Li Zefan --- fs/btrfs/ioctl.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index f87552a1d7ea..1b61dab64062 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1788,7 +1788,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, memcpy(&new_key, &key, sizeof(new_key)); new_key.objectid = inode->i_ino; - new_key.offset = key.offset + destoff - off; + if (off <= key.offset) + new_key.offset = key.offset + destoff - off; + else + new_key.offset = destoff; trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { -- cgit v1.2.2 From 7db37c5e6575b229a5051be1d3ef15257ae0ba5d Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 27 Jan 2011 12:02:00 +1100 Subject: xfs: fix log ticket leak on forced shutdown. The kmemleak detector shows this after test 139: unreferenced object 0xffff880079b88bb0 (size 264): comm "xfs_io", pid 4904, jiffies 4294909382 (age 276.824s) hex dump (first 32 bytes): 00 00 00 00 ad 4e ad de ff ff ff ff 00 00 00 00 .....N.......... ff ff ff ff ff ff ff ff 48 7b c9 82 ff ff ff ff ........H{...... 
backtrace: [] kmemleak_alloc+0x2d/0x60 [] kmem_cache_alloc+0x13f/0x2b0 [] kmem_zone_alloc+0x77/0xf0 [] kmem_zone_zalloc+0x1e/0x50 [] xlog_ticket_alloc+0x34/0x170 [] xlog_cil_push+0xa4/0x3f0 [] xlog_cil_force_lsn+0x15a/0x160 [] _xfs_log_force_lsn+0x75/0x2d0 [] _xfs_trans_commit+0x2bd/0x2f0 [] xfs_iomap_write_allocate+0x1ad/0x350 [] xfs_map_blocks+0x21f/0x370 [] xfs_vm_writepage+0x1c7/0x550 [] __writepage+0x1a/0x50 [] write_cache_pages+0x1c2/0x4c0 [] generic_writepages+0x27/0x30 [] xfs_vm_writepages+0x5d/0x80 By inspection, the leak occurs when xlog_write() returns and error and we jump to the abort path without dropping the reference on the active ticket. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Alex Elder --- fs/xfs/xfs_log_cil.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index 9dc8125d04e5..c7eac5acbfea 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c @@ -543,7 +543,7 @@ xlog_cil_push( error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0); if (error) - goto out_abort; + goto out_abort_free_ticket; /* * now that we've written the checkpoint into the log, strictly @@ -569,8 +569,9 @@ restart: } spin_unlock(&cil->xc_cil_lock); + /* xfs_log_done always frees the ticket on error. */ commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0); - if (error || commit_lsn == -1) + if (commit_lsn == -1) goto out_abort; /* attach all the transactions w/ busy extents to iclog */ @@ -600,6 +601,8 @@ out_free_ticket: kmem_free(new_ctx); return 0; +out_abort_free_ticket: + xfs_log_ticket_put(tic); out_abort: xlog_cil_committed(ctx, XFS_LI_ABORTED); return XFS_ERROR(EIO); -- cgit v1.2.2 From ee2c9258501f83d3ed0fd09ce5df1cec53312cf0 Mon Sep 17 00:00:00 2001 From: Shirish Pargaonkar Date: Thu, 27 Jan 2011 09:58:04 -0600 Subject: cifs: More crypto cleanup (try #2) Replaced md4 hashing function local to cifs module with kernel crypto APIs. As a result, md4 hashing function and its supporting functions in file md4.c are not needed anymore. Cleaned up function declarations, removed forward function declarations, and removed a header file that is being deleted from being included. Verified that sec=ntlm/i, sec=ntlmv2/i, and sec=ntlmssp/i work correctly. 
Signed-off-by: Shirish Pargaonkar Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/Makefile | 2 +- fs/cifs/cifsencrypt.c | 32 +++++--- fs/cifs/cifsencrypt.h | 33 -------- fs/cifs/cifsproto.h | 9 ++- fs/cifs/connect.c | 6 +- fs/cifs/link.c | 5 +- fs/cifs/md4.c | 205 -------------------------------------------------- fs/cifs/smbdes.c | 1 - fs/cifs/smbencrypt.c | 90 +++++++++++++++------- 9 files changed, 97 insertions(+), 286 deletions(-) delete mode 100644 fs/cifs/cifsencrypt.h delete mode 100644 fs/cifs/md4.c (limited to 'fs') diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile index e1322296cb69..d87558448e3d 100644 --- a/fs/cifs/Makefile +++ b/fs/cifs/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_CIFS) += cifs.o cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \ link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o \ - md4.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o \ + cifs_unicode.o nterr.o xattr.o cifsencrypt.o \ readdir.o ioctl.o sess.o export.o cifs-$(CONFIG_CIFS_ACL) += cifsacl.o diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 35bf329c90e1..0db5f1de0227 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -36,11 +36,6 @@ /* Note that the smb header signature field on input contains the sequence number before this function is called */ -extern void mdfour(unsigned char *out, unsigned char *in, int n); -extern void E_md4hash(const unsigned char *passwd, unsigned char *p16); -extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8, - unsigned char *p24); - static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server, char *signature) { @@ -233,6 +228,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu, /* first calculate 24 bytes ntlm response and then 16 byte session key */ int setup_ntlm_response(struct cifsSesInfo *ses) { + int rc = 0; unsigned int temp_len = CIFS_SESS_KEY_SIZE + CIFS_AUTH_RESP_SIZE; char temp_key[CIFS_SESS_KEY_SIZE]; @@ -246,13 +242,26 @@ int setup_ntlm_response(struct cifsSesInfo *ses) } ses->auth_key.len = temp_len; - SMBNTencrypt(ses->password, ses->server->cryptkey, + rc = SMBNTencrypt(ses->password, ses->server->cryptkey, ses->auth_key.response + CIFS_SESS_KEY_SIZE); + if (rc) { + cFYI(1, "%s Can't generate NTLM response, error: %d", + __func__, rc); + return rc; + } + + rc = E_md4hash(ses->password, temp_key); + if (rc) { + cFYI(1, "%s Can't generate NT hash, error: %d", __func__, rc); + return rc; + } - E_md4hash(ses->password, temp_key); - mdfour(ses->auth_key.response, temp_key, CIFS_SESS_KEY_SIZE); + rc = mdfour(ses->auth_key.response, temp_key, CIFS_SESS_KEY_SIZE); + if (rc) + cFYI(1, "%s Can't generate NTLM session key, error: %d", + __func__, rc); - return 0; + return rc; } #ifdef CONFIG_CIFS_WEAK_PW_HASH @@ -699,14 +708,13 @@ cifs_crypto_shash_allocate(struct TCP_Server_Info *server) unsigned int size; server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); - if (!server->secmech.hmacmd5 || - IS_ERR(server->secmech.hmacmd5)) { + if (IS_ERR(server->secmech.hmacmd5)) { cERROR(1, "could not allocate crypto hmacmd5\n"); return PTR_ERR(server->secmech.hmacmd5); } server->secmech.md5 = crypto_alloc_shash("md5", 0, 0); - if (!server->secmech.md5 || IS_ERR(server->secmech.md5)) { + if (IS_ERR(server->secmech.md5)) { cERROR(1, "could not allocate crypto md5\n"); rc = PTR_ERR(server->secmech.md5); goto crypto_allocate_md5_fail; diff --git a/fs/cifs/cifsencrypt.h b/fs/cifs/cifsencrypt.h deleted file mode 100644 index 
15d2ec006474..000000000000 --- a/fs/cifs/cifsencrypt.h +++ /dev/null @@ -1,33 +0,0 @@ -/* - * fs/cifs/cifsencrypt.h - * - * Copyright (c) International Business Machines Corp., 2005 - * Author(s): Steve French (sfrench@us.ibm.com) - * - * Externs for misc. small encryption routines - * so we do not have to put them in cifsproto.h - * - * This library is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; either version 2.1 of the License, or - * (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See - * the GNU Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -/* md4.c */ -extern void mdfour(unsigned char *out, unsigned char *in, int n); -/* smbdes.c */ -extern void E_P16(unsigned char *p14, unsigned char *p16); -extern void E_P24(unsigned char *p21, const unsigned char *c8, - unsigned char *p24); - - - diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 35c989f4924f..8096f27ad9a8 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -375,7 +375,7 @@ extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, extern int cifs_verify_signature(struct smb_hdr *, struct TCP_Server_Info *server, __u32 expected_sequence_number); -extern void SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *); +extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *); extern int setup_ntlm_response(struct cifsSesInfo *); extern int setup_ntlmv2_rsp(struct cifsSesInfo *, const struct nls_table *); extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *); @@ -425,4 +425,11 @@ extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr); extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr, const unsigned char *path, struct cifs_sb_info *cifs_sb, int xid); +extern int mdfour(unsigned char *, unsigned char *, int); +extern int E_md4hash(const unsigned char *passwd, unsigned char *p16); +extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8, + unsigned char *p24); +extern void E_P16(unsigned char *p14, unsigned char *p16); +extern void E_P24(unsigned char *p21, const unsigned char *c8, + unsigned char *p24); #endif /* _CIFSPROTO_H */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 47034af67b09..47d8ff623683 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -55,9 +55,6 @@ /* SMB echo "timeout" -- FIXME: tunable? 
*/ #define SMB_ECHO_INTERVAL (60 * HZ) -extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, - unsigned char *p24); - extern mempool_t *cifs_req_poolp; struct smb_vol { @@ -2990,7 +2987,8 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, bcc_ptr); else #endif /* CIFS_WEAK_PW_HASH */ - SMBNTencrypt(tcon->password, ses->server->cryptkey, bcc_ptr); + rc = SMBNTencrypt(tcon->password, ses->server->cryptkey, + bcc_ptr); bcc_ptr += CIFS_AUTH_RESP_SIZE; if (ses->capabilities & CAP_UNICODE) { diff --git a/fs/cifs/link.c b/fs/cifs/link.c index d3444ea6ac71..02cd60aefbff 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -54,10 +54,9 @@ symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash) struct sdesc *sdescmd5; md5 = crypto_alloc_shash("md5", 0, 0); - if (!md5 || IS_ERR(md5)) { - rc = PTR_ERR(md5); + if (IS_ERR(md5)) { cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc); - return rc; + return PTR_ERR(md5); } size = sizeof(struct shash_desc) + crypto_shash_descsize(md5); sdescmd5 = kmalloc(size, GFP_KERNEL); diff --git a/fs/cifs/md4.c b/fs/cifs/md4.c deleted file mode 100644 index a725c2609d67..000000000000 --- a/fs/cifs/md4.c +++ /dev/null @@ -1,205 +0,0 @@ -/* - Unix SMB/Netbios implementation. - Version 1.9. - a implementation of MD4 designed for use in the SMB authentication protocol - Copyright (C) Andrew Tridgell 1997-1998. - Modified by Steve French (sfrench@us.ibm.com) 2002-2003 - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. -*/ -#include -#include -#include "cifsencrypt.h" - -/* NOTE: This code makes no attempt to be fast! 
*/ - -static __u32 -F(__u32 X, __u32 Y, __u32 Z) -{ - return (X & Y) | ((~X) & Z); -} - -static __u32 -G(__u32 X, __u32 Y, __u32 Z) -{ - return (X & Y) | (X & Z) | (Y & Z); -} - -static __u32 -H(__u32 X, __u32 Y, __u32 Z) -{ - return X ^ Y ^ Z; -} - -static __u32 -lshift(__u32 x, int s) -{ - x &= 0xFFFFFFFF; - return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s)); -} - -#define ROUND1(a,b,c,d,k,s) (*a) = lshift((*a) + F(*b,*c,*d) + X[k], s) -#define ROUND2(a,b,c,d,k,s) (*a) = lshift((*a) + G(*b,*c,*d) + X[k] + (__u32)0x5A827999,s) -#define ROUND3(a,b,c,d,k,s) (*a) = lshift((*a) + H(*b,*c,*d) + X[k] + (__u32)0x6ED9EBA1,s) - -/* this applies md4 to 64 byte chunks */ -static void -mdfour64(__u32 *M, __u32 *A, __u32 *B, __u32 *C, __u32 *D) -{ - int j; - __u32 AA, BB, CC, DD; - __u32 X[16]; - - - for (j = 0; j < 16; j++) - X[j] = M[j]; - - AA = *A; - BB = *B; - CC = *C; - DD = *D; - - ROUND1(A, B, C, D, 0, 3); - ROUND1(D, A, B, C, 1, 7); - ROUND1(C, D, A, B, 2, 11); - ROUND1(B, C, D, A, 3, 19); - ROUND1(A, B, C, D, 4, 3); - ROUND1(D, A, B, C, 5, 7); - ROUND1(C, D, A, B, 6, 11); - ROUND1(B, C, D, A, 7, 19); - ROUND1(A, B, C, D, 8, 3); - ROUND1(D, A, B, C, 9, 7); - ROUND1(C, D, A, B, 10, 11); - ROUND1(B, C, D, A, 11, 19); - ROUND1(A, B, C, D, 12, 3); - ROUND1(D, A, B, C, 13, 7); - ROUND1(C, D, A, B, 14, 11); - ROUND1(B, C, D, A, 15, 19); - - ROUND2(A, B, C, D, 0, 3); - ROUND2(D, A, B, C, 4, 5); - ROUND2(C, D, A, B, 8, 9); - ROUND2(B, C, D, A, 12, 13); - ROUND2(A, B, C, D, 1, 3); - ROUND2(D, A, B, C, 5, 5); - ROUND2(C, D, A, B, 9, 9); - ROUND2(B, C, D, A, 13, 13); - ROUND2(A, B, C, D, 2, 3); - ROUND2(D, A, B, C, 6, 5); - ROUND2(C, D, A, B, 10, 9); - ROUND2(B, C, D, A, 14, 13); - ROUND2(A, B, C, D, 3, 3); - ROUND2(D, A, B, C, 7, 5); - ROUND2(C, D, A, B, 11, 9); - ROUND2(B, C, D, A, 15, 13); - - ROUND3(A, B, C, D, 0, 3); - ROUND3(D, A, B, C, 8, 9); - ROUND3(C, D, A, B, 4, 11); - ROUND3(B, C, D, A, 12, 15); - ROUND3(A, B, C, D, 2, 3); - ROUND3(D, A, B, C, 10, 9); - ROUND3(C, D, A, B, 6, 11); - ROUND3(B, C, D, A, 14, 15); - ROUND3(A, B, C, D, 1, 3); - ROUND3(D, A, B, C, 9, 9); - ROUND3(C, D, A, B, 5, 11); - ROUND3(B, C, D, A, 13, 15); - ROUND3(A, B, C, D, 3, 3); - ROUND3(D, A, B, C, 11, 9); - ROUND3(C, D, A, B, 7, 11); - ROUND3(B, C, D, A, 15, 15); - - *A += AA; - *B += BB; - *C += CC; - *D += DD; - - *A &= 0xFFFFFFFF; - *B &= 0xFFFFFFFF; - *C &= 0xFFFFFFFF; - *D &= 0xFFFFFFFF; - - for (j = 0; j < 16; j++) - X[j] = 0; -} - -static void -copy64(__u32 *M, unsigned char *in) -{ - int i; - - for (i = 0; i < 16; i++) - M[i] = (in[i * 4 + 3] << 24) | (in[i * 4 + 2] << 16) | - (in[i * 4 + 1] << 8) | (in[i * 4 + 0] << 0); -} - -static void -copy4(unsigned char *out, __u32 x) -{ - out[0] = x & 0xFF; - out[1] = (x >> 8) & 0xFF; - out[2] = (x >> 16) & 0xFF; - out[3] = (x >> 24) & 0xFF; -} - -/* produce a md4 message digest from data of length n bytes */ -void -mdfour(unsigned char *out, unsigned char *in, int n) -{ - unsigned char buf[128]; - __u32 M[16]; - __u32 b = n * 8; - int i; - __u32 A = 0x67452301; - __u32 B = 0xefcdab89; - __u32 C = 0x98badcfe; - __u32 D = 0x10325476; - - while (n > 64) { - copy64(M, in); - mdfour64(M, &A, &B, &C, &D); - in += 64; - n -= 64; - } - - for (i = 0; i < 128; i++) - buf[i] = 0; - memcpy(buf, in, n); - buf[n] = 0x80; - - if (n <= 55) { - copy4(buf + 56, b); - copy64(M, buf); - mdfour64(M, &A, &B, &C, &D); - } else { - copy4(buf + 120, b); - copy64(M, buf); - mdfour64(M, &A, &B, &C, &D); - copy64(M, buf + 64); - mdfour64(M, &A, &B, &C, &D); - } - - for (i = 0; i < 128; i++) - buf[i] 
= 0; - copy64(M, buf); - - copy4(out, A); - copy4(out + 4, B); - copy4(out + 8, C); - copy4(out + 12, D); - - A = B = C = D = 0; -} diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c index b6b6dcb500bf..04721485925d 100644 --- a/fs/cifs/smbdes.c +++ b/fs/cifs/smbdes.c @@ -45,7 +45,6 @@ up with a different answer to the one above) */ #include -#include "cifsencrypt.h" #define uchar unsigned char static uchar perm1[56] = { 57, 49, 41, 33, 25, 17, 9, diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c index 30135005e4f3..b5450e9f40c0 100644 --- a/fs/cifs/smbencrypt.c +++ b/fs/cifs/smbencrypt.c @@ -33,7 +33,7 @@ #include "cifspdu.h" #include "cifsglob.h" #include "cifs_debug.h" -#include "cifsencrypt.h" +#include "cifsproto.h" #ifndef false #define false 0 @@ -47,14 +47,57 @@ #define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8) #define SSVAL(buf,pos,val) SSVALX((buf),(pos),((__u16)(val))) -/*The following definitions come from libsmb/smbencrypt.c */ +/* produce a md4 message digest from data of length n bytes */ +int +mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len) +{ + int rc; + unsigned int size; + struct crypto_shash *md4; + struct sdesc *sdescmd4; + + md4 = crypto_alloc_shash("md4", 0, 0); + if (IS_ERR(md4)) { + cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc); + return PTR_ERR(md4); + } + size = sizeof(struct shash_desc) + crypto_shash_descsize(md4); + sdescmd4 = kmalloc(size, GFP_KERNEL); + if (!sdescmd4) { + rc = -ENOMEM; + cERROR(1, "%s: Memory allocation failure\n", __func__); + goto mdfour_err; + } + sdescmd4->shash.tfm = md4; + sdescmd4->shash.flags = 0x0; + + rc = crypto_shash_init(&sdescmd4->shash); + if (rc) { + cERROR(1, "%s: Could not init md4 shash\n", __func__); + goto mdfour_err; + } + crypto_shash_update(&sdescmd4->shash, link_str, link_len); + rc = crypto_shash_final(&sdescmd4->shash, md4_hash); -void SMBencrypt(unsigned char *passwd, const unsigned char *c8, - unsigned char *p24); -void E_md4hash(const unsigned char *passwd, unsigned char *p16); -static void SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8, - unsigned char p24[24]); -void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24); +mdfour_err: + crypto_free_shash(md4); + kfree(sdescmd4); + + return rc; +} + +/* Does the des encryption from the NT or LM MD4 hash. */ +static void +SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8, + unsigned char p24[24]) +{ + unsigned char p21[21]; + + memset(p21, '\0', 21); + + memcpy(p21, passwd, 16); + E_P24(p21, c8, p24); +} /* This implements the X/Open SMB password encryption @@ -117,9 +160,10 @@ _my_mbstowcs(__u16 *dst, const unsigned char *src, int len) * Creates the MD4 Hash of the users password in NT UNICODE. */ -void +int E_md4hash(const unsigned char *passwd, unsigned char *p16) { + int rc; int len; __u16 wpwd[129]; @@ -138,8 +182,10 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16) /* Calculate length in bytes */ len = _my_wcslen(wpwd) * sizeof(__u16); - mdfour(p16, (unsigned char *) wpwd, len); + rc = mdfour(p16, (unsigned char *) wpwd, len); memset(wpwd, 0, 129 * 2); + + return rc; } #if 0 /* currently unused */ @@ -211,19 +257,6 @@ ntv2_owf_gen(const unsigned char owf[16], const char *user_n, } #endif -/* Does the des encryption from the NT or LM MD4 hash. 
*/ -static void -SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8, - unsigned char p24[24]) -{ - unsigned char p21[21]; - - memset(p21, '\0', 21); - - memcpy(p21, passwd, 16); - E_P24(p21, c8, p24); -} - /* Does the des encryption from the FIRST 8 BYTES of the NT or LM MD4 hash. */ #if 0 /* currently unused */ static void @@ -241,16 +274,21 @@ NTLMSSPOWFencrypt(unsigned char passwd[8], #endif /* Does the NT MD4 hash then des encryption. */ - -void +int SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24) { + int rc; unsigned char p21[21]; memset(p21, '\0', 21); - E_md4hash(passwd, p21); + rc = E_md4hash(passwd, p21); + if (rc) { + cFYI(1, "%s Can't generate NT hash, error: %d", __func__, rc); + return rc; + } SMBOWFencrypt(p21, c8, p24); + return rc; } -- cgit v1.2.2 From e34a314c5e49fe6b763568f6576b19f1299c33c2 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 27 Jan 2011 12:13:35 +1100 Subject: xfs: fix efi item leak on forced shutdown After test 139, kmemleak shows: unreferenced object 0xffff880078b405d8 (size 400): comm "xfs_io", pid 4904, jiffies 4294909383 (age 1186.728s) hex dump (first 32 bytes): 60 c1 17 79 00 88 ff ff 60 c1 17 79 00 88 ff ff `..y....`..y.... 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ backtrace: [] kmemleak_alloc+0x2d/0x60 [] kmem_cache_alloc+0x13f/0x2b0 [] kmem_zone_alloc+0x77/0xf0 [] kmem_zone_zalloc+0x1e/0x50 [] xfs_efi_init+0x4b/0xb0 [] xfs_trans_get_efi+0x58/0x90 [] xfs_bmap_finish+0x8b/0x1d0 [] xfs_itruncate_finish+0x2c4/0x5d0 [] xfs_setattr+0x8df/0xa70 [] xfs_vn_setattr+0x1b/0x20 [] notify_change+0x170/0x2e0 [] do_truncate+0x66/0xa0 [] sys_ftruncate+0xdb/0xe0 [] system_call_fastpath+0x16/0x1b [] 0xffffffffffffffff The cause of the leak is that the "remove" parameter of IOP_UNPIN() is never set when a CIL push is aborted. This means that the EFI item is never freed if it was in the push being cancelled. The problem is specific to delayed logging, but has uncovered a couple of problems with the handling of IOP_UNPIN(remove). Firstly, we cannot safely call xfs_trans_del_item() from IOP_UNPIN() in the CIL commit failure path or the iclog write failure path because for delayed loging we have no transaction context. Hence we must only call xfs_trans_del_item() if the log item being unpinned has an active log item descriptor. Secondly, xfs_trans_uncommit() does not handle log item descriptor freeing during the traversal of log items on a transaction. It can reference a freed log item descriptor when unpinning an EFI item. Hence it needs to use a safe list traversal method to allow items to be removed from the transaction during IOP_UNPIN(). Signed-off-by: Dave Chinner Reviewed-by: Alex Elder --- fs/xfs/xfs_buf_item.c | 12 +++++++----- fs/xfs/xfs_extfree_item.c | 3 ++- fs/xfs/xfs_trans.c | 36 +++++++++++++++++++++++++++++------- 3 files changed, 38 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 98c6f73b6752..6f8c21ce0d6d 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -427,13 +427,15 @@ xfs_buf_item_unpin( if (remove) { /* - * We have to remove the log item from the transaction - * as we are about to release our reference to the - * buffer. If we don't, the unlock that occurs later - * in xfs_trans_uncommit() will ry to reference the + * If we are in a transaction context, we have to + * remove the log item from the transaction as we are + * about to release our reference to the buffer. 
If we + * don't, the unlock that occurs later in + * xfs_trans_uncommit() will try to reference the * buffer which we no longer have a hold on. */ - xfs_trans_del_item(lip); + if (lip->li_desc) + xfs_trans_del_item(lip); /* * Since the transaction no longer refers to the buffer, diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index 75f2ef60e579..d22e62623437 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -138,7 +138,8 @@ xfs_efi_item_unpin( if (remove) { ASSERT(!(lip->li_flags & XFS_LI_IN_AIL)); - xfs_trans_del_item(lip); + if (lip->li_desc) + xfs_trans_del_item(lip); xfs_efi_item_free(efip); return; } diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 33dbc4e0ad62..29f5e5424897 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -1446,6 +1446,14 @@ xfs_log_item_batch_insert( * Bulk operation version of xfs_trans_committed that takes a log vector of * items to insert into the AIL. This uses bulk AIL insertion techniques to * minimise lock traffic. + * + * If we are called with the aborted flag set, it is because a log write during + * a CIL checkpoint commit has failed. In this case, all the items in the + * checkpoint have already gone through IOP_COMMITED and IOP_UNLOCK, which + * means that checkpoint commit abort handling is treated exactly the same + * as an iclog write error even though we haven't started any IO yet. Hence in + * this case all we need to do is IOP_COMMITTED processing, followed by an + * IOP_UNPIN(aborted) call. */ void xfs_trans_committed_bulk( @@ -1472,6 +1480,16 @@ xfs_trans_committed_bulk( if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) continue; + /* + * if we are aborting the operation, no point in inserting the + * object into the AIL as we are in a shutdown situation. + */ + if (aborted) { + ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount)); + IOP_UNPIN(lip, 1); + continue; + } + if (item_lsn != commit_lsn) { /* @@ -1503,20 +1521,24 @@ xfs_trans_committed_bulk( } /* - * Called from the trans_commit code when we notice that - * the filesystem is in the middle of a forced shutdown. + * Called from the trans_commit code when we notice that the filesystem is in + * the middle of a forced shutdown. + * + * When we are called here, we have already pinned all the items in the + * transaction. However, neither IOP_COMMITTING or IOP_UNLOCK has been called + * so we can simply walk the items in the transaction, unpin them with an abort + * flag and then free the items. Note that unpinning the items can result in + * them being freed immediately, so we need to use a safe list traversal method + * here. */ STATIC void xfs_trans_uncommit( struct xfs_trans *tp, uint flags) { - struct xfs_log_item_desc *lidp; + struct xfs_log_item_desc *lidp, *n; - list_for_each_entry(lidp, &tp->t_items, lid_trans) { - /* - * Unpin all but those that aren't dirty. - */ + list_for_each_entry_safe(lidp, n, &tp->t_items, lid_trans) { if (lidp->lid_flags & XFS_LID_DIRTY) IOP_UNPIN(lidp->lid_item, 1); } -- cgit v1.2.2 From b8fc82630ae289bb4e661567808afc59e3298dce Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 27 Jan 2011 12:14:12 +1100 Subject: xfs: speculative delayed allocation uses rounddown_power_of_2 badly rounddown_power_of_2() returns an undefined result when passed a value of zero. The specualtive delayed allocation code is doing this when the inode is zero length. Hence occasionally the preallocation is much, much larger than is necessary (e.g. 8GB for a 270 _byte_ file). 
Ensure we don't even pass a zero value to this function so the result of preallocation is always the desired size. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Alex Elder --- fs/xfs/xfs_iomap.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 55582bd66659..8a0f044750c3 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -337,7 +337,12 @@ xfs_iomap_prealloc_size( int shift = 0; int64_t freesp; - alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size); + /* + * rounddown_pow_of_two() returns an undefined result + * if we pass in alloc_blocks = 0. Hence the "+ 1" to + * ensure we always pass in a non-zero value. + */ + alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size) + 1; alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN, rounddown_pow_of_two(alloc_blocks)); -- cgit v1.2.2 From 14b064ceaa6f51a7426cc45b4b43685b94380658 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 27 Jan 2011 12:16:28 +1100 Subject: xfs: limit extent length for allocation to AG size Delayed allocation extents can be larger than AGs, so when trying to convert a large range we may scan every AG inside xfs_bmap_alloc_nullfb() trying to find an AG with a size larger than an AG. We should stop when we find the first AG with a maximum possible allocation size. This causes excessive CPU usage when there are lots of AGs. The same problem occurs when doing preallocation of a range larger than an AG. Fix the problem by limiting real allocation lengths to the maximum that an AG can support. This means if we have empty AGs, we'll stop the search at the first of them. If there are no empty AGs, we'll still scan them all, but that is a different problem.... Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Alex Elder --- fs/xfs/xfs_alloc.h | 16 ++++++++++++++++ fs/xfs/xfs_bmap.c | 18 ++++++++++-------- 2 files changed, 26 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h index 0ab56b32c7eb..d0b3bc72005b 100644 --- a/fs/xfs/xfs_alloc.h +++ b/fs/xfs/xfs_alloc.h @@ -74,6 +74,22 @@ typedef unsigned int xfs_alloctype_t; */ #define XFS_ALLOC_SET_ASIDE(mp) (4 + ((mp)->m_sb.sb_agcount * 4)) +/* + * When deciding how much space to allocate out of an AG, we limit the + * allocation maximum size to the size the AG. However, we cannot use all the + * blocks in the AG - some are permanently used by metadata. These + * blocks are generally: + * - the AG superblock, AGF, AGI and AGFL + * - the AGF (bno and cnt) and AGI btree root blocks + * - 4 blocks on the AGFL according to XFS_ALLOC_SET_ASIDE() limits + * + * The AG headers are sector sized, so the amount of space they take up is + * dependent on filesystem geometry. The others are all single blocks. + */ +#define XFS_ALLOC_AG_MAX_USABLE(mp) \ + ((mp)->m_sb.sb_agblocks - XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)) - 7) + + /* * Argument structure for xfs_alloc routines. 
* This is turned into a structure to avoid having 20 arguments passed diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 4111cd3966c7..f3a3768189bb 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -2430,7 +2430,7 @@ xfs_bmap_btalloc_nullfb( startag = ag = 0; pag = xfs_perag_get(mp, ag); - while (*blen < ap->alen) { + while (*blen < args->maxlen) { if (!pag->pagf_init) { error = xfs_alloc_pagf_init(mp, args->tp, ag, XFS_ALLOC_FLAG_TRYLOCK); @@ -2452,7 +2452,7 @@ xfs_bmap_btalloc_nullfb( notinit = 1; if (xfs_inode_is_filestream(ap->ip)) { - if (*blen >= ap->alen) + if (*blen >= args->maxlen) break; if (ap->userdata) { @@ -2498,14 +2498,14 @@ xfs_bmap_btalloc_nullfb( * If the best seen length is less than the request * length, use the best as the minimum. */ - else if (*blen < ap->alen) + else if (*blen < args->maxlen) args->minlen = *blen; /* - * Otherwise we've seen an extent as big as alen, + * Otherwise we've seen an extent as big as maxlen, * use that as the minimum. */ else - args->minlen = ap->alen; + args->minlen = args->maxlen; /* * set the failure fallback case to look in the selected @@ -2573,7 +2573,9 @@ xfs_bmap_btalloc( args.tp = ap->tp; args.mp = mp; args.fsbno = ap->rval; - args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks); + + /* Trim the allocation back to the maximum an AG can fit. */ + args.maxlen = MIN(ap->alen, XFS_ALLOC_AG_MAX_USABLE(mp)); args.firstblock = ap->firstblock; blen = 0; if (nullfb) { @@ -2621,7 +2623,7 @@ xfs_bmap_btalloc( /* * Adjust for alignment */ - if (blen > args.alignment && blen <= ap->alen) + if (blen > args.alignment && blen <= args.maxlen) args.minlen = blen - args.alignment; args.minalignslop = 0; } else { @@ -2640,7 +2642,7 @@ xfs_bmap_btalloc( * of minlen+alignment+slop doesn't go up * between the calls. */ - if (blen > mp->m_dalign && blen <= ap->alen) + if (blen > mp->m_dalign && blen <= args.maxlen) nextminlen = blen - mp->m_dalign; else nextminlen = args.minlen; -- cgit v1.2.2 From 4ce159890c00e2cc705e955a939bf1dca7b07ab8 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 27 Jan 2011 12:17:58 +1100 Subject: xfs: prevent extsize alignment from exceeding maximum extent size When doing delayed allocation, if the allocation size is for a maximally sized extent, extent size alignment can push it over this limit. This results in an assert failure in xfs_bmbt_set_allf() as the extent length is too large to find in the extent record. Fix this by ensuring that we allow for space that extent size alignment requires (up to 2 * (extsize -1) blocks as we have to handle both head and tail alignment) when limiting the maximum size of the extent. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Alex Elder --- fs/xfs/xfs_bmap.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'fs') diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index f3a3768189bb..3e9c278a8f78 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -4487,6 +4487,16 @@ xfs_bmapi( /* Figure out the extent size, adjust alen */ extsz = xfs_get_extsz_hint(ip); if (extsz) { + /* + * make sure we don't exceed a single + * extent length when we align the + * extent by reducing length we are + * going to allocate by the maximum + * amount extent size aligment may + * require. 
+ */ + alen = XFS_FILBLKS_MIN(len, + MAXEXTLEN - (2 * extsz - 1)); error = xfs_bmap_extsize_align(mp, &got, &prev, extsz, rt, eof, -- cgit v1.2.2 From 5315837daee7ed76c31ef643915f7d76ef8c1aa3 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 27 Jan 2011 12:18:18 +1100 Subject: xfs: limit extsize to size of AGs and/or MAXEXTLEN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The extent size hint can be set to larger than an AG. This means that the alignment process can push the range to be allocated outside the bounds of the AG, resulting in assert failures or corrupted bmbt records. Similarly, if the extsize is larger than the maximum extent size supported, the alignment process will produce extents that are too large to fit into the bmbt records, resulting in a different type of assert/corruption failure. Fix this by limiting extsize at the time іt is set firstly to be less than MAXEXTLEN, then to be a maximum of half the size of the AGs in the filesystem for non-realtime inodes. Realtime inodes do not allocate out of AGs, so don't have to be restricted by the size of AGs. Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Alex Elder --- fs/xfs/linux-2.6/xfs_ioctl.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index b06ede1d0bed..f5e2a19e0f8e 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -985,10 +985,22 @@ xfs_ioctl_setattr( /* * Extent size must be a multiple of the appropriate block - * size, if set at all. + * size, if set at all. It must also be smaller than the + * maximum extent size supported by the filesystem. + * + * Also, for non-realtime files, limit the extent size hint to + * half the size of the AGs in the filesystem so alignment + * doesn't result in extents larger than an AG. */ if (fa->fsx_extsize != 0) { - xfs_extlen_t size; + xfs_extlen_t size; + xfs_fsblock_t extsize_fsb; + + extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize); + if (extsize_fsb > MAXEXTLEN) { + code = XFS_ERROR(EINVAL); + goto error_return; + } if (XFS_IS_REALTIME_INODE(ip) || ((mask & FSX_XFLAGS) && @@ -997,6 +1009,10 @@ xfs_ioctl_setattr( mp->m_sb.sb_blocklog; } else { size = mp->m_sb.sb_blocksize; + if (extsize_fsb > mp->m_sb.sb_agblocks / 2) { + code = XFS_ERROR(EINVAL); + goto error_return; + } } if (fa->fsx_extsize % size) { -- cgit v1.2.2 From c6f990d1ff8e4e53b12f4175eb7d7ea710c3ca73 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Thu, 27 Jan 2011 13:23:28 +1100 Subject: xfs: handle CIl transaction commit failures correctly Failure to commit a transaction into the CIL is not handled correctly. This currently can only happen when racing with a shutdown and requires an explicit shutdown check, so it rare and can be avoided. Remove the shutdown check and make the CIL commit a void function to indicate it will always succeed, thereby removing the incorrectly handled failure case. 
Signed-off-by: Dave Chinner Reviewed-by: Christoph Hellwig Reviewed-by: Alex Elder --- fs/xfs/xfs_log.h | 2 +- fs/xfs/xfs_log_cil.c | 8 +------- fs/xfs/xfs_trans.c | 5 +---- 3 files changed, 3 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h index 916eb7db14d9..3bd3291ef8d2 100644 --- a/fs/xfs/xfs_log.h +++ b/fs/xfs/xfs_log.h @@ -191,7 +191,7 @@ void xfs_log_ticket_put(struct xlog_ticket *ticket); xlog_tid_t xfs_log_get_trans_ident(struct xfs_trans *tp); -int xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp, +void xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp, struct xfs_log_vec *log_vector, xfs_lsn_t *commit_lsn, int flags); bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip); diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index c7eac5acbfea..9ca59be08977 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c @@ -625,7 +625,7 @@ out_abort: * background commit, returns without it held once background commits are * allowed again. */ -int +void xfs_log_commit_cil( struct xfs_mount *mp, struct xfs_trans *tp, @@ -640,11 +640,6 @@ xfs_log_commit_cil( if (flags & XFS_TRANS_RELEASE_LOG_RES) log_flags = XFS_LOG_REL_PERM_RESERV; - if (XLOG_FORCED_SHUTDOWN(log)) { - xlog_cil_free_logvec(log_vector); - return XFS_ERROR(EIO); - } - /* * do all the hard work of formatting items (including memory * allocation) outside the CIL context lock. This prevents stalling CIL @@ -704,7 +699,6 @@ xfs_log_commit_cil( */ if (push) xlog_cil_push(log, 0); - return 0; } /* diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 29f5e5424897..76922793f64f 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -1755,7 +1755,6 @@ xfs_trans_commit_cil( int flags) { struct xfs_log_vec *log_vector; - int error; /* * Get each log item to allocate a vector structure for @@ -1766,9 +1765,7 @@ xfs_trans_commit_cil( if (!log_vector) return ENOMEM; - error = xfs_log_commit_cil(mp, tp, log_vector, commit_lsn, flags); - if (error) - return error; + xfs_log_commit_cil(mp, tp, log_vector, commit_lsn, flags); current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); xfs_trans_free(tp); -- cgit v1.2.2 From 0fbca4d1c3932c27c4794bf5c2b5fc961cf5a54f Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Fri, 28 Jan 2011 11:20:46 +1100 Subject: xfs: fix dquot shaker deadlock Commit 368e136 ("xfs: remove duplicate code from dquot reclaim") fails to unlock the dquot freelist when the number of loop restarts is exceeded in xfs_qm_dqreclaim_one(). This causes hangs in memory reclaim. Rework the loop control logic into an unwind stack that all the different cases jump into. This means there is only one set of code that processes the loop exit criteria, and simplifies the unlocking of all the items from different points in the loop. It also fixes a double increment of the restart counter from the qi_dqlist_lock case. 
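The reworked loop is easier to follow with the control-flow shape isolated from the quota details. The sketch below is a runnable user-space reduction of the same idea, with hypothetical trylock_a/trylock_b stubs standing in for the dquot, hash and mplist locks: every bail-out inside the loop falls through a single stack of unlock labels, and the restart decision is evaluated in exactly one place.

#include <stdio.h>
#include <stdbool.h>

#define MAX_RESTARTS 4

static int contention = 2;		/* pretend the inner lock is busy twice */

static bool trylock_a(int i) { (void)i; return true; }
static bool trylock_b(int i) { (void)i; return contention-- <= 0; }
static void unlock_a(int i)  { printf("unlock a (%d)\n", i); }
static void unlock_b(int i)  { printf("unlock b (%d)\n", i); }

static int reclaim_one(void)
{
	int restarts = 0;
	int found = -1;
	bool startagain;

again:
	startagain = false;
	for (int i = 0; i < 8; i++) {
		if (!trylock_a(i))
			continue;
		if (!trylock_b(i)) {
			/* want the item but cannot take it now: record the
			 * restart and fall through the unwind labels */
			restarts++;
			startagain = true;
			goto drop_a;
		}
		found = i;			/* success: claim the item */
		unlock_b(i);
drop_a:
		unlock_a(i);
		if (found >= 0 || restarts >= MAX_RESTARTS)
			break;
		if (startagain)
			goto again;		/* rescan from the top */
	}
	return found;
}

int main(void)
{
	printf("reclaimed item %d\n", reclaim_one());
	return 0;
}

Keeping the exit criteria in one spot after the labels is what removes the duplicated unlock sequences and the double restart increment described above.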
Reported-by: Malcolm Scott Signed-off-by: Dave Chinner Reviewed-by: Alex Elder --- fs/xfs/quota/xfs_qm.c | 46 +++++++++++++++++++++------------------------- 1 file changed, 21 insertions(+), 25 deletions(-) (limited to 'fs') diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index f8e854b4fde8..206a2815ced6 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -1863,12 +1863,14 @@ xfs_qm_dqreclaim_one(void) xfs_dquot_t *dqpout; xfs_dquot_t *dqp; int restarts; + int startagain; restarts = 0; dqpout = NULL; /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */ -startagain: +again: + startagain = 0; mutex_lock(&xfs_Gqm->qm_dqfrlist_lock); list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) { @@ -1885,13 +1887,10 @@ startagain: ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE)); trace_xfs_dqreclaim_want(dqp); - - xfs_dqunlock(dqp); - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); - if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) - return NULL; XQM_STATS_INC(xqmstats.xs_qm_dqwants); - goto startagain; + restarts++; + startagain = 1; + goto dqunlock; } /* @@ -1906,23 +1905,20 @@ startagain: ASSERT(list_empty(&dqp->q_mplist)); list_del_init(&dqp->q_freelist); xfs_Gqm->qm_dqfrlist_cnt--; - xfs_dqunlock(dqp); dqpout = dqp; XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims); - break; + goto dqunlock; } ASSERT(dqp->q_hash); ASSERT(!list_empty(&dqp->q_mplist)); /* - * Try to grab the flush lock. If this dquot is in the process of - * getting flushed to disk, we don't want to reclaim it. + * Try to grab the flush lock. If this dquot is in the process + * of getting flushed to disk, we don't want to reclaim it. */ - if (!xfs_dqflock_nowait(dqp)) { - xfs_dqunlock(dqp); - continue; - } + if (!xfs_dqflock_nowait(dqp)) + goto dqunlock; /* * We have the flush lock so we know that this is not in the @@ -1944,8 +1940,7 @@ startagain: xfs_fs_cmn_err(CE_WARN, mp, "xfs_qm_dqreclaim: dquot %p flush failed", dqp); } - xfs_dqunlock(dqp); /* dqflush unlocks dqflock */ - continue; + goto dqunlock; } /* @@ -1967,13 +1962,8 @@ startagain: */ if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) { restarts++; - mutex_unlock(&dqp->q_hash->qh_lock); - xfs_dqfunlock(dqp); - xfs_dqunlock(dqp); - mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); - if (restarts++ >= XFS_QM_RECLAIM_MAX_RESTARTS) - return NULL; - goto startagain; + startagain = 1; + goto qhunlock; } ASSERT(dqp->q_nrefs == 0); @@ -1986,14 +1976,20 @@ startagain: xfs_Gqm->qm_dqfrlist_cnt--; dqpout = dqp; mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock); +qhunlock: mutex_unlock(&dqp->q_hash->qh_lock); dqfunlock: xfs_dqfunlock(dqp); +dqunlock: xfs_dqunlock(dqp); if (dqpout) break; if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS) - return NULL; + break; + if (startagain) { + mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); + goto again; + } } mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock); return dqpout; -- cgit v1.2.2 From 24446fc66fdebbdd8baca0f44fd2a47ad77ba580 Mon Sep 17 00:00:00 2001 From: "bpm@sgi.com" Date: Wed, 19 Jan 2011 17:41:58 +0000 Subject: xfs: xfs_bmap_add_extent_delay_real should init br_startblock When filling in the middle of a previous delayed allocation in xfs_bmap_add_extent_delay_real, set br_startblock of the new delay extent to the right to nullstartblock instead of 0 before inserting the extent into the ifork (xfs_iext_insert), rather than setting br_startblock afterward. 
Adding the extent into the ifork with br_startblock=0 can lead to the extent being copied into the btree by xfs_bmap_extent_to_btree if we happen to convert from extents format to btree format before updating br_startblock with the correct value. The unexpected addition of this delay extent to the btree can cause subsequent XFS_WANT_CORRUPTED_GOTO filesystem shutdown in several xfs_bmap_add_extent_delay_real cases where we are converting a delay extent to real and unexpectedly find an extent already inserted. For example: 911 case BMAP_LEFT_FILLING: 912 /* 913 * Filling in the first part of a previous delayed allocation. 914 * The left neighbor is not contiguous. 915 */ 916 trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); 917 xfs_bmbt_set_startoff(ep, new_endoff); 918 temp = PREV.br_blockcount - new->br_blockcount; 919 xfs_bmbt_set_blockcount(ep, temp); 920 xfs_iext_insert(ip, idx, 1, new, state); 921 ip->i_df.if_lastex = idx; 922 ip->i_d.di_nextents++; 923 if (cur == NULL) 924 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; 925 else { 926 rval = XFS_ILOG_CORE; 927 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff, 928 new->br_startblock, new->br_blockcount, 929 &i))) 930 goto done; 931 XFS_WANT_CORRUPTED_GOTO(i == 0, done); With the bogus extent in the btree we shutdown the filesystem at 931. The conversion from extents to btree format happens when the number of extents in the inode increases above ip->i_df.if_ext_max. xfs_bmap_extent_to_btree copies extents from the ifork into the btree, ignoring all delalloc extents which are denoted by br_startblock having some value of nullstartblock. SGI-PV: 1013221 Signed-off-by: Ben Myers Reviewed-by: Dave Chinner Signed-off-by: Alex Elder --- fs/xfs/xfs_bmap.c | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index 3e9c278a8f78..dc3afd7739ff 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -1038,17 +1038,34 @@ xfs_bmap_add_extent_delay_real( * Filling in the middle part of a previous delayed allocation. * Contiguity is impossible here. * This case is avoided almost all the time. 
+ * + * We start with a delayed allocation: + * + * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+ + * PREV @ idx + * + * and we are allocating: + * +rrrrrrrrrrrrrrrrr+ + * new + * + * and we set it up for insertion as: + * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+ + * new + * PREV @ idx LEFT RIGHT + * inserted at idx + 1 */ temp = new->br_startoff - PREV.br_startoff; - trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_); - xfs_bmbt_set_blockcount(ep, temp); - r[0] = *new; - r[1].br_state = PREV.br_state; - r[1].br_startblock = 0; - r[1].br_startoff = new_endoff; temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; - r[1].br_blockcount = temp2; - xfs_iext_insert(ip, idx + 1, 2, &r[0], state); + trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_); + xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */ + LEFT = *new; + RIGHT.br_state = PREV.br_state; + RIGHT.br_startblock = nullstartblock( + (int)xfs_bmap_worst_indlen(ip, temp2)); + RIGHT.br_startoff = new_endoff; + RIGHT.br_blockcount = temp2; + /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */ + xfs_iext_insert(ip, idx + 1, 2, &LEFT, state); ip->i_df.if_lastex = idx + 1; ip->i_d.di_nextents++; if (cur == NULL) -- cgit v1.2.2 From e00b8a24041f37e56b4b8415ce4eba1cbc238065 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 27 Jan 2011 14:55:39 -0500 Subject: NFS: Fix an NFS client lockdep issue There is no reason to be freeing the delegation cred in the rcu callback, and doing so is resulting in a lockdep complaint that rpc_credcache_lock is being called from both softirq and non-softirq contexts. Reported-by: Chuck Lever Signed-off-by: Trond Myklebust Cc: stable@kernel.org --- fs/nfs/delegation.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 364e4328f392..bbbc6bf5cb2e 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -23,8 +23,6 @@ static void nfs_do_free_delegation(struct nfs_delegation *delegation) { - if (delegation->cred) - put_rpccred(delegation->cred); kfree(delegation); } @@ -37,6 +35,10 @@ static void nfs_free_delegation_callback(struct rcu_head *head) static void nfs_free_delegation(struct nfs_delegation *delegation) { + if (delegation->cred) { + put_rpccred(delegation->cred); + delegation->cred = NULL; + } call_rcu(&delegation->rcu, nfs_free_delegation_callback); } -- cgit v1.2.2 From c08e76d0cd4beb759a73c1835d98f5fccc126ed1 Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 28 Jan 2011 12:40:55 -0500 Subject: NFS: Micro-optimize nfs4_decode_dirent() Make the decoding of NFSv4 directory entries slightly more efficient by: 1. Avoiding unnecessary byte swapping when checking XDR booleans, and 2. Not bumping "p" when its value will be immediately replaced by xdr_inline_decode() This commit makes nfs4_decode_dirent() consistent with similar logic in the other two decode_dirent() functions. 
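The byte-swapping point generalises: an XDR word arrives in big-endian order, so a pure presence/boolean test can compare the raw word against a big-endian constant, and only values that are used arithmetically need converting to CPU order. A small user-space sketch of that distinction, using a made-up two-word buffer and ntohl() as the stand-in for be32_to_cpup():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static const uint32_t xdr_zero = 0;	/* zero is the same in either byte order */

int main(void)
{
	/* fake XDR stream: boolean "an entry follows", then a length of 13 */
	uint32_t wire[2] = { htonl(1), htonl(13) };
	const uint32_t *p = wire;

	/* presence/boolean test: compare the raw big-endian word, no swap */
	if (*p != xdr_zero)
		printf("an entry follows\n");
	p++;

	/* a length that is used arithmetically must be converted */
	uint32_t len = ntohl(*p);	/* kernel equivalent: be32_to_cpup(p) */
	printf("name length = %u bytes\n", len);
	return 0;
}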
Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 2ab8e5cb8f59..009aef9e12bc 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -6086,11 +6086,11 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, __be32 *p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; - if (!ntohl(*p++)) { + if (*p == xdr_zero) { p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; - if (!ntohl(*p++)) + if (*p == xdr_zero) return -EAGAIN; entry->eof = 1; return -EBADCOOKIE; @@ -6101,7 +6101,7 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, goto out_overflow; entry->prev_cookie = entry->cookie; p = xdr_decode_hyper(p, &entry->cookie); - entry->len = ntohl(*p++); + entry->len = be32_to_cpup(p); p = xdr_inline_decode(xdr, entry->len); if (unlikely(!p)) -- cgit v1.2.2 From d1205f87bbb8040c1408bbd9e0a720310b2b0b9b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 28 Jan 2011 12:41:05 -0500 Subject: NFS: NFSv4 readdir loses entries On recent 2.6.38-rc kernels, connectathon basic test 6 fails on NFSv4 mounts of OpenSolaris with something like: > ./test6: readdir > ./test6: (/mnt/klimt/matisse.test) didn't read expected 'file.12' dir entry, pass 0 > ./test6: (/mnt/klimt/matisse.test) didn't read expected 'file.82' dir entry, pass 0 > ./test6: (/mnt/klimt/matisse.test) didn't read expected 'file.164' dir entry, pass 0 > ./test6: (/mnt/klimt/matisse.test) Test failed with 3 errors > basic tests failed > Tests failed, leaving /mnt/klimt mounted > [cel@matisse cthon04]$ I narrowed the problem down to nfs4_decode_dirent() reporting that the decode buffer had overflowed while decoding the entries for those missing files. verify_attr_len() assumes both it's pointer arguments reside on the same page. When these arguments point to locations on two different pages, verify_attr_len() can report false errors. This can happen now that a large NFSv4 readdir result can span pages. We have reasonably good checking in nfs4_decode_dirent() anyway, so it should be safe to simply remove the extra checking. At a guess, this was introduced by commit 6650239a, "NFS: Don't use vm_map_ram() in readdir". Cc: stable@kernel.org [2.6.37] Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust --- fs/nfs/nfs4xdr.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 009aef9e12bc..4e2c168b6ee9 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -6132,9 +6132,6 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE) entry->d_type = nfs_umode_to_dtype(entry->fattr->mode); - if (verify_attr_len(xdr, p, len) < 0) - goto out_overflow; - return 0; out_overflow: -- cgit v1.2.2 From 6b82ce8d824bd46053e46a895876cde39d9026e4 Mon Sep 17 00:00:00 2001 From: liubo Date: Wed, 26 Jan 2011 06:21:39 +0000 Subject: btrfs: fix uncheck memory allocation in btrfs_submit_compressed_read btrfs_submit_compressed_read() is lack of memory allocation checks and corresponding error route. After this fix, if it comes to "no memory" case, errno will be returned to userland step by step, and tell users this operation cannot go on. 
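The error path being added follows the classic staged-unwind idiom: one label per allocation, with failures falling through the labels in reverse order of allocation so everything obtained so far is released before the error is returned. A compact, runnable user-space sketch of that shape, under hypothetical names rather than the btrfs types:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int submit_read(int nr_pages)
{
	int ret = -ENOMEM;
	char *cb;
	char **pages;

	cb = malloc(64);
	if (!cb)
		goto out;

	/* zeroed array so unused slots stay NULL, mirroring the kzalloc() */
	pages = calloc(nr_pages, sizeof(*pages));
	if (!pages)
		goto fail1;

	for (int i = 0; i < nr_pages; i++) {
		pages[i] = malloc(4096);
		if (!pages[i])
			goto fail2;
	}

	/* ... the real function would build and submit the I/O here ... */
	for (int i = 0; i < nr_pages; i++)
		free(pages[i]);
	free(pages);
	free(cb);
	return 0;

fail2:
	for (int i = 0; i < nr_pages; i++)
		free(pages[i]);		/* free(NULL) is a no-op */
	free(pages);
fail1:
	free(cb);
out:
	return ret;
}

int main(void)
{
	return submit_read(4) ? 1 : 0;
}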
Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 25 +++++++++++++++++++++++-- fs/btrfs/extent_io.c | 4 ++-- 2 files changed, 25 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index f745287fbf2e..3a932f183da1 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -562,7 +562,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, u64 em_len; u64 em_start; struct extent_map *em; - int ret; + int ret = -ENOMEM; u32 *sums; tree = &BTRFS_I(inode)->io_tree; @@ -577,6 +577,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, compressed_len = em->block_len; cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); + if (!cb) + goto out; + atomic_set(&cb->pending_bios, 0); cb->errors = 0; cb->inode = inode; @@ -597,13 +600,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE; - cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages, + cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); + if (!cb->compressed_pages) + goto fail1; + bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; for (page_index = 0; page_index < nr_pages; page_index++) { cb->compressed_pages[page_index] = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (!cb->compressed_pages[page_index]) + goto fail2; } cb->nr_pages = nr_pages; @@ -614,6 +622,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->len = uncompressed_len; comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); + if (!comp_bio) + goto fail2; comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; atomic_inc(&cb->pending_bios); @@ -681,6 +691,17 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, bio_put(comp_bio); return 0; + +fail2: + for (page_index = 0; page_index < nr_pages; page_index++) + free_page((unsigned long)cb->compressed_pages[page_index]); + + kfree(cb->compressed_pages); +fail1: + kfree(cb); +out: + free_extent_map(em); + return ret; } static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES]; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8b8d3d99ae68..6411ed6ca449 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1865,7 +1865,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num, bio_get(bio); if (tree->ops && tree->ops->submit_bio_hook) - tree->ops->submit_bio_hook(page->mapping->host, rw, bio, + ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio, mirror_num, bio_flags, start); else submit_bio(rw, bio); @@ -2126,7 +2126,7 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page, ret = __extent_read_full_page(tree, page, get_extent, &bio, 0, &bio_flags); if (bio) - submit_one_bio(READ, bio, 0, bio_flags); + ret = submit_one_bio(READ, bio, 0, bio_flags); return ret; } -- cgit v1.2.2 From 2a29edc6b60a5248ccab588e7ba7dad38cef0235 Mon Sep 17 00:00:00 2001 From: liubo Date: Wed, 26 Jan 2011 06:22:08 +0000 Subject: btrfs: fix several uncheck memory allocations To make btrfs more stable, add several missing necessary memory allocation checks, and when no memory, return proper errno. We've checked that some of those -ENOMEM errors will be returned to userspace, and some will be catched by BUG_ON() in the upper callers, and none will be ignored silently. 
Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/export.c | 2 ++ fs/btrfs/file-item.c | 2 ++ fs/btrfs/file.c | 4 ++++ fs/btrfs/tree-log.c | 25 +++++++++++++++++++++++++ 4 files changed, 33 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index 6f0444473594..3220ad1aafc8 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c @@ -176,6 +176,8 @@ static struct dentry *btrfs_get_parent(struct dentry *child) int ret; path = btrfs_alloc_path(); + if (!path) + return ERR_PTR(-ENOMEM); if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) { key.objectid = root->root_key.objectid; diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a562a250ae77..d0bc72657cd7 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -536,6 +536,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, root = root->fs_info->csum_root; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; while (1) { key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f903433f5bdf..65b2424a4116 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -945,6 +945,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / (sizeof(struct page *))); pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL); + if (!pages) { + ret = -ENOMEM; + goto out; + } /* generic_write_checks can change our pos */ start_pos = pos; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 054744ac5719..c25a41d86118 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -338,6 +338,12 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, } dst_copy = kmalloc(item_size, GFP_NOFS); src_copy = kmalloc(item_size, GFP_NOFS); + if (!dst_copy || !src_copy) { + btrfs_release_path(root, path); + kfree(dst_copy); + kfree(src_copy); + return -ENOMEM; + } read_extent_buffer(eb, src_copy, src_ptr, item_size); @@ -665,6 +671,9 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, btrfs_dir_item_key_to_cpu(leaf, di, &location); name_len = btrfs_dir_name_len(leaf, di); name = kmalloc(name_len, GFP_NOFS); + if (!name) + return -ENOMEM; + read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); btrfs_release_path(root, path); @@ -744,6 +753,9 @@ static noinline int backref_in_log(struct btrfs_root *log, int match = 0; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + ret = btrfs_search_slot(NULL, log, key, path, 0, 0); if (ret != 0) goto out; @@ -967,6 +979,8 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, key.offset = (u64)-1; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; while (1) { ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); @@ -1178,6 +1192,9 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, name_len = btrfs_dir_name_len(eb, di); name = kmalloc(name_len, GFP_NOFS); + if (!name) + return -ENOMEM; + log_type = btrfs_dir_type(eb, di); read_extent_buffer(eb, name, (unsigned long)(di + 1), name_len); @@ -1692,6 +1709,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, root_owner = btrfs_header_owner(parent); next = btrfs_find_create_tree_block(root, bytenr, blocksize); + if (!next) + return -ENOMEM; if (*level == 1) { wc->process_func(root, next, wc, ptr_gen); @@ -2194,6 +2213,9 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, log = root->log_root; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + di = btrfs_lookup_dir_item(trans, 
log, path, dir->i_ino, name, name_len, -1); if (IS_ERR(di)) { @@ -2594,6 +2616,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, ins_data = kmalloc(nr * sizeof(struct btrfs_key) + nr * sizeof(u32), GFP_NOFS); + if (!ins_data) + return -ENOMEM; + ins_sizes = (u32 *)ins_data; ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); -- cgit v1.2.2 From 333e8105445d4f51101fc3d23199a919d66730b3 Mon Sep 17 00:00:00 2001 From: liubo Date: Wed, 26 Jan 2011 06:22:33 +0000 Subject: btrfs: fix missing break in switch phrase There is a missing break in switch, fix it. Signed-off-by: Liu Bo Signed-off-by: Chris Mason --- fs/btrfs/print-tree.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 0d126be22b63..fb2605d998e9 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -260,6 +260,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l) #else BUG(); #endif + break; case BTRFS_BLOCK_GROUP_ITEM_KEY: bi = btrfs_item_ptr(l, i, struct btrfs_block_group_item); -- cgit v1.2.2 From 34d19bada00f4825588b338a8ee193820f9ceeb0 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 24 Jan 2011 19:55:19 +0000 Subject: fs/btrfs/inode.c: Add missing IS_ERR test After the conditional that precedes the following code, inode may be an ERR_PTR value. This can eg result from a memory allocation failure via the call to btrfs_iget, and thus does not imply that root is different than sub_root. Thus, an IS_ERR check is added to ensure that there is no dereference of inode in this case. The semantic match that finds this problem is as follows: (http://coccinelle.lip6.fr/) // @r@ identifier f; @@ f(...) { ... return ERR_PTR(...); } @@ identifier r.f, fld; expression x; statement S1,S2; @@ x = f(...) ... when != IS_ERR(x) ( if (IS_ERR(x) ||...) S1 else S2 | *x->fld ) // Signed-off-by: Julia Lawall Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2c9a2f7d5631..2b7d251d6ad1 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -4137,7 +4137,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) } srcu_read_unlock(&root->fs_info->subvol_srcu, index); - if (root != sub_root) { + if (!IS_ERR(inode) && root != sub_root) { down_read(&root->fs_info->cleanup_work_sem); if (!(inode->i_sb->s_flags & MS_RDONLY)) btrfs_orphan_cleanup(sub_root); -- cgit v1.2.2 From 3612b49598c303cfb22a4b609427f829828e2427 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Tue, 25 Jan 2011 02:51:38 +0000 Subject: btrfs: fix return value check of btrfs_join_transaction() The error check of btrfs_join_transaction()/btrfs_join_transaction_nolock() is added, and the mistake of the error check in several places is corrected. For more stable Btrfs, I think that we should reduce BUG_ON(). But, I think that long time is necessary for this. So, I propose this patch as a short-term solution. With this patch: - To more stable Btrfs, the part that should be corrected is clarified. - The panic isn't done by the NULL pointer reference etc. (even if BUG_ON() is increased temporarily) - The error code is returned in the place where the error can be easily returned. As a long-term plan: - BUG_ON() is reduced by using the forced-readonly framework, etc. 
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 5 +++++ fs/btrfs/extent-tree.c | 2 +- fs/btrfs/inode.c | 24 ++++++++++++++++-------- fs/btrfs/ioctl.c | 2 +- fs/btrfs/relocation.c | 26 +++++++++++++++++++++++--- fs/btrfs/transaction.c | 5 +++++ 6 files changed, 51 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 2887b8be6fdd..b36eeef19194 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1550,6 +1550,7 @@ static int transaction_kthread(void *arg) spin_unlock(&root->fs_info->new_trans_lock); trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); if (transid == trans->transid) { ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); @@ -2464,10 +2465,14 @@ int btrfs_commit_super(struct btrfs_root *root) up_write(&root->fs_info->cleanup_work_sem); trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); /* run commit again to drop the original snapshot */ trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); btrfs_commit_transaction(trans, root); ret = btrfs_write_and_wait_transaction(NULL, root); BUG_ON(ret); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index bcf303204f7f..98ee139885cc 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7478,7 +7478,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) BUG_ON(reloc_root->commit_root != NULL); while (1) { trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); mutex_lock(&root->fs_info->drop_mutex); ret = btrfs_drop_snapshot(trans, reloc_root); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2b7d251d6ad1..40fee137dd11 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -416,7 +416,7 @@ again: } if (start == 0) { trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -612,6 +612,7 @@ retry: GFP_NOFS); trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); ret = btrfs_reserve_extent(trans, root, async_extent->compressed_size, async_extent->compressed_size, @@ -771,7 +772,7 @@ static noinline int cow_file_range(struct inode *inode, BUG_ON(root == root->fs_info->tree_root); trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -1049,7 +1050,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, } else { trans = btrfs_join_transaction(root, 1); } - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); cow_start = (u64)-1; cur_offset = start; @@ -1704,7 +1705,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_update_inode(trans, root, inode); @@ -1721,6 +1722,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -2382,6 +2384,7 @@ void 
btrfs_orphan_cleanup(struct btrfs_root *root) if (root->orphan_block_rsv || root->orphan_item_inserted) { trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); btrfs_end_transaction(trans, root); } @@ -4350,6 +4353,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) trans = btrfs_join_transaction_nolock(root, 1); else trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return PTR_ERR(trans); btrfs_set_trans_block_group(trans, inode); if (nolock) ret = btrfs_end_transaction_nolock(trans, root); @@ -4375,6 +4380,7 @@ void btrfs_dirty_inode(struct inode *inode) return; trans = btrfs_join_transaction(root, 1); + BUG_ON(IS_ERR(trans)); btrfs_set_trans_block_group(trans, inode); ret = btrfs_update_inode(trans, root, inode); @@ -5179,6 +5185,8 @@ again: em = NULL; btrfs_release_path(root, path); trans = btrfs_join_transaction(root, 1); + if (IS_ERR(trans)) + return ERR_CAST(trans); goto again; } map = kmap(page); @@ -5283,8 +5291,8 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, btrfs_drop_extent_cache(inode, start, start + len - 1, 0); trans = btrfs_join_transaction(root, 0); - if (!trans) - return ERR_PTR(-ENOMEM); + if (IS_ERR(trans)) + return ERR_CAST(trans); trans->block_rsv = &root->fs_info->delalloc_block_rsv; @@ -5508,7 +5516,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, * while we look for nocow cross refs */ trans = btrfs_join_transaction(root, 0); - if (!trans) + if (IS_ERR(trans)) goto must_cow; if (can_nocow_odirect(trans, inode, start, len) == 1) { @@ -5643,7 +5651,7 @@ again: BUG_ON(!ordered); trans = btrfs_join_transaction(root, 1); - if (!trans) { + if (IS_ERR(trans)) { err = -ENOMEM; goto out; } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index edd82becbb9e..04b4fb9144a9 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -203,7 +203,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) trans = btrfs_join_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); ret = btrfs_update_inode(trans, root, inode); BUG_ON(ret); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 045c9c2b2d7e..ea9965430241 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2147,6 +2147,12 @@ again: } trans = btrfs_join_transaction(rc->extent_root, 1); + if (IS_ERR(trans)) { + if (!err) + btrfs_block_rsv_release(rc->extent_root, + rc->block_rsv, num_bytes); + return PTR_ERR(trans); + } if (!err) { if (num_bytes != rc->merging_rsv_size) { @@ -3222,6 +3228,7 @@ truncate: trans = btrfs_join_transaction(root, 0); if (IS_ERR(trans)) { btrfs_free_path(path); + ret = PTR_ERR(trans); goto out; } @@ -3628,6 +3635,7 @@ int prepare_to_relocate(struct reloc_control *rc) set_reloc_control(rc); trans = btrfs_join_transaction(rc->extent_root, 1); + BUG_ON(IS_ERR(trans)); btrfs_commit_transaction(trans, rc->extent_root); return 0; } @@ -3804,7 +3812,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) /* get rid of pinned extents */ trans = btrfs_join_transaction(rc->extent_root, 1); - btrfs_commit_transaction(trans, rc->extent_root); + if (IS_ERR(trans)) + err = PTR_ERR(trans); + else + btrfs_commit_transaction(trans, rc->extent_root); out_free: btrfs_free_block_rsv(rc->extent_root, rc->block_rsv); btrfs_free_path(path); @@ -4125,6 +4136,11 @@ int btrfs_recover_relocation(struct btrfs_root *root) set_reloc_control(rc); trans = btrfs_join_transaction(rc->extent_root, 1); + if (IS_ERR(trans)) { + unset_reloc_control(rc); 
+ err = PTR_ERR(trans); + goto out_free; + } rc->merge_reloc_tree = 1; @@ -4154,9 +4170,13 @@ int btrfs_recover_relocation(struct btrfs_root *root) unset_reloc_control(rc); trans = btrfs_join_transaction(rc->extent_root, 1); - btrfs_commit_transaction(trans, rc->extent_root); -out: + if (IS_ERR(trans)) + err = PTR_ERR(trans); + else + btrfs_commit_transaction(trans, rc->extent_root); +out_free: kfree(rc); +out: while (!list_empty(&reloc_roots)) { reloc_root = list_entry(reloc_roots.next, struct btrfs_root, root_list); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index bae5c7b8bbe2..3d73c8d93bbb 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1161,6 +1161,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, INIT_DELAYED_WORK(&ac->work, do_async_commit); ac->root = root; ac->newtrans = btrfs_join_transaction(root, 0); + if (IS_ERR(ac->newtrans)) { + int err = PTR_ERR(ac->newtrans); + kfree(ac); + return err; + } /* take transaction reference */ mutex_lock(&root->fs_info->trans_mutex); -- cgit v1.2.2 From abd30bb0af9d4671506502278e8631bed9e3c35c Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Mon, 24 Jan 2011 00:57:10 +0000 Subject: btrfs: check return value of btrfs_start_ioctl_transaction() properly btrfs_start_ioctl_transaction() returns ERR_PTR(), not NULL. So, it is necessary to use IS_ERR() to check the return value. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 04b4fb9144a9..12dabe28cf54 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2085,7 +2085,7 @@ static long btrfs_ioctl_trans_start(struct file *file) ret = -ENOMEM; trans = btrfs_start_ioctl_transaction(root, 0); - if (!trans) + if (IS_ERR(trans)) goto out_drop; file->private_data = trans; -- cgit v1.2.2 From dedefd7215d3ec451291ca393e5c8e4c1882c8c6 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 24 Jan 2011 21:43:18 +0000 Subject: Btrfs: fix check_path_shared so it returns the right value When running xfstests 224 I kept getting ENOSPC when trying to remove the files, and this is because we were returning ret from check_path_shared while it was uninitalized, which isn't right. Fix this to return 0 properly, and now xfstests 224 doesn't freak out when it tries to clean itself up. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 40fee137dd11..5621818921f8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2718,9 +2718,10 @@ static int check_path_shared(struct btrfs_root *root, struct extent_buffer *eb; int level; u64 refs = 1; - int uninitialized_var(ret); for (level = 0; level < BTRFS_MAX_LEVEL; level++) { + int ret; + if (!path->nodes[level]) break; eb = path->nodes[level]; @@ -2731,7 +2732,7 @@ static int check_path_shared(struct btrfs_root *root, if (refs > 1) return 1; } - return ret; /* XXX callers? 
*/ + return 0; } /* -- cgit v1.2.2 From e9e22899de661af94cb9995885fd04e4c738838b Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 24 Jan 2011 21:43:19 +0000 Subject: Btrfs: do not release more reserved bytes to the global_block_rsv than we need When we do btrfs_block_rsv_release, if global_block_rsv is not full we will release all the extra bytes to global_block_rsv, even if it's only a little short of the amount of space that we need to reserve. This causes us to starve ourselves of reservable space during the transaction which will force us to shrink delalloc bytes and commit the transaction more often than we should. So instead just add the amount of bytes we need to add to the global reserve so reserved == size, and then add the rest back into the space_info for general use. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 98ee139885cc..7af618dcf2c0 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3589,8 +3589,20 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, if (num_bytes > 0) { if (dest) { - block_rsv_add_bytes(dest, num_bytes, 0); - } else { + spin_lock(&dest->lock); + if (!dest->full) { + u64 bytes_to_add; + + bytes_to_add = dest->size - dest->reserved; + bytes_to_add = min(num_bytes, bytes_to_add); + dest->reserved += bytes_to_add; + if (dest->reserved >= dest->size) + dest->full = 1; + num_bytes -= bytes_to_add; + } + spin_unlock(&dest->lock); + } + if (num_bytes) { spin_lock(&space_info->lock); space_info->bytes_reserved -= num_bytes; spin_unlock(&space_info->lock); -- cgit v1.2.2 From 68a82277b8619e6d0f2738b1d9b160b627e81e92 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 24 Jan 2011 21:43:20 +0000 Subject: Btrfs: use the global block reserve if we cannot reserve space We call use_block_rsv right before we make an allocation in order to make sure we have enough space. Now normally people have called btrfs_start_transaction() with the appropriate amount of space that we need, so we just use some of that pre-reserved space and move along happily. The problem is where people use btrfs_join_transaction(), which doesn't actually reserve any space. So we try and reserve space here, but we cannot flush delalloc, so this forces us to return -ENOSPC when in reality we have plenty of space. The most common symptom is seeing a bunch of "couldn't dirty inode" messages in syslog. With xfstests 224 we end up falling back to start_transaction and then doing all the flush delalloc stuff which causes to hang for a very long time. So instead steal from the global reserve, which is what this is meant for anyway. With this patch and the other 2 I have sent xfstests 224 now passes successfully. 
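A rough model of that fallback, assuming two plain counters stand in for the per-transaction and global reserves (no locking, no delalloc flushing):

    #include <errno.h>

    struct rsv {
        unsigned long long size;
        unsigned long long reserved;
    };

    static int rsv_use_bytes(struct rsv *r, unsigned long long bytes)
    {
        if (r->reserved < bytes)
            return -ENOSPC;
        r->reserved -= bytes;
        return 0;
    }

    /* Try the joined transaction's reserve first; if it cannot cover the
     * allocation, fall back to the global reserve instead of failing. */
    static int reserve_block(struct rsv *trans_rsv, struct rsv *global_rsv,
                             unsigned long long bytes)
    {
        if (rsv_use_bytes(trans_rsv, bytes) == 0)
            return 0;
        return rsv_use_bytes(global_rsv, bytes);
    }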
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 7af618dcf2c0..ff6bbfd75cf7 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5646,6 +5646,7 @@ use_block_rsv(struct btrfs_trans_handle *trans, struct btrfs_root *root, u32 blocksize) { struct btrfs_block_rsv *block_rsv; + struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; int ret; block_rsv = get_block_rsv(trans, root); @@ -5653,14 +5654,39 @@ use_block_rsv(struct btrfs_trans_handle *trans, if (block_rsv->size == 0) { ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, 0); - if (ret) + /* + * If we couldn't reserve metadata bytes try and use some from + * the global reserve. + */ + if (ret && block_rsv != global_rsv) { + ret = block_rsv_use_bytes(global_rsv, blocksize); + if (!ret) + return global_rsv; + return ERR_PTR(ret); + } else if (ret) { return ERR_PTR(ret); + } return block_rsv; } ret = block_rsv_use_bytes(block_rsv, blocksize); if (!ret) return block_rsv; + if (ret) { + WARN_ON(1); + ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize, + 0); + if (!ret) { + spin_lock(&block_rsv->lock); + block_rsv->size += blocksize; + spin_unlock(&block_rsv->lock); + return block_rsv; + } else if (ret && block_rsv != global_rsv) { + ret = block_rsv_use_bytes(global_rsv, blocksize); + if (!ret) + return global_rsv; + } + } return ERR_PTR(-ENOSPC); } -- cgit v1.2.2 From ad0397a7a97f55fd7f70998ec208c5d8b90310ff Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 28 Jan 2011 18:44:44 +0000 Subject: Btrfs: do error checking in btrfs_del_csums Got a report of a box panicing because we got a NULL eb in read_extent_buffer. His fs was borked and btrfs_search_path returned EIO, but we don't check for errors so the box paniced. Yes I know this will just make something higher up the stack panic, but that's a problem for future Josef. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/file-item.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index d0bc72657cd7..4f19a3e1bf32 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -550,7 +550,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, if (path->slots[0] == 0) goto out; path->slots[0]--; + } else if (ret < 0) { + goto out; } + leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); -- cgit v1.2.2 From 7adf5dfbb3af65a00e20b3ead224c3a1b40e4ec4 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 25 Jan 2011 22:11:54 +0000 Subject: Btrfs: handle no memory properly in prepare_pages Instead of doing a BUG_ON(1) in prepare_pages if grab_cache_page() fails, just loop through the pages we've already grabbed and unlock and release them, then return -ENOMEM like we should. 
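The fix follows the usual partial-failure rule: whatever was pinned before the failing iteration is released, newest first, and only then is -ENOMEM returned. A small sketch of the same loop, with plain allocations standing in for page cache pages:

    #include <errno.h>
    #include <stdlib.h>

    static int grab_all(void **slots, int num)
    {
        int i, c;

        for (i = 0; i < num; i++) {
            slots[i] = malloc(64);
            if (!slots[i]) {
                /* undo the slots we already filled, newest first */
                for (c = i - 1; c >= 0; c--) {
                    free(slots[c]);
                    slots[c] = NULL;
                }
                return -ENOMEM;
            }
        }
        return 0;
    }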
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/file.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 65b2424a4116..9e097fbfc78d 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -792,8 +792,12 @@ again: for (i = 0; i < num_pages; i++) { pages[i] = grab_cache_page(inode->i_mapping, index + i); if (!pages[i]) { - err = -ENOMEM; - BUG_ON(1); + int c; + for (c = i - 1; c >= 0; c--) { + unlock_page(pages[c]); + page_cache_release(pages[c]); + } + return -ENOMEM; } wait_on_page_writeback(pages[i]); } -- cgit v1.2.2 From af5eb745efe97d91d2cbe793029838b3311c15da Mon Sep 17 00:00:00 2001 From: Anton Altaparmakov Date: Fri, 28 Jan 2011 20:45:28 +0000 Subject: NTFS: Fix invalid pointer dereference in ntfs_mft_record_alloc(). In ntfs_mft_record_alloc() when mapping the new extent mft record with map_extent_mft_record() we overwrite @m with the return value and on error, we then try to use the old @m but that is no longer there as @m now contains an error code instead so we crash when dereferencing the error code as if it were a pointer. The simple fix is to use a temporary variable to store the return value thus preserving the original @m for later use. This is a backport from the commercial Tuxera-NTFS driver and is well tested... Thanks go to Julia Lawall for pointing this out (whilst I had fixed it in the commercial driver I had failed to fix it in the Linux kernel). Signed-off-by: Anton Altaparmakov Signed-off-by: Linus Torvalds --- fs/ntfs/mft.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index b572b6727181..326e7475a22a 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c @@ -1,7 +1,7 @@ /** * mft.c - NTFS kernel mft record operations. Part of the Linux-NTFS project. * - * Copyright (c) 2001-2006 Anton Altaparmakov + * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc. * Copyright (c) 2002 Richard Russon * * This program/include file is free software; you can redistribute it and/or @@ -2576,6 +2576,8 @@ mft_rec_already_initialized: flush_dcache_page(page); SetPageUptodate(page); if (base_ni) { + MFT_RECORD *m_tmp; + /* * Setup the base mft record in the extent mft record. This * completes initialization of the allocated extent mft record @@ -2588,11 +2590,11 @@ mft_rec_already_initialized: * attach it to the base inode @base_ni and map, pin, and lock * its, i.e. the allocated, mft record. */ - m = map_extent_mft_record(base_ni, bit, &ni); - if (IS_ERR(m)) { + m_tmp = map_extent_mft_record(base_ni, bit, &ni); + if (IS_ERR(m_tmp)) { ntfs_error(vol->sb, "Failed to map allocated extent " "mft record 0x%llx.", (long long)bit); - err = PTR_ERR(m); + err = PTR_ERR(m_tmp); /* Set the mft record itself not in use. */ m->flags &= cpu_to_le16( ~le16_to_cpu(MFT_RECORD_IN_USE)); @@ -2603,6 +2605,7 @@ mft_rec_already_initialized: ntfs_unmap_page(page); goto undo_mftbmp_alloc; } + BUG_ON(m != m_tmp); /* * Make sure the allocated mft record is written out to disk. 
* No need to set the inode dirty because the caller is going -- cgit v1.2.2 From ffeb414a59291d5891f09727beb793c109f19f08 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Sat, 29 Jan 2011 07:03:02 -0500 Subject: cifs: fix two compiler warning about uninitialized vars MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fs/cifs/link.c: In function ‘symlink_hash’: fs/cifs/link.c:58:3: warning: ‘rc’ may be used uninitialized in this function [-Wuninitialized] fs/cifs/smbencrypt.c: In function ‘mdfour’: fs/cifs/smbencrypt.c:61:3: warning: ‘rc’ may be used uninitialized in this function [-Wuninitialized] Reviewed-by: Shirish Pargaonkar Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/link.c | 3 ++- fs/cifs/smbencrypt.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/link.c b/fs/cifs/link.c index 02cd60aefbff..e8804d373404 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -55,8 +55,9 @@ symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash) md5 = crypto_alloc_shash("md5", 0, 0); if (IS_ERR(md5)) { + rc = PTR_ERR(md5); cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc); - return PTR_ERR(md5); + return rc; } size = sizeof(struct shash_desc) + crypto_shash_descsize(md5); sdescmd5 = kmalloc(size, GFP_KERNEL); diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c index b5450e9f40c0..b5041c849981 100644 --- a/fs/cifs/smbencrypt.c +++ b/fs/cifs/smbencrypt.c @@ -58,8 +58,9 @@ mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len) md4 = crypto_alloc_shash("md4", 0, 0); if (IS_ERR(md4)) { + rc = PTR_ERR(md4); cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc); - return PTR_ERR(md4); + return rc; } size = sizeof(struct shash_desc) + crypto_shash_descsize(md4); sdescmd4 = kmalloc(size, GFP_KERNEL); -- cgit v1.2.2 From 1be912dde772b77aaaa21770eeabb0a7a5e297a6 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 28 Jan 2011 07:08:28 -0500 Subject: cifs: handle cancelled requests better Currently, when a request is cancelled via signal, we delete the mid immediately. If the request was already transmitted however, the client is still likely to receive a response. When it does, it won't recognize it however and will pop a printk. It's also a little dangerous to just delete the mid entry like this. We may end up reusing that mid. If we do then we could potentially get the response from the first request confused with the later one. Prevent the reuse of mids by marking them as cancelled and keeping them on the pending_mid_q list. If the reply comes in, we'll delete it from the list then. If it never comes, then we'll delete it at reconnect or when cifsd comes down. 
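The idea is to defer the cleanup to whoever sees the entry next, rather than unlinking it while a reply may still be in flight. A simplified sketch of that state change; the types are illustrative, not the cifs structures, and the entry is assumed to be unlinked by the reply or reconnect path before its callback runs:

    #include <stdlib.h>

    enum mid_state { MID_REQUEST_SUBMITTED, MID_RESPONSE_RECEIVED };

    struct mid_entry {
        enum mid_state state;
        void (*callback)(struct mid_entry *entry);
        struct mid_entry *next;     /* stays linked on the pending list */
    };

    static void delete_mid(struct mid_entry *entry)
    {
        /* the handler unlinks the entry before invoking this callback */
        free(entry);
    }

    /* The waiter was signalled: leave the entry on the list so a late reply
     * is still matched, but make the next handler free it instead of us. */
    static void cancel_mid(struct mid_entry *entry)
    {
        if (entry->state == MID_REQUEST_SUBMITTED)
            entry->callback = delete_mid;
    }

Because the mid stays on the list until it is reaped, it cannot be handed out again while a stale reply could still arrive for it.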
Reviewed-by: Pavel Shilovsky Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/transport.c | 43 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 36 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index c1ccca1a933f..9b2d0373a8a7 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -579,8 +579,17 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, goto out; rc = wait_for_response(ses->server, midQ); - if (rc != 0) - goto out; + if (rc != 0) { + spin_lock(&GlobalMid_Lock); + if (midQ->midState == MID_REQUEST_SUBMITTED) { + midQ->callback = DeleteMidQEntry; + spin_unlock(&GlobalMid_Lock); + atomic_dec(&ses->server->inFlight); + wake_up(&ses->server->request_q); + return rc; + } + spin_unlock(&GlobalMid_Lock); + } rc = sync_mid_result(midQ, ses->server); if (rc != 0) { @@ -724,8 +733,18 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, goto out; rc = wait_for_response(ses->server, midQ); - if (rc != 0) - goto out; + if (rc != 0) { + spin_lock(&GlobalMid_Lock); + if (midQ->midState == MID_REQUEST_SUBMITTED) { + /* no longer considered to be "in-flight" */ + midQ->callback = DeleteMidQEntry; + spin_unlock(&GlobalMid_Lock); + atomic_dec(&ses->server->inFlight); + wake_up(&ses->server->request_q); + return rc; + } + spin_unlock(&GlobalMid_Lock); + } rc = sync_mid_result(midQ, ses->server); if (rc != 0) { @@ -922,10 +941,20 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, } } - if (wait_for_response(ses->server, midQ) == 0) { - /* We got the response - restart system call. */ - rstart = 1; + rc = wait_for_response(ses->server, midQ); + if (rc) { + spin_lock(&GlobalMid_Lock); + if (midQ->midState == MID_REQUEST_SUBMITTED) { + /* no longer considered to be "in-flight" */ + midQ->callback = DeleteMidQEntry; + spin_unlock(&GlobalMid_Lock); + return rc; + } + spin_unlock(&GlobalMid_Lock); } + + /* We got the response - restart system call. */ + rstart = 1; } rc = sync_mid_result(midQ, ses->server); -- cgit v1.2.2 From 2db7c5815555d8daabf7d4ab1253ce690852c140 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 28 Jan 2011 07:08:28 -0500 Subject: cifs: send an NT_CANCEL request when a process is signalled Use the new send_nt_cancel function to send an NT_CANCEL when the process is delivered a fatal signal. This is a "best effort" enterprise however, so don't bother to check the return code. There's nothing we can reasonably do if it fails anyway. 
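Since nothing useful can be done if the cancel itself fails, the send is strictly best effort. A sketch of that shape, with a hypothetical send_cancel() standing in for send_nt_cancel():

    /* Hypothetical stand-in for send_nt_cancel(): 0 or a negative errno. */
    static int send_cancel(int server_fd, unsigned short mid)
    {
        /* ... build and write an NT_CANCEL frame for 'mid' ... */
        (void)server_fd;
        (void)mid;
        return 0;
    }

    static void cancel_request(int server_fd, unsigned short mid)
    {
        /* Best effort: a failure here changes nothing for the caller,
         * so the return value is deliberately ignored. */
        (void)send_cancel(server_fd, mid);
    }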
Reviewed-by: Pavel Shilovsky Reviewed-by: Suresh Jayaraman Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/transport.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 9b2d0373a8a7..bdaa4aa58b03 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -570,20 +570,25 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, #endif mutex_unlock(&ses->server->srv_mutex); - cifs_small_buf_release(in_buf); - if (rc < 0) + if (rc < 0) { + cifs_small_buf_release(in_buf); goto out; + } - if (long_op == CIFS_ASYNC_OP) + if (long_op == CIFS_ASYNC_OP) { + cifs_small_buf_release(in_buf); goto out; + } rc = wait_for_response(ses->server, midQ); if (rc != 0) { + send_nt_cancel(ses->server, in_buf, midQ); spin_lock(&GlobalMid_Lock); if (midQ->midState == MID_REQUEST_SUBMITTED) { midQ->callback = DeleteMidQEntry; spin_unlock(&GlobalMid_Lock); + cifs_small_buf_release(in_buf); atomic_dec(&ses->server->inFlight); wake_up(&ses->server->request_q); return rc; @@ -591,6 +596,8 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, spin_unlock(&GlobalMid_Lock); } + cifs_small_buf_release(in_buf); + rc = sync_mid_result(midQ, ses->server); if (rc != 0) { atomic_dec(&ses->server->inFlight); @@ -734,6 +741,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, rc = wait_for_response(ses->server, midQ); if (rc != 0) { + send_nt_cancel(ses->server, in_buf, midQ); spin_lock(&GlobalMid_Lock); if (midQ->midState == MID_REQUEST_SUBMITTED) { /* no longer considered to be "in-flight" */ @@ -943,6 +951,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, rc = wait_for_response(ses->server, midQ); if (rc) { + send_nt_cancel(ses->server, in_buf, midQ); spin_lock(&GlobalMid_Lock); if (midQ->midState == MID_REQUEST_SUBMITTED) { /* no longer considered to be "in-flight" */ -- cgit v1.2.2 From 68abaffa6bbd3cadfaa4b7216d10bcd32406090b Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 28 Jan 2011 15:05:42 -0500 Subject: cifs: simplify SMB header check routine ...just cleanup. There should be no behavior change. Signed-off-by: Jeff Layton Reviewed-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/misc.c | 46 ++++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 22 deletions(-) (limited to 'fs') diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index a09e077ba925..72e99ece78cf 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -381,29 +381,31 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , } static int -checkSMBhdr(struct smb_hdr *smb, __u16 mid) +check_smb_hdr(struct smb_hdr *smb, __u16 mid) { - /* Make sure that this really is an SMB, that it is a response, - and that the message ids match */ - if ((*(__le32 *) smb->Protocol == cpu_to_le32(0x424d53ff)) && - (mid == smb->Mid)) { - if (smb->Flags & SMBFLG_RESPONSE) - return 0; - else { - /* only one valid case where server sends us request */ - if (smb->Command == SMB_COM_LOCKING_ANDX) - return 0; - else - cERROR(1, "Received Request not response"); - } - } else { /* bad signature or mid */ - if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) - cERROR(1, "Bad protocol string signature header %x", - *(unsigned int *) smb->Protocol); - if (mid != smb->Mid) - cERROR(1, "Mids do not match"); + /* does it have the right SMB "signature" ? 
*/ + if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) { + cERROR(1, "Bad protocol string signature header 0x%x", + *(unsigned int *)smb->Protocol); + return 1; } - cERROR(1, "bad smb detected. The Mid=%d", smb->Mid); + + /* Make sure that message ids match */ + if (mid != smb->Mid) { + cERROR(1, "Mids do not match. received=%u expected=%u", + smb->Mid, mid); + return 1; + } + + /* if it's a response then accept */ + if (smb->Flags & SMBFLG_RESPONSE) + return 0; + + /* only one valid case where server sends us request */ + if (smb->Command == SMB_COM_LOCKING_ANDX) + return 0; + + cERROR(1, "Server sent request, not response. mid=%u", smb->Mid); return 1; } @@ -448,7 +450,7 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) return 1; } - if (checkSMBhdr(smb, mid)) + if (check_smb_hdr(smb, mid)) return 1; clc_len = smbCalcSize_LE(smb); -- cgit v1.2.2 From d804d41d163c0975d2890c82d7135ada7a2f23a4 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 28 Jan 2011 15:05:43 -0500 Subject: cifs: don't pop a printk when sending on a socket is interrupted If we kill the process while it's sending on a socket then the kernel_sendmsg will return -EINTR. This is normal. No need to spam the ring buffer with this info. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/transport.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index bdaa4aa58b03..b8c5e2eb43d0 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -236,9 +236,9 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec) server->tcpStatus = CifsNeedReconnect; } - if (rc < 0) { + if (rc < 0 && rc != -EINTR) cERROR(1, "Error %d sending data on socket to server", rc); - } else + else rc = 0; /* Don't want to modify the buffer as a -- cgit v1.2.2 From 92a4e0f0169498867ecb19c2244510dd4beba149 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Sat, 29 Jan 2011 07:02:28 -0500 Subject: cifs: force a reconnect if there are too many MIDs in flight Currently, we allow the pending_mid_q to grow without bound with SIGKILL'ed processes. This could eventually be a DoS'able problem. An unprivileged user could a process that does a long-running call and then SIGKILL it. If he can also intercept the NT_CANCEL calls or the replies from the server, then the pending_mid_q could grow very large, possibly even to 2^16 entries which might leave GetNextMid in an infinite loop. Fix this by imposing a hard limit of 32k calls per server. If we cross that limit, set the tcpStatus to CifsNeedReconnect to force cifsd to eventually reconnect the socket and clean out the pending_mid_q. While we're at it, clean up the function a bit and eliminate an unnecessary NULL pointer check. 
Signed-off-by: Jeff Layton Reviewed-by: Shirish Pargaonkar Signed-off-by: Steve French --- fs/cifs/misc.c | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 72e99ece78cf..24f0a9d97ad8 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -236,10 +236,7 @@ __u16 GetNextMid(struct TCP_Server_Info *server) { __u16 mid = 0; __u16 last_mid; - int collision; - - if (server == NULL) - return mid; + bool collision; spin_lock(&GlobalMid_Lock); last_mid = server->CurrentMid; /* we do not want to loop forever */ @@ -252,24 +249,38 @@ __u16 GetNextMid(struct TCP_Server_Info *server) (and it would also have to have been a request that did not time out) */ while (server->CurrentMid != last_mid) { - struct list_head *tmp; struct mid_q_entry *mid_entry; + unsigned int num_mids; - collision = 0; + collision = false; if (server->CurrentMid == 0) server->CurrentMid++; - list_for_each(tmp, &server->pending_mid_q) { - mid_entry = list_entry(tmp, struct mid_q_entry, qhead); - - if ((mid_entry->mid == server->CurrentMid) && - (mid_entry->midState == MID_REQUEST_SUBMITTED)) { + num_mids = 0; + list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { + ++num_mids; + if (mid_entry->mid == server->CurrentMid && + mid_entry->midState == MID_REQUEST_SUBMITTED) { /* This mid is in use, try a different one */ - collision = 1; + collision = true; break; } } - if (collision == 0) { + + /* + * if we have more than 32k mids in the list, then something + * is very wrong. Possibly a local user is trying to DoS the + * box by issuing long-running calls and SIGKILL'ing them. If + * we get to 2^16 mids then we're in big trouble as this + * function could loop forever. + * + * Go ahead and assign out the mid in this situation, but force + * an eventual reconnect to clean out the pending_mid_q. + */ + if (num_mids > 32768) + server->tcpStatus = CifsNeedReconnect; + + if (!collision) { mid = server->CurrentMid; break; } -- cgit v1.2.2 From f855f6cbeb4f94cd4e4a225c2246ee8012c384a2 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 31 Jan 2011 08:41:36 -0500 Subject: cifs: make CIFS depend on CRYPTO_MD4 Recently CIFS was changed to use the kernel crypto API for MD4 hashes, but the Kconfig dependencies were not changed to reflect this. 
Signed-off-by: Jeff Layton Reported-and-Tested-by: Suresh Jayaraman Reviewed-by: Shirish Pargaonkar Signed-off-by: Steve French --- fs/cifs/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index ee45648b0d1a..7cb0f7f847e4 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig @@ -3,6 +3,7 @@ config CIFS depends on INET select NLS select CRYPTO + select CRYPTO_MD4 select CRYPTO_MD5 select CRYPTO_HMAC select CRYPTO_ARC4 -- cgit v1.2.2 From 31c2659d78c8be970833bc1e633593d291553ed3 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 31 Jan 2011 07:24:46 -0500 Subject: cifs: clean up some compiler warnings New compiler warnings that I noticed when building a patchset based on recent Fedora kernel: fs/cifs/cifssmb.c: In function 'CIFSSMBSetFileSize': fs/cifs/cifssmb.c:4813:8: warning: variable 'data_offset' set but not used [-Wunused-but-set-variable] fs/cifs/file.c: In function 'cifs_open': fs/cifs/file.c:349:24: warning: variable 'pCifsInode' set but not used [-Wunused-but-set-variable] fs/cifs/file.c: In function 'cifs_partialpagewrite': fs/cifs/file.c:1149:23: warning: variable 'cifs_sb' set but not used [-Wunused-but-set-variable] fs/cifs/file.c: In function 'cifs_iovec_write': fs/cifs/file.c:1740:9: warning: passing argument 6 of 'CIFSSMBWrite2' from incompatible pointer type [enabled by default] fs/cifs/cifsproto.h:337:12: note: expected 'unsigned int *' but argument is of type 'size_t *' fs/cifs/readdir.c: In function 'cifs_readdir': fs/cifs/readdir.c:767:23: warning: variable 'cifs_sb' set but not used [-Wunused-but-set-variable] fs/cifs/cifs_dfs_ref.c: In function 'cifs_dfs_d_automount': fs/cifs/cifs_dfs_ref.c:342:2: warning: 'rc' may be used uninitialized in this function [-Wuninitialized] fs/cifs/cifs_dfs_ref.c:278:6: note: 'rc' was declared here Signed-off-by: Jeff Layton Reviewed-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/cifs_dfs_ref.c | 9 ++++----- fs/cifs/cifssmb.c | 3 --- fs/cifs/file.c | 8 ++------ fs/cifs/readdir.c | 3 --- 4 files changed, 6 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index f1c68629f277..0a265ad9e426 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -282,8 +282,6 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) cFYI(1, "in %s", __func__); BUG_ON(IS_ROOT(mntpt)); - xid = GetXid(); - /* * The MSDFS spec states that paths in DFS referral requests and * responses must be prefixed by a single '\' character instead of @@ -293,7 +291,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) mnt = ERR_PTR(-ENOMEM); full_path = build_path_from_dentry(mntpt); if (full_path == NULL) - goto free_xid; + goto cdda_exit; cifs_sb = CIFS_SB(mntpt->d_inode->i_sb); tlink = cifs_sb_tlink(cifs_sb); @@ -303,9 +301,11 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) } ses = tlink_tcon(tlink)->ses; + xid = GetXid(); rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls, &num_referrals, &referrals, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); + FreeXid(xid); cifs_put_tlink(tlink); @@ -338,8 +338,7 @@ success: free_dfs_info_array(referrals, num_referrals); free_full_path: kfree(full_path); -free_xid: - FreeXid(xid); +cdda_exit: cFYI(1, "leaving %s" , __func__); return mnt; } diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 3106f5e5c633..46c66ed01af4 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -4914,7 +4914,6 @@ 
CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, __u16 fid, __u32 pid_of_opener, bool SetAllocation) { struct smb_com_transaction2_sfi_req *pSMB = NULL; - char *data_offset; struct file_end_of_file_info *parm_data; int rc = 0; __u16 params, param_offset, offset, byte_count, count; @@ -4938,8 +4937,6 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; - data_offset = (char *) (&pSMB->hdr.Protocol) + offset; - count = sizeof(struct file_end_of_file_info); pSMB->MaxParameterCount = cpu_to_le16(2); /* BB find exact max SMB PDU from sess structure BB */ diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 0de17c1db608..74c0a282d012 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -346,7 +346,6 @@ int cifs_open(struct inode *inode, struct file *file) struct cifsTconInfo *tcon; struct tcon_link *tlink; struct cifsFileInfo *pCifsFile = NULL; - struct cifsInodeInfo *pCifsInode; char *full_path = NULL; bool posix_open_ok = false; __u16 netfid; @@ -361,8 +360,6 @@ int cifs_open(struct inode *inode, struct file *file) } tcon = tlink_tcon(tlink); - pCifsInode = CIFS_I(file->f_path.dentry->d_inode); - full_path = build_path_from_dentry(file->f_path.dentry); if (full_path == NULL) { rc = -ENOMEM; @@ -1146,7 +1143,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) char *write_data; int rc = -EFAULT; int bytes_written = 0; - struct cifs_sb_info *cifs_sb; struct inode *inode; struct cifsFileInfo *open_file; @@ -1154,7 +1150,6 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) return -EFAULT; inode = page->mapping->host; - cifs_sb = CIFS_SB(inode->i_sb); offset += (loff_t)from; write_data = kmap(page); @@ -1667,7 +1662,8 @@ static ssize_t cifs_iovec_write(struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *poffset) { - size_t total_written = 0, written = 0; + size_t total_written = 0; + unsigned int written = 0; unsigned long num_pages, npages; size_t copied, len, cur_len, i; struct kvec *to_send; diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 7f25cc3d2256..f8e4cd2a7912 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -764,7 +764,6 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) { int rc = 0; int xid, i; - struct cifs_sb_info *cifs_sb; struct cifsTconInfo *pTcon; struct cifsFileInfo *cifsFile = NULL; char *current_entry; @@ -775,8 +774,6 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) xid = GetXid(); - cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); - /* * Ensure FindFirst doesn't fail before doing filldir() for '.' and * '..'. Otherwise we won't be able to notify VFS in case of failure. -- cgit v1.2.2 From 7a8587e7c8e4e32ba778bfbbb822a0a7e8d5f3e3 Mon Sep 17 00:00:00 2001 From: Shirish Pargaonkar Date: Sat, 29 Jan 2011 13:54:58 -0600 Subject: cifs: No need to check crypto blockcipher allocation Missed one change as per earlier suggestion. 
Signed-off-by: Shirish Pargaonkar Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsencrypt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 0db5f1de0227..a51585f9852b 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -657,9 +657,10 @@ calc_seckey(struct cifsSesInfo *ses) get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE); tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); - if (!tfm_arc4 || IS_ERR(tfm_arc4)) { + if (IS_ERR(tfm_arc4)) { + rc = PTR_ERR(tfm_arc4); cERROR(1, "could not allocate crypto API arc4\n"); - return PTR_ERR(tfm_arc4); + return rc; } desc.tfm = tfm_arc4; -- cgit v1.2.2 From b1953bcec95c189b1eea690a08e89646d7750bda Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Fri, 21 Jan 2011 21:10:01 +0000 Subject: Btrfs: make shrink_delalloc a little friendlier Xfstests 224 will just sit there and spin for ever until eventually we give up flushing delalloc and exit. On my box this took several hours. I could not interrupt this process either, even though we use INTERRUPTIBLE. So do 2 things 1) Keep us from looping over and over again without reclaiming anything 2) If we get interrupted exit the loop I tested this and the test now exits in a reasonable amount of time, and can be interrupted with ctrl+c. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index ff6bbfd75cf7..f96641a93fc9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3345,8 +3345,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, u64 reserved; u64 max_reclaim; u64 reclaimed = 0; + long time_left; int pause = 1; int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; + int loops = 0; block_rsv = &root->fs_info->delalloc_block_rsv; space_info = block_rsv->space_info; @@ -3359,7 +3361,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, max_reclaim = min(reserved, to_reclaim); - while (1) { + while (loops < 1024) { /* have the flusher threads jump in and do some IO */ smp_mb(); nr_pages = min_t(unsigned long, nr_pages, @@ -3367,8 +3369,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); spin_lock(&space_info->lock); - if (reserved > space_info->bytes_reserved) + if (reserved > space_info->bytes_reserved) { + loops = 0; reclaimed += reserved - space_info->bytes_reserved; + } else { + loops++; + } reserved = space_info->bytes_reserved; spin_unlock(&space_info->lock); @@ -3379,7 +3385,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, return -EAGAIN; __set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(pause); + time_left = schedule_timeout(pause); + + /* We were interrupted, exit */ + if (time_left) + break; + pause <<= 1; if (pause > HZ / 10) pause = HZ / 10; -- cgit v1.2.2 From b31eabd86eb68d3c217e6821078249bc045e698a Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 31 Jan 2011 16:48:24 -0500 Subject: Btrfs: catch errors from btrfs_sync_log btrfs_sync_log returns -EAGAIN when we need full transaction commits instead of small log commits, but sometimes we were dropping the return value. In practice, we check for this a few different ways, but this is still a bug that can leave off full log commits when we really need them. 
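What makes the dropped value harmful is that -EAGAIN is the signal to fall back to a full transaction commit; returning a hard-coded 0 hides it. A tiny sketch of the caller-side contract, with stand-in names:

    #include <errno.h>

    /* Stand-in for the log sync: -EAGAIN means "do a full commit instead". */
    static int sync_log(int need_full_commit)
    {
        return need_full_commit ? -EAGAIN : 0;
    }

    static int full_commit(void)
    {
        return 0;
    }

    static int commit(int need_full_commit)
    {
        int ret = sync_log(need_full_commit);

        if (ret == -EAGAIN)
            return full_commit();
        return ret;             /* not an unconditional 0 */
    }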
Signed-off-by: Chris Mason --- fs/btrfs/tree-log.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c25a41d86118..42dfc3077040 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2051,6 +2051,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, wait_log_commit(trans, log_root_tree, log_root_tree->log_transid); mutex_unlock(&log_root_tree->log_mutex); + ret = 0; goto out; } atomic_set(&log_root_tree->log_commit[index2], 1); @@ -2115,7 +2116,7 @@ out: smp_mb(); if (waitqueue_active(&root->log_commit_wait[index1])) wake_up(&root->log_commit_wait[index1]); - return 0; + return ret; } static void free_log_tree(struct btrfs_trans_handle *trans, -- cgit v1.2.2 From cab6958da0094e36a098751f844409fc9ee26251 Mon Sep 17 00:00:00 2001 From: Steve French Date: Mon, 31 Jan 2011 21:56:35 +0000 Subject: [CIFS] Update cifs minor version Signed-off-by: Steve French --- fs/cifs/cifsfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 14789a97304e..4a3330235d55 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -127,5 +127,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* EXPERIMENTAL */ -#define CIFS_VERSION "1.69" +#define CIFS_VERSION "1.70" #endif /* _CIFSFS_H */ -- cgit v1.2.2 From 6284644e8de1f4005166c918c3d2aa4c510ab9f6 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 31 Jan 2011 09:14:17 -0500 Subject: cifs: fix length checks in checkSMB The cERROR message in checkSMB when the calculated length doesn't match the RFC1001 length is incorrect in many cases. It always says that the RFC1001 length is bigger than the SMB, even when it's actually the reverse. Fix the error message to say the reverse of what it does now when the SMB length goes beyond the end of the received data. Also, clarify the error message when the RFC length is too big. Finally, clarify the comments to show that the 512 byte limit on extra data at the end of the packet is arbitrary. 
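The corrected checks boil down to two asymmetric comparisons: the received frame must be at least as long as the SMB calculated from its own fields, and may exceed it by at most an arbitrary 512-byte slack. A simplified standalone sketch of that validation (byte counts only, no header parsing):

    /* received = bytes actually read for the frame,
     * calculated = length computed from the SMB's own word/byte counts */
    static int check_lengths(unsigned int received, unsigned int calculated)
    {
        if (received < calculated)
            return 1;   /* frame too short for the SMB it claims to hold */
        if (received > calculated + 512)
            return 1;   /* too much trailing data to silently ignore */
        return 0;       /* exact, or padded within the arbitrary slack */
    }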
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/misc.c | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) (limited to 'fs') diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 24f0a9d97ad8..2a930a752a78 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -478,25 +478,26 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF)) return 0; /* bcc wrapped */ } - cFYI(1, "Calculated size %d vs length %d mismatch for mid %d", + cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u", clc_len, 4 + len, smb->Mid); - /* Windows XP can return a few bytes too much, presumably - an illegal pad, at the end of byte range lock responses - so we allow for that three byte pad, as long as actual - received length is as long or longer than calculated length */ - /* We have now had to extend this more, since there is a - case in which it needs to be bigger still to handle a - malformed response to transact2 findfirst from WinXP when - access denied is returned and thus bcc and wct are zero - but server says length is 0x21 bytes too long as if the server - forget to reset the smb rfc1001 length when it reset the - wct and bcc to minimum size and drop the t2 parms and data */ - if ((4+len > clc_len) && (len <= clc_len + 512)) - return 0; - else { - cERROR(1, "RFC1001 size %d bigger than SMB for Mid=%d", + + if (4 + len < clc_len) { + cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u", len, smb->Mid); return 1; + } else if (len > clc_len + 512) { + /* + * Some servers (Windows XP in particular) send more + * data than the lengths in the SMB packet would + * indicate on certain calls (byte range locks and + * trans2 find first calls in particular). While the + * client can handle such a frame by ignoring the + * trailing data, we choose limit the amount of extra + * data to 512 bytes. + */ + cERROR(1, "RFC1001 size %u more than 512 bytes larger " + "than SMB for mid=%u", len, smb->Mid); + return 1; } } return 0; -- cgit v1.2.2 From c87fb6fdcaf7560940b31a0c78c3e6370e3433cf Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 31 Jan 2011 19:54:59 -0500 Subject: Btrfs: avoid uninit variable warnings in ordered-data.c This one isn't really an uninit variable, but for pretty obscure reasons. Let's make it clearly correct. Signed-off-by: Chris Mason --- fs/btrfs/ordered-data.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 2b61e1ddcd99..083a55477375 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -141,7 +141,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree, u64 file_offset) { struct rb_root *root = &tree->tree; - struct rb_node *prev; + struct rb_node *prev = NULL; struct rb_node *ret; struct btrfs_ordered_extent *entry; -- cgit v1.2.2 From 5df67083488ccbad925f583b698ab38f8629a016 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Tue, 1 Feb 2011 09:17:35 +0000 Subject: btrfs: checking NULL or not in some functions Because NULL is returned when the memory allocation fails, it is checked whether it is NULL. 
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 2 ++ fs/btrfs/extent_io.c | 2 ++ fs/btrfs/tree-log.c | 6 ++++++ 3 files changed, 10 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f96641a93fc9..9de4ff03882a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6496,6 +6496,8 @@ static noinline int relocate_inode_pages(struct inode *inode, u64 start, int ret = 0; ra = kzalloc(sizeof(*ra), GFP_NOFS); + if (!ra) + return -ENOMEM; mutex_lock(&inode->i_mutex); first_index = start >> PAGE_CACHE_SHIFT; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 6411ed6ca449..8862dda46ff6 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1920,6 +1920,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, nr = bio_get_nr_vecs(bdev); bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); + if (!bio) + return -ENOMEM; bio_add_page(bio, page, page_size, offset); bio->bi_end_io = end_io_func; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 42dfc3077040..6d66e5caff97 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2751,7 +2751,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, log = root->log_root; path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; dst_path = btrfs_alloc_path(); + if (!dst_path) { + btrfs_free_path(path); + return -ENOMEM; + } min_key.objectid = inode->i_ino; min_key.type = BTRFS_INODE_ITEM_KEY; -- cgit v1.2.2 From 98d5dc13e7e74b77ca3b4c3cbded9f48d2dbbbb7 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 20 Jan 2011 06:19:37 +0000 Subject: btrfs: fix return value check of btrfs_start_transaction() The error check of btrfs_start_transaction() is added, and the mistake of the error check on several places is corrected. 
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 7 +++++-- fs/btrfs/inode.c | 1 + fs/btrfs/ioctl.c | 10 ++++++++-- fs/btrfs/relocation.c | 3 +++ fs/btrfs/super.c | 2 ++ fs/btrfs/tree-log.c | 1 + fs/btrfs/volumes.c | 19 +++++++++++++++++-- 7 files changed, 37 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9de4ff03882a..f07ba21cbf06 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6271,6 +6271,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root, BUG_ON(!wc); trans = btrfs_start_transaction(tree_root, 0); + BUG_ON(IS_ERR(trans)); + if (block_rsv) trans->block_rsv = block_rsv; @@ -6368,6 +6370,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, btrfs_end_transaction_throttle(trans, tree_root); trans = btrfs_start_transaction(tree_root, 0); + BUG_ON(IS_ERR(trans)); if (block_rsv) trans->block_rsv = block_rsv; } @@ -7587,7 +7590,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root) if (found) { trans = btrfs_start_transaction(root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); ret = btrfs_commit_transaction(trans, root); BUG_ON(ret); } @@ -7831,7 +7834,7 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root, trans = btrfs_start_transaction(extent_root, 1); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); if (extent_key->objectid == 0) { ret = del_extent_zero(trans, extent_root, path, extent_key); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5621818921f8..36bc3f49ebf9 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2357,6 +2357,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root) */ if (is_bad_inode(inode)) { trans = btrfs_start_transaction(root, 0); + BUG_ON(IS_ERR(trans)); btrfs_orphan_del(trans, inode); btrfs_end_transaction(trans, root); iput(inode); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 12dabe28cf54..02d224e8c83f 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -907,6 +907,10 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root, if (new_size > old_size) { trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out_unlock; + } ret = btrfs_grow_device(trans, device, new_size); btrfs_commit_transaction(trans, root); } else { @@ -2141,9 +2145,9 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) path->leave_spinning = 1; trans = btrfs_start_transaction(root, 1); - if (!trans) { + if (IS_ERR(trans)) { btrfs_free_path(path); - return -ENOMEM; + return PTR_ERR(trans); } dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); @@ -2337,6 +2341,8 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp u64 transid; trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) + return PTR_ERR(trans); transid = trans->transid; btrfs_commit_transaction_async(trans, root, 0); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index ea9965430241..1f5556acb530 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2028,6 +2028,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc, while (1) { trans = btrfs_start_transaction(root, 0); + BUG_ON(IS_ERR(trans)); trans->block_rsv = rc->block_rsv; ret = btrfs_block_rsv_check(trans, root, rc->block_rsv, @@ -3665,6 +3666,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) while (1) { trans = btrfs_start_transaction(rc->extent_root, 0); + BUG_ON(IS_ERR(trans)); if (update_backref_cache(trans, 
&rc->backref_cache)) { btrfs_end_transaction(trans, rc->extent_root); @@ -4033,6 +4035,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root) int ret; trans = btrfs_start_transaction(root->fs_info->tree_root, 0); + BUG_ON(IS_ERR(trans)); memset(&root->root_item.drop_progress, 0, sizeof(root->root_item.drop_progress)); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f4e45fdded30..0209b5fc772c 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -623,6 +623,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait) btrfs_wait_ordered_extents(root, 0, 0); trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) + return PTR_ERR(trans); ret = btrfs_commit_transaction(trans, root); return ret; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 6d66e5caff97..a4bbb854dfd2 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3112,6 +3112,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) BUG_ON(!path); trans = btrfs_start_transaction(fs_info->tree_root, 0); + BUG_ON(IS_ERR(trans)); wc.trans = trans; wc.pin = 1; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index f2d2f4ccc738..7cad59353b09 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1212,6 +1212,10 @@ static int btrfs_rm_dev_item(struct btrfs_root *root, return -ENOMEM; trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + btrfs_free_path(path); + return PTR_ERR(trans); + } key.objectid = BTRFS_DEV_ITEMS_OBJECTID; key.type = BTRFS_DEV_ITEM_KEY; key.offset = device->devid; @@ -1604,6 +1608,12 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) } trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + kfree(device); + ret = PTR_ERR(trans); + goto error; + } + lock_chunks(root); device->barriers = 1; @@ -1872,7 +1882,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, return ret; trans = btrfs_start_transaction(root, 0); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); lock_chunks(root); @@ -2046,7 +2056,7 @@ int btrfs_balance(struct btrfs_root *dev_root) BUG_ON(ret); trans = btrfs_start_transaction(dev_root, 0); - BUG_ON(!trans); + BUG_ON(IS_ERR(trans)); ret = btrfs_grow_device(trans, device, old_size); BUG_ON(ret); @@ -2212,6 +2222,11 @@ again: /* Shrinking succeeded, else we would be at "done". */ trans = btrfs_start_transaction(root, 0); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto done; + } + lock_chunks(root); device->disk_total_bytes = new_size; -- cgit v1.2.2 From 9587fcff42f5bece3c0a44066b079235ee73cbb3 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 1 Feb 2011 08:40:43 -0500 Subject: cifs: fix length vs. total_read confusion in cifs_demultiplex_thread length at this point is the length returned by the last kernel_recvmsg call. total_read is the length of all of the data read so far. length is more or less meaningless at this point, so use total_read for everything. 
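The distinction matters because one SMB response can arrive across several socket reads: the per-call return value only describes the last chunk received, while the running total describes the whole PDU being validated. A user-space sketch of the same bookkeeping (the function and its names are invented for illustration):

        #include <sys/types.h>
        #include <unistd.h>

        /* Read exactly pdu_len bytes; "length" is per call, "total_read" is cumulative. */
        static ssize_t read_pdu(int fd, char *buf, size_t pdu_len)
        {
                size_t total_read = 0;

                while (total_read < pdu_len) {
                        ssize_t length = read(fd, buf + total_read,
                                              pdu_len - total_read);
                        if (length <= 0)
                                return -1;              /* error or EOF */
                        total_read += length;
                }
                return total_read;      /* size checks must use this, not "length" */
        }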
Signed-off-by: Jeff Layton Reviewed-by: Pavel Shilovsky Signed-off-by: Steve French --- fs/cifs/connect.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 47d8ff623683..945b2202275f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -578,12 +578,12 @@ incomplete_rcv: else if (reconnect == 1) continue; - length += 4; /* account for rfc1002 hdr */ + total_read += 4; /* account for rfc1002 hdr */ - - dump_smb(smb_buffer, length); - if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) { - cifs_dump_mem("Bad SMB: ", smb_buffer, 48); + dump_smb(smb_buffer, total_read); + if (checkSMB(smb_buffer, smb_buffer->Mid, total_read)) { + cifs_dump_mem("Bad SMB: ", smb_buffer, + total_read < 48 ? total_read : 48); continue; } -- cgit v1.2.2 From 0781b909b5586f4db720b5d1838b78f9d8e42f14 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 1 Feb 2011 15:52:35 -0800 Subject: epoll: epoll_wait() should not use timespec_add_ns() commit 95aac7b1cd224f ("epoll: make epoll_wait() use the hrtimer range feature") added a performance regression because it uses timespec_add_ns() with potential very large 'ns' values. [akpm@linux-foundation.org: s/epoll_set_mstimeout/ep_set_mstimeout/, per Davide] Reported-by: Simon Kirby Signed-off-by: Eric Dumazet Cc: Shawn Bohrer Acked-by: Davide Libenzi Cc: [2.6.37.x] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index cc8a9b7d6064..267d0ada4541 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1114,6 +1114,17 @@ static int ep_send_events(struct eventpoll *ep, return ep_scan_ready_list(ep, ep_send_events_proc, &esed); } +static inline struct timespec ep_set_mstimeout(long ms) +{ + struct timespec now, ts = { + .tv_sec = ms / MSEC_PER_SEC, + .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC), + }; + + ktime_get_ts(&now); + return timespec_add_safe(now, ts); +} + static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, int maxevents, long timeout) { @@ -1121,12 +1132,11 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, unsigned long flags; long slack; wait_queue_t wait; - struct timespec end_time; ktime_t expires, *to = NULL; if (timeout > 0) { - ktime_get_ts(&end_time); - timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC); + struct timespec end_time = ep_set_mstimeout(timeout); + slack = select_estimate_accuracy(&end_time); to = &expires; *to = timespec_to_ktime(end_time); -- cgit v1.2.2 From 3cd90ea42f2c15f928b70ed66f6d8ed0a8e7aadd Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 1 Feb 2011 15:52:46 -0800 Subject: vfs: sparse: add __FMODE_EXEC FMODE_EXEC is a constant type of fmode_t but was used with normal integer constants. This results in following warnings from sparse. Fix it using new macro __FMODE_EXEC. 
fs/exec.c:116:58: warning: restricted fmode_t degrades to integer fs/exec.c:689:58: warning: restricted fmode_t degrades to integer fs/fcntl.c:777:9: warning: restricted fmode_t degrades to integer Signed-off-by: Namhyung Kim Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/exec.c | 4 ++-- fs/fcntl.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/exec.c b/fs/exec.c index c62efcb959c7..52a447d9b6ab 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -120,7 +120,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library) goto out; file = do_filp_open(AT_FDCWD, tmp, - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0, + O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0, MAY_READ | MAY_EXEC | MAY_OPEN); putname(tmp); error = PTR_ERR(file); @@ -723,7 +723,7 @@ struct file *open_exec(const char *name) int err; file = do_filp_open(AT_FDCWD, name, - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0, + O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0, MAY_EXEC | MAY_OPEN); if (IS_ERR(file)) goto out; diff --git a/fs/fcntl.c b/fs/fcntl.c index ecc8b3954ed6..cb1026181bdc 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -815,7 +815,7 @@ static int __init fcntl_init(void) __O_SYNC | O_DSYNC | FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | - FMODE_EXEC + __FMODE_EXEC )); fasync_cache = kmem_cache_create("fasync_cache", -- cgit v1.2.2 From d54cdc8ca7aabc69e145a62155855db42b04ed0b Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 1 Feb 2011 15:52:47 -0800 Subject: fs: make block fiemap mapping length at least blocksize long Some filesystems don't deal well with being asked to map less than blocksize blocks (GFS2 for example). Since we are always mapping at least blocksize sections anyway, just make sure len is at least as big as a blocksize so we don't trip up any filesystems. Thanks, Signed-off-by: Josef Bacik Cc: Steven Whitehouse Cc: Christoph Hellwig Cc: Al Viro Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/ioctl.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/ioctl.c b/fs/ioctl.c index a59635e295fa..1eebeb72b202 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -273,6 +273,13 @@ int __generic_block_fiemap(struct inode *inode, len = isize; } + /* + * Some filesystems can't deal with being asked to map less than + * blocksize, so make sure our len is at least block length. + */ + if (logical_to_blk(inode, len) == 0) + len = blk_to_logical(inode, 1); + start_blk = logical_to_blk(inode, start); last_blk = logical_to_blk(inode, start + len - 1); -- cgit v1.2.2 From 0b0abeaf3d30cec03ac6497fe978b8f7edecc5ae Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Wed, 2 Feb 2011 21:02:12 +0200 Subject: Revert "exofs: Set i_mapping->backing_dev_info anyway" This reverts commit 115e19c53501edc11f730191f7f047736815ae3d. Apparently setting inode->bdi to one's own sb->s_bdi stops VFS from sending *read-aheads*. This problem was bisected to this commit. A revert fixes it. I'll investigate farther why is this happening for the next Kernel, but for now a revert. I'm sending to stable@kernel.org as well, since it exists also in 2.6.37. 2.6.36 is good and does not have this patch. 
CC: Stable Tree Signed-off-by: Boaz Harrosh Signed-off-by: Linus Torvalds --- fs/exofs/inode.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'fs') diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index 42685424817b..a7555238c41a 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -1030,7 +1030,6 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino) memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data)); } - inode->i_mapping->backing_dev_info = sb->s_bdi; if (S_ISREG(inode->i_mode)) { inode->i_op = &exofs_file_inode_operations; inode->i_fop = &exofs_file_operations; @@ -1131,7 +1130,6 @@ struct inode *exofs_new_inode(struct inode *dir, int mode) sbi = sb->s_fs_info; - inode->i_mapping->backing_dev_info = sb->s_bdi; sb->s_dirt = 1; inode_init_owner(inode, dir, mode); inode->i_ino = sbi->s_nextid++; -- cgit v1.2.2 From 8f1f745331c1b560f53c0d60e55a4f4f43f7cce5 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Thu, 3 Feb 2011 14:33:15 -0500 Subject: ext4: fix panic on module unload when stopping lazyinit thread https://bugzilla.kernel.org/show_bug.cgi?id=27652 If the lazyinit thread is running, the teardown function ext4_destroy_lazyinit_thread() has problems: ext4_clear_request_list(); while (ext4_li_info->li_task) { wake_up(&ext4_li_info->li_wait_daemon); wait_event(ext4_li_info->li_wait_task, ext4_li_info->li_task == NULL); } Clearing the request list will cause the thread to exit and free ext4_li_info, so then we're waiting on something which is getting freed. Fix this up by making the thread respond to kthread_stop, and exit, without the need to wait for that exit in some other homegrown way. Cc: stable@kernel.org Reported-and-Tested-by: Tao Ma Signed-off-by: Eric Sandeen Signed-off-by: "Theodore Ts'o" --- fs/ext4/super.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 48ce561fafac..3d8cf2cab379 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -77,6 +77,7 @@ static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data); static void ext4_destroy_lazyinit_thread(void); static void ext4_unregister_li_request(struct super_block *sb); +static void ext4_clear_request_list(void); #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) static struct file_system_type ext3_fs_type = { @@ -2716,6 +2717,8 @@ static void ext4_unregister_li_request(struct super_block *sb) mutex_unlock(&ext4_li_info->li_list_mtx); } +static struct task_struct *ext4_lazyinit_task; + /* * This is the function where ext4lazyinit thread lives. It walks * through the request list searching for next scheduled filesystem. 
@@ -2784,6 +2787,10 @@ cont_thread: if (time_before(jiffies, next_wakeup)) schedule(); finish_wait(&eli->li_wait_daemon, &wait); + if (kthread_should_stop()) { + ext4_clear_request_list(); + goto exit_thread; + } } exit_thread: @@ -2808,6 +2815,7 @@ exit_thread: wake_up(&eli->li_wait_task); kfree(ext4_li_info); + ext4_lazyinit_task = NULL; ext4_li_info = NULL; mutex_unlock(&ext4_li_mtx); @@ -2830,11 +2838,10 @@ static void ext4_clear_request_list(void) static int ext4_run_lazyinit_thread(void) { - struct task_struct *t; - - t = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit"); - if (IS_ERR(t)) { - int err = PTR_ERR(t); + ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread, + ext4_li_info, "ext4lazyinit"); + if (IS_ERR(ext4_lazyinit_task)) { + int err = PTR_ERR(ext4_lazyinit_task); ext4_clear_request_list(); del_timer_sync(&ext4_li_info->li_timer); kfree(ext4_li_info); @@ -2985,16 +2992,10 @@ static void ext4_destroy_lazyinit_thread(void) * If thread exited earlier * there's nothing to be done. */ - if (!ext4_li_info) + if (!ext4_li_info || !ext4_lazyinit_task) return; - ext4_clear_request_list(); - - while (ext4_li_info->li_task) { - wake_up(&ext4_li_info->li_wait_daemon); - wait_event(ext4_li_info->li_wait_task, - ext4_li_info->li_task == NULL); - } + kthread_stop(ext4_lazyinit_task); } static int ext4_fill_super(struct super_block *sb, void *data, int silent) -- cgit v1.2.2 From 8f021222c1e2756ea4c9dde93b23e1d2a0a4ec37 Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Thu, 3 Feb 2011 14:33:33 -0500 Subject: ext4: unregister features interface on module unload Ext4 features interface was not properly unregistered which led to problems while unloading/reloading ext4 module. This commit fixes that by adding proper kobject unregistration code into ext4_exit_fs() as well as the fail-path of ext4_init_fs(). Reported-by: Eric Sandeen Signed-off-by: Lukas Czerner Signed-off-by: "Theodore Ts'o" Cc: stable@kernel.org --- fs/ext4/super.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 3d8cf2cab379..4898cb1ff606 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4769,7 +4769,7 @@ static struct file_system_type ext4_fs_type = { .fs_flags = FS_REQUIRES_DEV, }; -int __init ext4_init_feat_adverts(void) +static int __init ext4_init_feat_adverts(void) { struct ext4_features *ef; int ret = -ENOMEM; @@ -4793,6 +4793,13 @@ out: return ret; } +static void ext4_exit_feat_adverts(void) +{ + kobject_put(&ext4_feat->f_kobj); + wait_for_completion(&ext4_feat->f_kobj_unregister); + kfree(ext4_feat); +} + static int __init ext4_init_fs(void) { int err; @@ -4839,7 +4846,7 @@ out1: out2: ext4_exit_mballoc(); out3: - kfree(ext4_feat); + ext4_exit_feat_adverts(); remove_proc_entry("fs/ext4", NULL); kset_unregister(ext4_kset); out4: @@ -4858,6 +4865,7 @@ static void __exit ext4_exit_fs(void) destroy_inodecache(); ext4_exit_xattr(); ext4_exit_mballoc(); + ext4_exit_feat_adverts(); remove_proc_entry("fs/ext4", NULL); kset_unregister(ext4_kset); ext4_exit_system_zone(); -- cgit v1.2.2 From dd68314ccf3fb918c1fb6471817edbc60ece4b52 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Thu, 3 Feb 2011 14:33:49 -0500 Subject: ext4: fix up ext4 error handling Make sure the correct cleanup happens if we die while trying to load the ext4 file system.
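The cleanup fix below follows the usual init-time goto ladder: every resource that was successfully set up gets its own unwind label, and a failure jumps to the label that tears down exactly what has been initialised so far, in reverse order. A generic sketch of the pattern, with invented setup/teardown names (not the actual ext4 routines):

        static int __init example_init(void)
        {
                int err;

                err = setup_first();
                if (err)
                        goto out;
                err = setup_second();
                if (err)
                        goto out_first;
                err = setup_third();
                if (err)
                        goto out_second;
                return 0;

        out_second:
                teardown_second();
        out_first:
                teardown_first();
        out:
                return err;
        }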
Signed-off-by: "Theodore Ts'o" --- fs/ext4/super.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 4898cb1ff606..86b05486dc63 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4810,13 +4810,17 @@ static int __init ext4_init_fs(void) return err; err = ext4_init_system_zone(); if (err) - goto out5; + goto out7; ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj); if (!ext4_kset) - goto out4; + goto out6; ext4_proc_root = proc_mkdir("fs/ext4", NULL); + if (!ext4_proc_root) + goto out5; err = ext4_init_feat_adverts(); + if (err) + goto out4; err = ext4_init_mballoc(); if (err) @@ -4847,11 +4851,13 @@ out2: ext4_exit_mballoc(); out3: ext4_exit_feat_adverts(); +out4: remove_proc_entry("fs/ext4", NULL); +out5: kset_unregister(ext4_kset); -out4: +out6: ext4_exit_system_zone(); -out5: +out7: ext4_exit_pageio(); return err; } -- cgit v1.2.2 From c5b8d0bce052949e173b5b32f96bd59bceaa2ab0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 2 Feb 2011 09:32:39 -0700 Subject: hfsplus: fix failed mount handling Currently the error handling in hfsplus_fill_super is a mess, and can lead to accessing fields in the superblock that haven't been even set up yet. Fix this by making sure we do not set up sb->s_root until we have the mount fully set up, and before that do proper step by step unwinding instead of using hfsplus_put_super as a big hammer. Reported-by: Dan Williams Signed-off-by: Christoph Hellwig --- fs/hfsplus/super.c | 106 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 61 insertions(+), 45 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 9a3b4795f43c..b49b55584c84 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -338,20 +338,22 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) struct inode *root, *inode; struct qstr str; struct nls_table *nls = NULL; - int err = -EINVAL; + int err; + err = -EINVAL; sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) - return -ENOMEM; + goto out; sb->s_fs_info = sbi; mutex_init(&sbi->alloc_mutex); mutex_init(&sbi->vh_mutex); hfsplus_fill_defaults(sbi); + + err = -EINVAL; if (!hfsplus_parse_options(data, sbi)) { printk(KERN_ERR "hfs: unable to parse mount options\n"); - err = -EINVAL; - goto cleanup; + goto out_unload_nls; } /* temporarily use utf8 to correctly find the hidden dir below */ @@ -359,16 +361,14 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) sbi->nls = load_nls("utf8"); if (!sbi->nls) { printk(KERN_ERR "hfs: unable to load nls for utf8\n"); - err = -EINVAL; - goto cleanup; + goto out_unload_nls; } /* Grab the volume header */ if (hfsplus_read_wrapper(sb)) { if (!silent) printk(KERN_WARNING "hfs: unable to find HFS+ superblock\n"); - err = -EINVAL; - goto cleanup; + goto out_unload_nls; } vhdr = sbi->s_vhdr; @@ -377,7 +377,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION || be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) { printk(KERN_ERR "hfs: wrong filesystem version\n"); - goto cleanup; + goto out_free_vhdr; } sbi->total_blocks = be32_to_cpu(vhdr->total_blocks); sbi->free_blocks = be32_to_cpu(vhdr->free_blocks); @@ -421,19 +421,19 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID); if (!sbi->ext_tree) { printk(KERN_ERR "hfs: failed to 
load extents file\n"); - goto cleanup; + goto out_free_vhdr; } sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID); if (!sbi->cat_tree) { printk(KERN_ERR "hfs: failed to load catalog file\n"); - goto cleanup; + goto out_close_ext_tree; } inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID); if (IS_ERR(inode)) { printk(KERN_ERR "hfs: failed to load allocation file\n"); err = PTR_ERR(inode); - goto cleanup; + goto out_close_cat_tree; } sbi->alloc_file = inode; @@ -442,14 +442,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) if (IS_ERR(root)) { printk(KERN_ERR "hfs: failed to load root directory\n"); err = PTR_ERR(root); - goto cleanup; - } - sb->s_d_op = &hfsplus_dentry_operations; - sb->s_root = d_alloc_root(root); - if (!sb->s_root) { - iput(root); - err = -ENOMEM; - goto cleanup; + goto out_put_alloc_file; } str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1; @@ -459,46 +452,69 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) if (!hfs_brec_read(&fd, &entry, sizeof(entry))) { hfs_find_exit(&fd); if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) - goto cleanup; + goto out_put_root; inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id)); if (IS_ERR(inode)) { err = PTR_ERR(inode); - goto cleanup; + goto out_put_root; } sbi->hidden_dir = inode; } else hfs_find_exit(&fd); - if (sb->s_flags & MS_RDONLY) - goto out; + if (!(sb->s_flags & MS_RDONLY)) { + /* + * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused + * all three are registered with Apple for our use + */ + vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION); + vhdr->modify_date = hfsp_now2mt(); + be32_add_cpu(&vhdr->write_count, 1); + vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT); + vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT); + hfsplus_sync_fs(sb, 1); - /* H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused - * all three are registered with Apple for our use - */ - vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION); - vhdr->modify_date = hfsp_now2mt(); - be32_add_cpu(&vhdr->write_count, 1); - vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT); - vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT); - hfsplus_sync_fs(sb, 1); - - if (!sbi->hidden_dir) { - mutex_lock(&sbi->vh_mutex); - sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR); - hfsplus_create_cat(sbi->hidden_dir->i_ino, sb->s_root->d_inode, - &str, sbi->hidden_dir); - mutex_unlock(&sbi->vh_mutex); - - hfsplus_mark_inode_dirty(sbi->hidden_dir, HFSPLUS_I_CAT_DIRTY); + if (!sbi->hidden_dir) { + mutex_lock(&sbi->vh_mutex); + sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR); + hfsplus_create_cat(sbi->hidden_dir->i_ino, root, &str, + sbi->hidden_dir); + mutex_unlock(&sbi->vh_mutex); + + hfsplus_mark_inode_dirty(sbi->hidden_dir, + HFSPLUS_I_CAT_DIRTY); + } } -out: + + sb->s_d_op = &hfsplus_dentry_operations; + sb->s_root = d_alloc_root(root); + if (!sb->s_root) { + err = -ENOMEM; + goto out_put_hidden_dir; + } + unload_nls(sbi->nls); sbi->nls = nls; return 0; -cleanup: - hfsplus_put_super(sb); +out_put_hidden_dir: + iput(sbi->hidden_dir); +out_put_root: + iput(sbi->alloc_file); +out_put_alloc_file: + iput(sbi->alloc_file); +out_close_cat_tree: + hfs_btree_close(sbi->cat_tree); +out_close_ext_tree: + hfs_btree_close(sbi->ext_tree); +out_free_vhdr: + kfree(sbi->s_vhdr); + kfree(sbi->s_backup_vhdr); +out_unload_nls: + unload_nls(sbi->nls); unload_nls(nls); + kfree(sbi); +out: return err; } -- cgit v1.2.2 From 14dd01f88319a37b06ca909738044e39ec5bfdee Mon Sep 17 00:00:00 2001 From: Chuck Ebbert Date: 
Tue, 1 Feb 2011 16:41:55 -0500 Subject: hfsplus: do not leak buffer on error Signed-Off-By: Chuck Ebbert Signed-off-by: Christoph Hellwig --- fs/hfsplus/part_tbl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c index d66ad113b1cc..40ad88c12c64 100644 --- a/fs/hfsplus/part_tbl.c +++ b/fs/hfsplus/part_tbl.c @@ -134,7 +134,7 @@ int hfs_part_find(struct super_block *sb, res = hfsplus_submit_bio(sb->s_bdev, *part_start + HFS_PMAP_BLK, data, READ); if (res) - return res; + goto out; switch (be16_to_cpu(*((__be16 *)data))) { case HFS_OLD_PMAP_MAGIC: @@ -147,7 +147,7 @@ int hfs_part_find(struct super_block *sb, res = -ENOENT; break; } - +out: kfree(data); return res; } -- cgit v1.2.2 From a1dbcef0172555464b5329f8ba47d43c98132dfa Mon Sep 17 00:00:00 2001 From: Chuck Ebbert Date: Wed, 2 Feb 2011 10:55:06 -0500 Subject: hfsplus: fix two memory leaks in wrapper.c Signed-Off-By: Chuck Ebbert Signed-off-by: Christoph Hellwig --- fs/hfsplus/wrapper.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 196231794f64..3031d81f5f0f 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -167,7 +167,7 @@ reread: break; case cpu_to_be16(HFSP_WRAP_MAGIC): if (!hfsplus_read_mdb(sbi->s_vhdr, &wd)) - goto out; + goto out_free_backup_vhdr; wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT; part_start += wd.ablk_start + wd.embed_start * wd.ablk_size; part_size = wd.embed_count * wd.ablk_size; @@ -179,7 +179,7 @@ reread: * (should do this only for cdrom/loop though) */ if (hfs_part_find(sb, &part_start, &part_size)) - goto out; + goto out_free_backup_vhdr; goto reread; } -- cgit v1.2.2 From 1065348d472f97b4b8eb53b60ec67e99148cbbca Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 2 Feb 2011 09:40:33 -0700 Subject: hfsplus: fix up a comparison in hfsplus_file_extend Revert an incorrect hunk from commit b2837fcf4994e699a4def002e26f274d95b387c1, "hfsplus: %L-to-%ll, macro correction, and remove unneeded braces" revert a pointless change of comparison operation argument order, which turned out to not even be equivalent. Reported-by: Joe Perches Signed-off-by: Christoph Hellwig --- fs/hfsplus/extents.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index 52a0bcaa7b6d..b1991a2a08e0 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c @@ -397,8 +397,8 @@ int hfsplus_file_extend(struct inode *inode) u32 start, len, goal; int res; - if (sbi->total_blocks - sbi->free_blocks + 8 > - sbi->alloc_file->i_size * 8) { + if (sbi->alloc_file->i_size * 8 < + sbi->total_blocks - sbi->free_blocks + 8) { /* extend alloc file */ printk(KERN_ERR "hfs: extend alloc file! " "(%llu,%u,%u)\n", -- cgit v1.2.2 From 76429c148b939f5a6863c0a024eb8960ae91469a Mon Sep 17 00:00:00 2001 From: Pavel Shilovsky Date: Mon, 31 Jan 2011 16:03:08 +0300 Subject: CIFS: Fix variable types in cifs_iovec_read/write (try #2) Variable 'i' should be unsigned long as it's used in a loop with num_pages, and bytes_read/total_written should be ssize_t according to return value.
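The reason the types matter is that both functions return ssize_t: a negative error code accumulated in an unsigned size_t turns into a huge positive byte count at the return statement. A standalone user-space illustration of the difference (not CIFS code):

        #include <stdio.h>
        #include <sys/types.h>

        int main(void)
        {
                size_t  wrong = (size_t)-5;     /* an -EIO-style error stored unsigned */
                ssize_t right = -5;

                printf("stored in size_t : %zu\n", wrong);      /* huge bogus length */
                printf("stored in ssize_t: %zd\n", right);      /* still an error */
                return 0;
        }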
Signed-off-by: Pavel Shilovsky Reviewed-by: Shirish Pargaonkar Signed-off-by: Steve French --- fs/cifs/file.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 74c0a282d012..e964b1cd5dd0 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1662,10 +1662,10 @@ static ssize_t cifs_iovec_write(struct file *file, const struct iovec *iov, unsigned long nr_segs, loff_t *poffset) { - size_t total_written = 0; - unsigned int written = 0; - unsigned long num_pages, npages; - size_t copied, len, cur_len, i; + unsigned int written; + unsigned long num_pages, npages, i; + size_t copied, len, cur_len; + ssize_t total_written = 0; struct kvec *to_send; struct page **pages; struct iov_iter it; @@ -1821,7 +1821,8 @@ cifs_iovec_read(struct file *file, const struct iovec *iov, { int rc; int xid; - unsigned int total_read, bytes_read = 0; + ssize_t total_read; + unsigned int bytes_read = 0; size_t len, cur_len; int iov_offset = 0; struct cifs_sb_info *cifs_sb; -- cgit v1.2.2 From 78d2978874e4e10e97dfd4fd79db45bdc0748550 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Fri, 4 Feb 2011 18:13:24 +0000 Subject: CRED: Fix kernel panic upon security_file_alloc() failure. In get_empty_filp() since 2.6.29, file_free(f) is called with f->f_cred == NULL when security_file_alloc() returned an error. As a result, kernel will panic() due to put_cred(NULL) call within RCU callback. Fix this bug by assigning f->f_cred before calling security_file_alloc(). Signed-off-by: Tetsuo Handa Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- fs/file_table.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/file_table.c b/fs/file_table.c index c3e89adf53c0..eb36b6b17e26 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -125,13 +125,13 @@ struct file *get_empty_filp(void) goto fail; percpu_counter_inc(&nr_files); + f->f_cred = get_cred(cred); if (security_file_alloc(f)) goto fail_sec; INIT_LIST_HEAD(&f->f_u.fu_list); atomic_long_set(&f->f_count, 1); rwlock_init(&f->f_owner.lock); - f->f_cred = get_cred(cred); spin_lock_init(&f->f_lock); eventpoll_init_file(f); /* f->f_version: 0 */ -- cgit v1.2.2 From 64474bdd07f673cc48509ea0375274422c8f73bf Mon Sep 17 00:00:00 2001 From: Shirish Pargaonkar Date: Thu, 3 Feb 2011 14:31:18 -0600 Subject: cifs: Possible slab memory corruption while updating extended stats (repost) Updating extended statistics here can cause slab memory corruption if a callback function frees slab memory (mid_entry). Signed-off-by: Shirish Pargaonkar Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 945b2202275f..1f32a2893b5f 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -633,11 +633,11 @@ incomplete_rcv: mid_entry->largeBuf = isLargeBuf; multi_t2_fnd: mid_entry->midState = MID_RESPONSE_RECEIVED; - list_del_init(&mid_entry->qhead); - mid_entry->callback(mid_entry); #ifdef CONFIG_CIFS_STATS2 mid_entry->when_received = jiffies; #endif + list_del_init(&mid_entry->qhead); + mid_entry->callback(mid_entry); break; } mid_entry = NULL; -- cgit v1.2.2 From e3f0dadb2b44746f6223ce4560406d19e02fb1cc Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 4 Feb 2011 07:21:26 -0500 Subject: cifs: enable signing flag in SMB header when server has it on cifs_sign_smb only generates a signature if the correct Flags2 bit is set. 
Make sure that it gets set correctly if we're sending an async call. This patch fixes: https://bugzilla.kernel.org/show_bug.cgi?id=28142 Reported-and-Tested-by: JG Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/transport.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index b8c5e2eb43d0..fbc5aace54b1 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -359,6 +359,10 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf, if (rc) return rc; + /* enable signing if server requires it */ + if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) + in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; + mutex_lock(&server->srv_mutex); mid = AllocMidQEntry(in_buf, server); if (mid == NULL) { -- cgit v1.2.2 From 247ec9b418ba50c9022280035330059364d54540 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Fri, 4 Feb 2011 17:09:50 -0500 Subject: cifs: don't send an echo request unless NegProt has been done When the socket to the server is disconnected, the client more or less immediately calls cifs_reconnect to reconnect the socket. The NegProt and SessSetup however are not done until an actual call needs to be made. With the addition of the SMB echo code, it's possible that the server will initiate a disconnect on an idle socket. The client will then reconnect the socket but no NegotiateProtocol request is done. The SMBEcho workqueue job will then eventually pop, and an SMBEcho will be sent on the socket. The server will then reject it since no NegProt was done. The ideal fix would be to either have the socket not be reconnected until we plan to use it, or to immediately do a NegProt when the reconnect occurs. The code is not structured for this however. For now we must just settle for not sending any echoes until the NegProt is done. Reported-by: JG Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 1f32a2893b5f..257b6d895e20 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -337,8 +337,12 @@ cifs_echo_request(struct work_struct *work) struct TCP_Server_Info *server = container_of(work, struct TCP_Server_Info, echo.work); - /* no need to ping if we got a response recently */ - if (time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) + /* + * We cannot send an echo until the NEGOTIATE_PROTOCOL request is done. + * Also, no need to ping if we got a response recently + */ + if (server->tcpStatus != CifsGood || + time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) goto requeue_echo; rc = CIFSSMBEcho(server); -- cgit v1.2.2 From e8e1ba96b207deba1339b09983f8b29f92cb1497 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Fri, 4 Feb 2011 20:45:58 -0800 Subject: ceph: queue cap_snaps once per realm We were forming a dirty list, and then queueing cap_snaps for each realm _and_ its children, regardless of whether the children were already in the dirty list. This meant we did it twice for some realms. Which in turn meant we corrupted mdsc->snap_flush_list when the cap_snap was re-added to the list it was already on, and could trigger an infinite loop. We were also using recursion to do reach all the children, a no-no when stack is limited. Instead, (re)queue any children on the dirty list, avoiding processing anything twice and avoiding any recursion. 
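The shape of the fix is a common one: instead of recursing into children (risking both double work and deep stacks), each visited node splices its children onto the same work list and a single flat loop drains it. The kernel code avoids double-queueing by moving entries with list_del_init() before re-adding them; the user-space sketch below simply assumes a child is not already queued (types and names invented for illustration):

        struct node {
                struct node *children;  /* first child */
                struct node *sibling;   /* next child of the same parent */
                struct node *work;      /* next entry on the work list */
        };

        static void process_one(struct node *n)
        {
                (void)n;                /* per-node work would go here */
        }

        static void process_all(struct node *worklist)
        {
                while (worklist) {
                        struct node *n = worklist;
                        struct node *c;

                        worklist = n->work;             /* pop the head */
                        for (c = n->children; c; c = c->sibling) {
                                c->work = worklist;     /* queue child instead of recursing */
                                worklist = c;
                        }
                        process_one(n);
                }
        }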
Signed-off-by: Sage Weil --- fs/ceph/snap.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 39c243acd062..f40b9139e437 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -584,10 +584,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm) if (lastinode) iput(lastinode); - dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino); - list_for_each_entry(child, &realm->children, child_item) - queue_realm_cap_snaps(child); + list_for_each_entry(child, &realm->children, child_item) { + dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n", + realm, realm->ino, child, child->ino); + list_del_init(&child->dirty_item); + list_add(&child->dirty_item, &realm->dirty_item); + } + list_del_init(&realm->dirty_item); dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino); } @@ -683,7 +687,9 @@ more: * queue cap snaps _after_ we've built the new snap contexts, * so that i_head_snapc can be set appropriately. */ - list_for_each_entry(realm, &dirty_realms, dirty_item) { + while (!list_empty(&dirty_realms)) { + realm = list_first_entry(&dirty_realms, struct ceph_snap_realm, + dirty_item); queue_realm_cap_snaps(realm); } -- cgit v1.2.2 From 8132b65bc6ce6d9a4baafdfc28c7cd9c258ed6e4 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Sun, 6 Feb 2011 02:05:28 +0300 Subject: cifs: add check for kmalloc in parse_dacl Exit from parse_dacl if no memory returned from the call to kmalloc. Signed-off-by: Stanislav Fomichev Signed-off-by: Steve French --- fs/cifs/cifsacl.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'fs') diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 1e7636b145a8..beeebf194234 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -372,6 +372,10 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl, ppace = kmalloc(num_aces * sizeof(struct cifs_ace *), GFP_KERNEL); + if (!ppace) { + cERROR(1, "DACL memory allocation error"); + return; + } for (i = 0; i < num_aces; ++i) { ppace[i] = (struct cifs_ace *) (acl_base + acl_size); -- cgit v1.2.2 From 13dbc08987f25d9dba488a34b44b43e3844b027c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Thu, 3 Feb 2011 02:39:52 +0000 Subject: Btrfs: make sure search_bitmap finds something in remove_from_bitmap When we're cleaning up the tree log we need to be able to remove free space from the block group. The problem is if that free space spans bitmaps we would not find the space since we're looking for too many bytes. So make sure the amount of bytes we search for is limited to either the number of bytes we want, or the number of bytes left in the bitmap. This was tested by a user who was hitting the BUG() after search_bitmap. With this patch he can now mount his fs. 
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/free-space-cache.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index a5501edc3c9f..a0390657451b 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1216,6 +1216,7 @@ again: */ search_start = *offset; search_bytes = *bytes; + search_bytes = min(search_bytes, end - search_start + 1); ret = search_bitmap(block_group, bitmap_info, &search_start, &search_bytes); BUG_ON(ret < 0 || search_start != *offset); -- cgit v1.2.2 From 3c14874acc71180553fb5aba528e3cf57c5b958b Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 2 Feb 2011 15:53:47 +0000 Subject: Btrfs: exclude super blocks when we read in block groups This has been resulting in a BUG_ON(ret) after btrfs_reserve_extent in btrfs_cow_file_range. The reason is we don't actually calculate the bytes_super for a block group until we go to cache it, which means that the space_info can hand out reservations for space that it doesn't actually have, and we can run out of data space. This is also a problem if you are using space caching since we don't ever calculate bytes_super for the block groups. So instead, every time we read a block group, call exclude_super_stripes, which calculates the bytes_super for the block group so it can be left out of the space_info. Then whenever caching completes we just call free_excluded_extents so that the super excluded extents are freed up. Also if we are unmounting and we hit any block groups that haven't been cached we still need to call free_excluded_extents to make sure things are cleaned up properly. Thanks, Reported-by: Arne Jansen Signed-off-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f07ba21cbf06..565e22d77b1b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -320,11 +320,6 @@ static int caching_kthread(void *data) if (!path) return -ENOMEM; - exclude_super_stripes(extent_root, block_group); - spin_lock(&block_group->space_info->lock); - block_group->space_info->bytes_readonly += block_group->bytes_super; - spin_unlock(&block_group->space_info->lock); - last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET); /* @@ -467,8 +462,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, cache->cached = BTRFS_CACHE_NO; } spin_unlock(&cache->lock); - if (ret == 1) + if (ret == 1) { + free_excluded_extents(fs_info->extent_root, cache); return 0; + } } if (load_cache_only) @@ -4036,6 +4033,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) num_bytes = ALIGN(num_bytes, root->sectorsize); atomic_dec(&BTRFS_I(inode)->outstanding_extents); + WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0); spin_lock(&BTRFS_I(inode)->accounting_lock); nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents); @@ -8325,6 +8323,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) if (block_group->cached == BTRFS_CACHE_STARTED) wait_block_group_cache_done(block_group); + /* + * We haven't cached this block group, which means we could + * possibly have excluded extents on this block group.
+ */ + if (block_group->cached == BTRFS_CACHE_NO) + free_excluded_extents(info->extent_root, block_group); + btrfs_remove_free_space_cache(block_group); btrfs_put_block_group(block_group); @@ -8439,6 +8444,13 @@ int btrfs_read_block_groups(struct btrfs_root *root) cache->flags = btrfs_block_group_flags(&cache->item); cache->sectorsize = root->sectorsize; + /* + * We need to exclude the super stripes now so that the space + * info has super bytes accounted for, otherwise we'll think + * we have more space than we actually do. + */ + exclude_super_stripes(root, cache); + /* * check for two cases, either we are full, and therefore * don't need to bother with the caching work since we won't @@ -8447,12 +8459,10 @@ int btrfs_read_block_groups(struct btrfs_root *root) * time, particularly in the full case. */ if (found_key.offset == btrfs_block_group_used(&cache->item)) { - exclude_super_stripes(root, cache); cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; free_excluded_extents(root, cache); } else if (btrfs_block_group_used(&cache->item) == 0) { - exclude_super_stripes(root, cache); cache->last_byte_to_unpin = (u64)-1; cache->cached = BTRFS_CACHE_FINISHED; add_new_free_space(cache, root->fs_info, -- cgit v1.2.2 From 554233a6e0e8557e8e81e54cc70628d101291122 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Thu, 3 Feb 2011 03:16:25 +0000 Subject: btrfs: cleanup error handling in btrfs_unlink_inode() When btrfs_alloc_path() fails, btrfs_free_path() need not be called. Therefore, it changes the branch ahead. Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/inode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 36bc3f49ebf9..c9bc0afdbfc6 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2646,7 +2646,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans, path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; - goto err; + goto out; } path->leave_spinning = 1; -- cgit v1.2.2 From 8e4eef7a60eeca0fe7503e5cbd3b24ff4941c732 Mon Sep 17 00:00:00 2001 From: Alexey Charkov Date: Wed, 2 Feb 2011 21:15:35 +0000 Subject: btrfs: Drop __exit attribute on btrfs_exit_compress As this function is called in some error paths while not removing the module, the __exit attribute prevents the kernel image from linking when btrfs is compiled in statically. Signed-off-by: Alexey Charkov Signed-off-by: Chris Mason --- fs/btrfs/compression.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 3a932f183da1..4d2110eafe29 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -921,7 +921,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, return ret; } -void __exit btrfs_exit_compress(void) +void btrfs_exit_compress(void) { free_workspaces(); } -- cgit v1.2.2 From d402539b8fc3fa21f16eb5e654be742670399e8a Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 7 Feb 2011 08:54:35 -0500 Subject: cifs: remove checks for ses->status == CifsExiting ses->status is never set to CifsExiting, so these checks are always false. 
Tested-by: JG Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifssmb.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 46c66ed01af4..904aa47e3515 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -136,9 +136,6 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) } } - if (ses->status == CifsExiting) - return -EIO; - /* * Give demultiplex thread up to 10 seconds to reconnect, should be * greater than cifs socket timeout which is 7 seconds @@ -156,7 +153,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) * retrying until process is killed or server comes * back on-line */ - if (!tcon->retry || ses->status == CifsExiting) { + if (!tcon->retry) { cFYI(1, "gave up waiting on reconnect in smb_init"); return -EHOSTDOWN; } -- cgit v1.2.2 From d50bdd5aa55127635fd8a5c74bd2abb256bd34e3 Mon Sep 17 00:00:00 2001 From: Curt Wohlgemuth Date: Mon, 7 Feb 2011 12:46:14 -0500 Subject: ext4: Fix data corruption with multi-block writepages support This fixes a corruption problem with the multi-block writepages submittal change for ext4, from commit bd2d0210cf22f2bd0cef72eb97cf94fc7d31d8cc ("ext4: use bio layer instead of buffer layer in mpage_da_submit_io"). (Note that this corruption is not present in 2.6.37 on ext4, because the corruption was detected after the feature was merged in 2.6.37-rc1, and so it was turned off by adding a non-default mount option, mblk_io_submit. With this commit, which hopefully fixes the last of the bugs with this feature, we'll be able to turn on this performance feature by default in 2.6.38, and remove the mblk_io_submit option.) The ext4 code path to bundle multiple pages for writeback in ext4_bio_write_page() had a bug: we should be clearing buffer head dirty flags *before* we submit the bio, not in the completion routine. The patch below was tested on 2.6.37 under KVM with the postgresql script which was submitted by Jon Nelson as documented in commit 1449032be1. Without the patch, I'd hit the corruption problem about 50-70% of the time. With the patch, I executed the script > 100 times with no corruption seen. I also fixed a bug to make sure ext4_end_bio() doesn't dereference the bio after the bio_put() call. 
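The bio_put() half of the fix is the general use-after-put rule for completion handlers: copy out anything needed after the last reference is dropped, because the object may be freed immediately. A minimal sketch of that pattern (illustrative only, using the 2.6.38-era bi_sector field):

        #include <linux/bio.h>

        static void example_end_bio(struct bio *bio, int error)
        {
                sector_t sector = bio->bi_sector;       /* copy before the final put */

                bio_put(bio);                           /* bio may be freed here */

                if (error)
                        pr_err("I/O error near sector %llu\n",
                               (unsigned long long)sector);
        }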
Reported-by: Jon Nelson Reported-by: Matthias Bayer Signed-off-by: Curt Wohlgemuth Signed-off-by: "Theodore Ts'o" Cc: stable@kernel.org --- fs/ext4/page-io.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 7270dcfca92a..4e9b0a242f4c 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -190,6 +190,7 @@ static void ext4_end_bio(struct bio *bio, int error) struct inode *inode; unsigned long flags; int i; + sector_t bi_sector = bio->bi_sector; BUG_ON(!io_end); bio->bi_private = NULL; @@ -207,9 +208,7 @@ static void ext4_end_bio(struct bio *bio, int error) if (error) SetPageError(page); BUG_ON(!head); - if (head->b_size == PAGE_CACHE_SIZE) - clear_buffer_dirty(head); - else { + if (head->b_size != PAGE_CACHE_SIZE) { loff_t offset; loff_t io_end_offset = io_end->offset + io_end->size; @@ -221,7 +220,6 @@ static void ext4_end_bio(struct bio *bio, int error) if (error) buffer_io_error(bh); - clear_buffer_dirty(bh); } if (buffer_delay(bh)) partial_write = 1; @@ -257,7 +255,7 @@ static void ext4_end_bio(struct bio *bio, int error) (unsigned long long) io_end->offset, (long) io_end->size, (unsigned long long) - bio->bi_sector >> (inode->i_blkbits - 9)); + bi_sector >> (inode->i_blkbits - 9)); } /* Add the io_end to per-inode completed io list*/ @@ -380,6 +378,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, blocksize = 1 << inode->i_blkbits; + BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); set_page_writeback(page); ClearPageError(page); @@ -397,12 +396,14 @@ int ext4_bio_write_page(struct ext4_io_submit *io, for (bh = head = page_buffers(page), block_start = 0; bh != head || !block_start; block_start = block_end, bh = bh->b_this_page) { + block_end = block_start + blocksize; if (block_start >= len) { clear_buffer_dirty(bh); set_buffer_uptodate(bh); continue; } + clear_buffer_dirty(bh); ret = io_submit_add_bh(io, io_page, inode, wbc, bh); if (ret) { /* -- cgit v1.2.2 From 3a90983dbdcb2f4f48c0d771d8e5b4d88f27fae6 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 18 Jan 2011 13:34:40 +0800 Subject: Btrfs: Fix page count calculation take offset of start position into account when calculating page count. 
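A worked example of the off-by-one, assuming 4 KiB pages: a 4096-byte copy that starts 100 bytes into its first page touches two pages, but rounding up the length alone yields one. Standalone arithmetic check (PAGE_CACHE_SIZE hard-coded here purely for illustration):

        #include <stdio.h>

        #define PAGE_CACHE_SHIFT 12
        #define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

        int main(void)
        {
                unsigned long offset = 100, write_bytes = 4096;
                unsigned long old_count = (write_bytes + PAGE_CACHE_SIZE - 1)
                                                >> PAGE_CACHE_SHIFT;
                unsigned long new_count = (write_bytes + offset + PAGE_CACHE_SIZE - 1)
                                                >> PAGE_CACHE_SHIFT;

                printf("old=%lu fixed=%lu\n", old_count, new_count);    /* old=1 fixed=2 */
                return 0;
        }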
Signed-off-by: Yan, Zheng Signed-off-by: Chris Mason --- fs/btrfs/file.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 9e097fbfc78d..b0ff34b96607 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -991,8 +991,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, size_t write_bytes = min(iov_iter_count(&i), nrptrs * (size_t)PAGE_CACHE_SIZE - offset); - size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + size_t num_pages = (write_bytes + offset + + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; WARN_ON(num_pages > nrptrs); memset(pages, 0, sizeof(struct page *) * nrptrs); @@ -1022,8 +1022,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, copied = btrfs_copy_from_user(pos, num_pages, write_bytes, pages, &i); - dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; if (num_pages > dirty_pages) { if (copied > 0) -- cgit v1.2.2 From 7e90d705fc9f8c5e3a1549281dce0654d049243b Mon Sep 17 00:00:00 2001 From: Steve French Date: Tue, 8 Feb 2011 23:52:32 +0000 Subject: [CIFS] Do not send SMBEcho requests on new sockets until SMBNegotiate In order to determine whether an SMBEcho request can be sent we need to know that the socket is established (server tcpStatus == CifsGood) AND that an SMB NegotiateProtocol has been sent (server maxBuf != 0). Without the second check we can send an Echo request during reconnection before the server can accept it. CC: JG Reviewed-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsglob.h | 2 ++ fs/cifs/connect.c | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index edd5b29b53c9..1ab33eb71d95 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -188,6 +188,8 @@ struct TCP_Server_Info { /* multiplexed reads or writes */ unsigned int maxBuf; /* maxBuf specifies the maximum */ /* message size the server can send or receive for non-raw SMBs */ + /* maxBuf is returned by SMB NegotiateProtocol so maxBuf is only 0 */ + /* when socket is setup (and during reconnect) before NegProt sent */ unsigned int max_rw; /* maxRw specifies the maximum */ /* message size the server can send or receive for */ /* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 257b6d895e20..10011e99b34d 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -341,7 +341,7 @@ cifs_echo_request(struct work_struct *work) * We cannot send an echo until the NEGOTIATE_PROTOCOL request is done. * Also, no need to ping if we got a response recently */ - if (server->tcpStatus != CifsGood || + if ((server->tcpStatus != CifsGood) || (server->maxBuf == 0) || time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) goto requeue_echo; -- cgit v1.2.2 From 195291e68c2ad59a046fc56d32bf59635b100e5c Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 9 Feb 2011 12:01:42 -0500 Subject: cifs: clean up checks in cifs_echo_request Follow-on patch to 7e90d705 which is already in Steve's tree... The check for tcpStatus == CifsGood is not meaningful since it doesn't indicate whether the NEGOTIATE request has been done. Also, clarify why we're checking for maxBuf == 0. 
Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/connect.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 10011e99b34d..161f24ca4f6e 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -338,10 +338,11 @@ cifs_echo_request(struct work_struct *work) struct TCP_Server_Info, echo.work); /* - * We cannot send an echo until the NEGOTIATE_PROTOCOL request is done. - * Also, no need to ping if we got a response recently + * We cannot send an echo until the NEGOTIATE_PROTOCOL request is + * done, which is indicated by maxBuf != 0. Also, no need to ping if + * we got a response recently */ - if ((server->tcpStatus != CifsGood) || (server->maxBuf == 0) || + if (server->maxBuf == 0 || time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ)) goto requeue_echo; -- cgit v1.2.2 From 71823baff1978be892e7a36eddf6170e1cc6650d Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Thu, 10 Feb 2011 08:03:50 -0500 Subject: cifs: don't always drop malformed replies on the floor (try #3) Slight revision to this patch...use min_t() instead of conditional assignment. Also, remove the FIXME comment and replace it with the explanation that Steve gave earlier. After receiving a packet, we currently check the header. If it's no good, then we toss it out and continue the loop, leaving the caller waiting on that response. In cases where the packet has length inconsistencies, but the MID is valid, this leads to unneeded delays. That's especially problematic now that the client waits indefinitely for responses. Instead, don't immediately discard the packet if checkSMB fails. Try to find a matching mid_q_entry, mark it as having a malformed response and issue the callback. Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/cifsglob.h | 2 +- fs/cifs/connect.c | 30 ++++++++++++++++++++++++------ fs/cifs/transport.c | 3 +++ 3 files changed, 28 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 1ab33eb71d95..17afb0fbcaed 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -654,7 +654,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param, #define MID_REQUEST_SUBMITTED 2 #define MID_RESPONSE_RECEIVED 4 #define MID_RETRY_NEEDED 8 /* session closed while this request out */ -#define MID_NO_RESP_NEEDED 0x10 +#define MID_RESPONSE_MALFORMED 0x10 /* Types of response buffer returned from SendReceive2 */ #define CIFS_NO_BUFFER 0 /* Response buffer not returned */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 161f24ca4f6e..8d6c17ab593d 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -586,11 +586,20 @@ incomplete_rcv: total_read += 4; /* account for rfc1002 hdr */ dump_smb(smb_buffer, total_read); - if (checkSMB(smb_buffer, smb_buffer->Mid, total_read)) { + + /* + * We know that we received enough to get to the MID as we + * checked the pdu_length earlier. Now check to see + * if the rest of the header is OK. We borrow the length + * var for the rest of the loop to avoid a new stack var. + * + * 48 bytes is enough to display the header and a little bit + * into the payload for debugging purposes. + */ + length = checkSMB(smb_buffer, smb_buffer->Mid, total_read); + if (length != 0) cifs_dump_mem("Bad SMB: ", smb_buffer, - total_read < 48 ? 
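The practical effect of the new state is that the waiting caller gets a definite error instead of blocking until its request times out. A rough sketch of the state-to-errno mapping the caller ends up seeing (enum values simplified for illustration; the real names appear in the diff below):

        #include <errno.h>

        enum mid_state { MID_SUBMITTED, MID_RECEIVED, MID_RETRY, MID_MALFORMED };

        static int mid_to_errno(enum mid_state state)
        {
                switch (state) {
                case MID_RECEIVED:      return 0;       /* good reply, process it */
                case MID_RETRY:         return -EAGAIN; /* session dropped, resend */
                case MID_MALFORMED:     return -EIO;    /* reply failed sanity checks */
                default:                return -EIO;
                }
        }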
total_read : 48); - continue; - } + min_t(unsigned int, total_read, 48)); mid_entry = NULL; server->lstrp = jiffies; @@ -602,7 +611,8 @@ incomplete_rcv: if ((mid_entry->mid == smb_buffer->Mid) && (mid_entry->midState == MID_REQUEST_SUBMITTED) && (mid_entry->command == smb_buffer->Command)) { - if (check2ndT2(smb_buffer,server->maxBuf) > 0) { + if (length == 0 && + check2ndT2(smb_buffer, server->maxBuf) > 0) { /* We have a multipart transact2 resp */ isMultiRsp = true; if (mid_entry->resp_buf) { @@ -637,7 +647,12 @@ incomplete_rcv: mid_entry->resp_buf = smb_buffer; mid_entry->largeBuf = isLargeBuf; multi_t2_fnd: - mid_entry->midState = MID_RESPONSE_RECEIVED; + if (length == 0) + mid_entry->midState = + MID_RESPONSE_RECEIVED; + else + mid_entry->midState = + MID_RESPONSE_MALFORMED; #ifdef CONFIG_CIFS_STATS2 mid_entry->when_received = jiffies; #endif @@ -658,6 +673,9 @@ multi_t2_fnd: else smallbuf = NULL; } + } else if (length != 0) { + /* response sanity checks failed */ + continue; } else if (!is_valid_oplock_break(smb_buffer, server) && !isMultiRsp) { cERROR(1, "No task to wake, unknown frame received! " diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index fbc5aace54b1..46d8756f2b24 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -457,6 +457,9 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) case MID_RETRY_NEEDED: rc = -EAGAIN; break; + case MID_RESPONSE_MALFORMED: + rc = -EIO; + break; default: cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__, mid->mid, mid->midState); -- cgit v1.2.2 From 6b155c8fd4d239f7d883d455bbad1be47724bbfc Mon Sep 17 00:00:00 2001 From: David Teigland Date: Fri, 11 Feb 2011 16:44:31 -0600 Subject: dlm: use single thread workqueues The recent commit to use cmwq for send and recv threads dcce240ead802d42b1e45ad2fcb2ed4a399cb255 introduced problems, apparently due to multiple workqueue threads. Single threads make the problems go away, so return to that until we fully understand the concurrency issues with multiple threads. Signed-off-by: David Teigland --- fs/dlm/lowcomms.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 9c64ae9e4c1a..2d8c87b951c2 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -1468,15 +1468,13 @@ static void work_stop(void) static int work_start(void) { - recv_workqueue = alloc_workqueue("dlm_recv", WQ_MEM_RECLAIM | - WQ_HIGHPRI | WQ_FREEZEABLE, 0); + recv_workqueue = create_singlethread_workqueue("dlm_recv"); if (!recv_workqueue) { log_print("can't start dlm_recv"); return -ENOMEM; } - send_workqueue = alloc_workqueue("dlm_send", WQ_MEM_RECLAIM | - WQ_HIGHPRI | WQ_FREEZEABLE, 0); + send_workqueue = create_singlethread_workqueue("dlm_send"); if (!send_workqueue) { log_print("can't start dlm_send"); destroy_workqueue(recv_workqueue); -- cgit v1.2.2 From 2dab597441667d6c04451a7dcf215241ad4c74f6 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 11 Feb 2011 15:53:38 -0800 Subject: Fix possible filp_cachep memory corruption In commit 31e6b01f4183 ("fs: rcu-walk for path lookup") we started doing path lookup using RCU, which then falls back to a careful non-RCU lookup in case of problems (LOOKUP_REVAL). So do_filp_open() has this "re-do the lookup carefully" looping case. However, that means that we must not release the open-intent file data if we are going to loop around and use it once more! 
Fix this by moving the release of the open-intent data to the function that allocates it (do_filp_open() itself) rather than the helper functions that can get called multiple times (finish_open() and do_last()). This makes the logic for the lifetime of that field much more obvious, and avoids the possible double free. Reported-by: J. R. Okajima Acked-by: Al Viro Cc: Nick Piggin Cc: Andrew Morton Signed-off-by: Linus Torvalds --- fs/namei.c | 20 ++++++++++---------- fs/open.c | 2 ++ 2 files changed, 12 insertions(+), 10 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 7d77f24d32a9..ec4b2d0190a8 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -561,10 +561,14 @@ static inline int nameidata_drop_rcu_last_maybe(struct nameidata *nd) */ void release_open_intent(struct nameidata *nd) { - if (nd->intent.open.file->f_path.dentry == NULL) - put_filp(nd->intent.open.file); - else - fput(nd->intent.open.file); + struct file *file = nd->intent.open.file; + + if (file && !IS_ERR(file)) { + if (file->f_path.dentry == NULL) + put_filp(file); + else + fput(file); + } } /* @@ -2265,8 +2269,6 @@ static struct file *finish_open(struct nameidata *nd, return filp; exit: - if (!IS_ERR(nd->intent.open.file)) - release_open_intent(nd); path_put(&nd->path); return ERR_PTR(error); } @@ -2389,8 +2391,6 @@ exit_mutex_unlock: exit_dput: path_put_conditional(path, nd); exit: - if (!IS_ERR(nd->intent.open.file)) - release_open_intent(nd); path_put(&nd->path); return ERR_PTR(error); } @@ -2477,6 +2477,7 @@ struct file *do_filp_open(int dfd, const char *pathname, } audit_inode(pathname, nd.path.dentry); filp = finish_open(&nd, open_flag, acc_mode); + release_open_intent(&nd); return filp; creat: @@ -2553,6 +2554,7 @@ out: path_put(&nd.root); if (filp == ERR_PTR(-ESTALE) && !(flags & LOOKUP_REVAL)) goto reval; + release_open_intent(&nd); return filp; exit_dput: @@ -2560,8 +2562,6 @@ exit_dput: out_path: path_put(&nd.path); out_filp: - if (!IS_ERR(nd.intent.open.file)) - release_open_intent(&nd); filp = ERR_PTR(error); goto out; } diff --git a/fs/open.c b/fs/open.c index e52389e1f05b..5a2c6ebc22b5 100644 --- a/fs/open.c +++ b/fs/open.c @@ -790,6 +790,8 @@ struct file *nameidata_to_filp(struct nameidata *nd) /* Pick up the filp from the open intent */ filp = nd->intent.open.file; + nd->intent.open.file = NULL; + /* Has the filesystem initialised the file for us? */ if (filp->f_path.dentry == NULL) { path_get(&nd->path); -- cgit v1.2.2 From d863b50ab01333659314c2034890cb76d9fdc3c7 Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Thu, 10 Feb 2011 15:01:20 -0800 Subject: vfs: call rcu_barrier after ->kill_sb() In commit fa0d7e3de6d6 ("fs: icache RCU free inodes"), we free the inode via RCU instead of freeing the inode directly. It causes a crash when we rmmod immediately after we umount the volume[1]. So we need to call rcu_barrier after we kill_sb so that the inode is freed before we do rmmod. The idea is inspired by Aneesh Kumar. rcu_barrier will wait for all callbacks to end before proceeding. The original patch was done by Tao Ma, but synchronize_rcu() is not enough here. 1.
http://marc.info/?l=linux-fsdevel&m=129680863330185&w=2 Tested-by: Tao Ma Signed-off-by: Boaz Harrosh Cc: Nick Piggin Cc: Al Viro Cc: Chris Mason Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/super.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/super.c b/fs/super.c index 74e149efed81..7e9dd4cc2c01 100644 --- a/fs/super.c +++ b/fs/super.c @@ -177,6 +177,11 @@ void deactivate_locked_super(struct super_block *s) struct file_system_type *fs = s->s_type; if (atomic_dec_and_test(&s->s_active)) { fs->kill_sb(s); + /* + * We need to call rcu_barrier so all the delayed rcu free + * inodes are flushed before we release the fs module. + */ + rcu_barrier(); put_filesystem(fs); put_super(s); } else { -- cgit v1.2.2 From 2892c15ddda6a76dc10b7499e56c0f3b892e5a69 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Sat, 12 Feb 2011 08:12:18 -0500 Subject: ext4: make grpinfo slab cache names static In 2.6.37 I was running into oopses with repeated module loads & unloads. I tracked this down to: fb1813f4 ext4: use dedicated slab caches for group_info structures (this was in addition to the features advert unload problem) The kstrdup & subsequent kfree of the cache name was causing a double free. In slub, at least, if I read it right it allocates & frees the name itself, slab seems to do something different... so in slub I think we were leaking -our- cachep->name, and double freeing the one allocated by slub. After getting lost in slab/slub/slob a bit, I just looked at other sized-caches that get allocated. jbd2, biovec, sgpool all do it more or less the way jbd2 does. Below patch follows the jbd2 method of dynamically allocating a cache at mount time from a list of static names. (This might also possibly fix a race creating the caches with parallel mounts running). [Folded in a fix from Dan Carpenter which fixed an off-by-one error in the original patch] Cc: stable@kernel.org Signed-off-by: Eric Sandeen Signed-off-by: "Theodore Ts'o" --- fs/ext4/mballoc.c | 100 ++++++++++++++++++++++++++++++++---------------------- 1 file changed, 60 insertions(+), 40 deletions(-) (limited to 'fs') diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 851f49b2f9d2..d1fe09aea73d 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -342,10 +342,15 @@ static struct kmem_cache *ext4_free_ext_cachep; /* We create slab caches for groupinfo data structures based on the * superblock block size. 
There will be one per mounted filesystem for * each unique s_blocksize_bits */ -#define NR_GRPINFO_CACHES \ - (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1) +#define NR_GRPINFO_CACHES 8 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES]; +static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = { + "ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k", + "ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k", + "ext4_groupinfo_64k", "ext4_groupinfo_128k" +}; + static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, ext4_group_t group); static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, @@ -2414,6 +2419,55 @@ err_freesgi: return -ENOMEM; } +static void ext4_groupinfo_destroy_slabs(void) +{ + int i; + + for (i = 0; i < NR_GRPINFO_CACHES; i++) { + if (ext4_groupinfo_caches[i]) + kmem_cache_destroy(ext4_groupinfo_caches[i]); + ext4_groupinfo_caches[i] = NULL; + } +} + +static int ext4_groupinfo_create_slab(size_t size) +{ + static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); + int slab_size; + int blocksize_bits = order_base_2(size); + int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; + struct kmem_cache *cachep; + + if (cache_index >= NR_GRPINFO_CACHES) + return -EINVAL; + + if (unlikely(cache_index < 0)) + cache_index = 0; + + mutex_lock(&ext4_grpinfo_slab_create_mutex); + if (ext4_groupinfo_caches[cache_index]) { + mutex_unlock(&ext4_grpinfo_slab_create_mutex); + return 0; /* Already created */ + } + + slab_size = offsetof(struct ext4_group_info, + bb_counters[blocksize_bits + 2]); + + cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], + slab_size, 0, SLAB_RECLAIM_ACCOUNT, + NULL); + + mutex_unlock(&ext4_grpinfo_slab_create_mutex); + if (!cachep) { + printk(KERN_EMERG "EXT4: no memory for groupinfo slab cache\n"); + return -ENOMEM; + } + + ext4_groupinfo_caches[cache_index] = cachep; + + return 0; +} + int ext4_mb_init(struct super_block *sb, int needs_recovery) { struct ext4_sb_info *sbi = EXT4_SB(sb); @@ -2421,9 +2475,6 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery) unsigned offset; unsigned max; int ret; - int cache_index; - struct kmem_cache *cachep; - char *namep = NULL; i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets); @@ -2440,30 +2491,9 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery) goto out; } - cache_index = sb->s_blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; - cachep = ext4_groupinfo_caches[cache_index]; - if (!cachep) { - char name[32]; - int len = offsetof(struct ext4_group_info, - bb_counters[sb->s_blocksize_bits + 2]); - - sprintf(name, "ext4_groupinfo_%d", sb->s_blocksize_bits); - namep = kstrdup(name, GFP_KERNEL); - if (!namep) { - ret = -ENOMEM; - goto out; - } - - /* Need to free the kmem_cache_name() when we - * destroy the slab */ - cachep = kmem_cache_create(namep, len, 0, - SLAB_RECLAIM_ACCOUNT, NULL); - if (!cachep) { - ret = -ENOMEM; - goto out; - } - ext4_groupinfo_caches[cache_index] = cachep; - } + ret = ext4_groupinfo_create_slab(sb->s_blocksize); + if (ret < 0) + goto out; /* order 0 is regular bitmap */ sbi->s_mb_maxs[0] = sb->s_blocksize << 3; @@ -2520,7 +2550,6 @@ out: if (ret) { kfree(sbi->s_mb_offsets); kfree(sbi->s_mb_maxs); - kfree(namep); } return ret; } @@ -2734,7 +2763,6 @@ int __init ext4_init_mballoc(void) void ext4_exit_mballoc(void) { - int i; /* * Wait for completion of call_rcu()'s on ext4_pspace_cachep * before destroying the slab cache. 
@@ -2743,15 +2771,7 @@ void ext4_exit_mballoc(void) kmem_cache_destroy(ext4_pspace_cachep); kmem_cache_destroy(ext4_ac_cachep); kmem_cache_destroy(ext4_free_ext_cachep); - - for (i = 0; i < NR_GRPINFO_CACHES; i++) { - struct kmem_cache *cachep = ext4_groupinfo_caches[i]; - if (cachep) { - char *name = (char *)kmem_cache_name(cachep); - kmem_cache_destroy(cachep); - kfree(name); - } - } + ext4_groupinfo_destroy_slabs(); ext4_remove_debugfs_entry(); } -- cgit v1.2.2 From e9e3bcecf44c04b9e6b505fd8e2eb9cea58fb94d Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Sat, 12 Feb 2011 08:17:34 -0500 Subject: ext4: serialize unaligned asynchronous DIO ext4 has a data corruption case when doing non-block-aligned asynchronous direct IO into a sparse file, as demonstrated by xfstest 240. The root cause is that while ext4 preallocates space in the hole, mappings of that space still look "new" and dio_zero_block() will zero out the unwritten portions. When more than one AIO thread is going, they both find this "new" block and race to zero out their portion; this is uncoordinated and causes data corruption. Dave Chinner fixed this for xfs by simply serializing all unaligned asynchronous direct IO. I've done the same here. The difference is that we only wait on conversions, not all IO. This is a very big hammer, and I'm not very pleased with stuffing this into ext4_file_write(). But since ext4 is DIO_LOCKING, we need to serialize it at this high level. I tried to move this into ext4_ext_direct_IO, but by then we have the i_mutex already, and we will wait on the work queue to do conversions - which must also take the i_mutex. So that won't work. This was originally exposed by qemu-kvm installing to a raw disk image with a normal sector-63 alignment. I've tested a backport of this patch with qemu, and it does avoid the corruption. It is also quite a lot slower (14 min for package installs, vs. 8 min for well-aligned) but I'll take slow correctness over fast corruption any day. Mingming suggested that we can track outstanding conversions, and wait on those so that non-sparse files won't be affected, and I've implemented that here; unaligned AIO to nonsparse files won't take a perf hit. [tytso@mit.edu: Keep the mutex as a hashed array instead of bloating the ext4 inode] [tytso@mit.edu: Fix up namespace issues so that global variables are protected with an "ext4_" prefix.] Signed-off-by: Eric Sandeen Signed-off-by: "Theodore Ts'o" --- fs/ext4/ext4.h | 10 ++++++++++ fs/ext4/extents.c | 10 ++++++---- fs/ext4/file.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- fs/ext4/page-io.c | 25 ++++++++++++----------- fs/ext4/super.c | 13 +++++++++++- 5 files changed, 100 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 0c8d97b56f34..3aa0b72b3b94 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -848,6 +848,7 @@ struct ext4_inode_info { atomic_t i_ioend_count; /* Number of outstanding io_end structs */ /* current io_end structure for async DIO write*/ ext4_io_end_t *cur_aio_dio; + atomic_t i_aiodio_unwritten; /* Nr. 
of inflight conversions pending */ spinlock_t i_block_reservation_lock; @@ -2119,6 +2120,15 @@ static inline void set_bitmap_uptodate(struct buffer_head *bh) #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) +/* For ioend & aio unwritten conversion wait queues */ +#define EXT4_WQ_HASH_SZ 37 +#define ext4_ioend_wq(v) (&ext4__ioend_wq[((unsigned long)(v)) %\ + EXT4_WQ_HASH_SZ]) +#define ext4_aio_mutex(v) (&ext4__aio_mutex[((unsigned long)(v)) %\ + EXT4_WQ_HASH_SZ]) +extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; +extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ]; + #endif /* __KERNEL__ */ #endif /* _EXT4_H */ diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 63a75810b7c3..ccce8a7e94ed 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -3174,9 +3174,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, * that this IO needs to convertion to written when IO is * completed */ - if (io) + if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { io->flag = EXT4_IO_END_UNWRITTEN; - else + atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); + } else ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); if (ext4_should_dioread_nolock(inode)) map->m_flags |= EXT4_MAP_UNINIT; @@ -3463,9 +3464,10 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, * that we need to perform convertion when IO is done. */ if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { - if (io) + if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { io->flag = EXT4_IO_END_UNWRITTEN; - else + atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); + } else ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); } diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 2e8322c8aa88..7b80d543b89e 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -55,11 +55,47 @@ static int ext4_release_file(struct inode *inode, struct file *filp) return 0; } +static void ext4_aiodio_wait(struct inode *inode) +{ + wait_queue_head_t *wq = ext4_ioend_wq(inode); + + wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0)); +} + +/* + * This tests whether the IO in question is block-aligned or not. + * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they + * are converted to written only after the IO is complete. Until they are + * mapped, these blocks appear as holes, so dio_zero_block() will assume that + * it needs to zero out portions of the start and/or end block. If 2 AIO + * threads are at work on the same unwritten block, they must be synchronized + * or one thread will zero the other's data, causing corruption. 
+ */ +static int +ext4_unaligned_aio(struct inode *inode, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) +{ + struct super_block *sb = inode->i_sb; + int blockmask = sb->s_blocksize - 1; + size_t count = iov_length(iov, nr_segs); + loff_t final_size = pos + count; + + if (pos >= inode->i_size) + return 0; + + if ((pos & blockmask) || (final_size & blockmask)) + return 1; + + return 0; +} + static ssize_t ext4_file_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; + int unaligned_aio = 0; + int ret; /* * If we have encountered a bitmap-format file, the size limit @@ -78,9 +114,31 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov, nr_segs = iov_shorten((struct iovec *)iov, nr_segs, sbi->s_bitmap_maxbytes - pos); } + } else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) && + !is_sync_kiocb(iocb))) { + unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos); } - return generic_file_aio_write(iocb, iov, nr_segs, pos); + /* Unaligned direct AIO must be serialized; see comment above */ + if (unaligned_aio) { + static unsigned long unaligned_warn_time; + + /* Warn about this once per day */ + if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ)) + ext4_msg(inode->i_sb, KERN_WARNING, + "Unaligned AIO/DIO on inode %ld by %s; " + "performance will be poor.", + inode->i_ino, current->comm); + mutex_lock(ext4_aio_mutex(inode)); + ext4_aiodio_wait(inode); + } + + ret = generic_file_aio_write(iocb, iov, nr_segs, pos); + + if (unaligned_aio) + mutex_unlock(ext4_aio_mutex(inode)); + + return ret; } static const struct vm_operations_struct ext4_file_vm_ops = { diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 4e9b0a242f4c..955cc309142f 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -32,14 +32,8 @@ static struct kmem_cache *io_page_cachep, *io_end_cachep; -#define WQ_HASH_SZ 37 -#define to_ioend_wq(v) (&ioend_wq[((unsigned long)v) % WQ_HASH_SZ]) -static wait_queue_head_t ioend_wq[WQ_HASH_SZ]; - int __init ext4_init_pageio(void) { - int i; - io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT); if (io_page_cachep == NULL) return -ENOMEM; @@ -48,9 +42,6 @@ int __init ext4_init_pageio(void) kmem_cache_destroy(io_page_cachep); return -ENOMEM; } - for (i = 0; i < WQ_HASH_SZ; i++) - init_waitqueue_head(&ioend_wq[i]); - return 0; } @@ -62,7 +53,7 @@ void ext4_exit_pageio(void) void ext4_ioend_wait(struct inode *inode) { - wait_queue_head_t *wq = to_ioend_wq(inode); + wait_queue_head_t *wq = ext4_ioend_wq(inode); wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); } @@ -87,7 +78,7 @@ void ext4_free_io_end(ext4_io_end_t *io) for (i = 0; i < io->num_io_pages; i++) put_io_page(io->pages[i]); io->num_io_pages = 0; - wq = to_ioend_wq(io->inode); + wq = ext4_ioend_wq(io->inode); if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) && waitqueue_active(wq)) wake_up_all(wq); @@ -102,6 +93,7 @@ int ext4_end_io_nolock(ext4_io_end_t *io) struct inode *inode = io->inode; loff_t offset = io->offset; ssize_t size = io->size; + wait_queue_head_t *wq; int ret = 0; ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p," @@ -126,7 +118,16 @@ int ext4_end_io_nolock(ext4_io_end_t *io) if (io->iocb) aio_complete(io->iocb, io->result, 0); /* clear the DIO AIO unwritten flag */ - io->flag &= ~EXT4_IO_END_UNWRITTEN; + if (io->flag & EXT4_IO_END_UNWRITTEN) { + io->flag &= ~EXT4_IO_END_UNWRITTEN; + /* Wake up anyone waiting on 
unwritten extent conversion */ + wq = ext4_ioend_wq(io->inode); + if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten) && + waitqueue_active(wq)) { + wake_up_all(wq); + } + } + return ret; } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 86b05486dc63..f6a318f836b2 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -833,6 +833,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) ei->i_sync_tid = 0; ei->i_datasync_tid = 0; atomic_set(&ei->i_ioend_count, 0); + atomic_set(&ei->i_aiodio_unwritten, 0); return &ei->vfs_inode; } @@ -4800,11 +4801,21 @@ static void ext4_exit_feat_adverts(void) kfree(ext4_feat); } +/* Shared across all ext4 file systems */ +wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ]; +struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ]; + static int __init ext4_init_fs(void) { - int err; + int i, err; ext4_check_flag_values(); + + for (i = 0; i < EXT4_WQ_HASH_SZ; i++) { + mutex_init(&ext4__aio_mutex[i]); + init_waitqueue_head(&ext4__ioend_wq[i]); + } + err = ext4_init_pageio(); if (err) return err; -- cgit v1.2.2 From e44718318004a5618d1dfe2d080e2862532d8e5f Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Sat, 12 Feb 2011 08:18:24 -0500 Subject: jbd2: call __jbd2_log_start_commit with j_state_lock write locked On an SMP ARM system running ext4, I've received a report that the first J_ASSERT in jbd2_journal_commit_transaction has been triggering: J_ASSERT(journal->j_running_transaction != NULL); While investigating possible causes for this problem, I noticed that __jbd2_log_start_commit() is getting called with j_state_lock only read-locked, in spite of the fact that it's possible for it might j_commit_request. Fix this by grabbing the necessary information so we can test to see if we need to start a new transaction before dropping the read lock, and then calling jbd2_log_start_commit() which will grab the write lock. Signed-off-by: "Theodore Ts'o" --- fs/jbd2/journal.c | 9 +++++++-- fs/jbd2/transaction.c | 21 ++++++++++++++------- 2 files changed, 21 insertions(+), 9 deletions(-) (limited to 'fs') diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 9e4686900f18..97e73469b2c4 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -473,7 +473,8 @@ int __jbd2_log_space_left(journal_t *journal) } /* - * Called under j_state_lock. Returns true if a transaction commit was started. + * Called with j_state_lock locked for writing. + * Returns true if a transaction commit was started. 
*/ int __jbd2_log_start_commit(journal_t *journal, tid_t target) { @@ -520,11 +521,13 @@ int jbd2_journal_force_commit_nested(journal_t *journal) { transaction_t *transaction = NULL; tid_t tid; + int need_to_start = 0; read_lock(&journal->j_state_lock); if (journal->j_running_transaction && !current->journal_info) { transaction = journal->j_running_transaction; - __jbd2_log_start_commit(journal, transaction->t_tid); + if (!tid_geq(journal->j_commit_request, transaction->t_tid)) + need_to_start = 1; } else if (journal->j_committing_transaction) transaction = journal->j_committing_transaction; @@ -535,6 +538,8 @@ int jbd2_journal_force_commit_nested(journal_t *journal) tid = transaction->t_tid; read_unlock(&journal->j_state_lock); + if (need_to_start) + jbd2_log_start_commit(journal, tid); jbd2_log_wait_commit(journal, tid); return 1; } diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index faad2bd787c7..1d1191050f99 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -117,10 +117,10 @@ static inline void update_t_max_wait(transaction_t *transaction) static int start_this_handle(journal_t *journal, handle_t *handle, int gfp_mask) { - transaction_t *transaction; - int needed; - int nblocks = handle->h_buffer_credits; - transaction_t *new_transaction = NULL; + transaction_t *transaction, *new_transaction = NULL; + tid_t tid; + int needed, need_to_start; + int nblocks = handle->h_buffer_credits; if (nblocks > journal->j_max_transaction_buffers) { printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n", @@ -222,8 +222,11 @@ repeat: atomic_sub(nblocks, &transaction->t_outstanding_credits); prepare_to_wait(&journal->j_wait_transaction_locked, &wait, TASK_UNINTERRUPTIBLE); - __jbd2_log_start_commit(journal, transaction->t_tid); + tid = transaction->t_tid; + need_to_start = !tid_geq(journal->j_commit_request, tid); read_unlock(&journal->j_state_lock); + if (need_to_start) + jbd2_log_start_commit(journal, tid); schedule(); finish_wait(&journal->j_wait_transaction_locked, &wait); goto repeat; @@ -442,7 +445,8 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; - int ret; + tid_t tid; + int need_to_start, ret; /* If we've had an abort of any type, don't even think about * actually doing the restart! */ @@ -465,8 +469,11 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask) spin_unlock(&transaction->t_handle_lock); jbd_debug(2, "restarting handle %p\n", handle); - __jbd2_log_start_commit(journal, transaction->t_tid); + tid = transaction->t_tid; + need_to_start = !tid_geq(journal->j_commit_request, tid); read_unlock(&journal->j_state_lock); + if (need_to_start) + jbd2_log_start_commit(journal, tid); lock_map_release(&handle->h_lockdep_map); handle->h_buffer_credits = nblocks; -- cgit v1.2.2 From 541ce98c10111dae7604543dda6c6f7e7a6015d8 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Fri, 14 Jan 2011 20:00:02 -0500 Subject: nfsd: don't leak dentry count on mnt_want_write failure The exit cleanup isn't quite right here. Signed-off-by: J. 
Bruce Fields --- fs/nfsd/vfs.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 641117f2188d..fda3be237773 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1812,22 +1812,22 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, host_err = mnt_want_write(fhp->fh_export->ex_path.mnt); if (host_err) - goto out_nfserr; + goto out_put; host_err = nfsd_break_lease(rdentry->d_inode); if (host_err) - goto out_put; + goto out_drop_write; if (type != S_IFDIR) host_err = vfs_unlink(dirp, rdentry); else host_err = vfs_rmdir(dirp, rdentry); -out_put: - dput(rdentry); - if (!host_err) host_err = commit_metadata(fhp); - +out_drop_write: mnt_drop_write(fhp->fh_export->ex_path.mnt); +out_put: + dput(rdentry); + out_nfserr: err = nfserrno(host_err); out: -- cgit v1.2.2 From 0af3f814ccf0a13d3e01e8115b96f1824379fc72 Mon Sep 17 00:00:00 2001 From: Benny Halevy Date: Thu, 13 Jan 2011 11:25:31 +0200 Subject: NFSD: use nfserr for status after decode_cb_op_status Bugs introduced in 85a56480191ca9f08fc775c129b9eb5c8c1f2c05 "NFSD: Update XDR decoders in NFSv4 callback client" Cc: Chuck Lever Signed-off-by: Benny Halevy Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4callback.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 3be975e18919..cde36cb0f348 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -484,7 +484,7 @@ static int decode_cb_sequence4res(struct xdr_stream *xdr, out: return status; out_default: - return nfs_cb_stat_to_errno(status); + return nfs_cb_stat_to_errno(nfserr); } /* @@ -564,11 +564,9 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, if (unlikely(status)) goto out; if (unlikely(nfserr != NFS4_OK)) - goto out_default; + status = nfs_cb_stat_to_errno(nfserr); out: return status; -out_default: - return nfs_cb_stat_to_errno(status); } /* -- cgit v1.2.2 From 3aa6e0aa8ab3e64bbfba092c64d42fd1d006b124 Mon Sep 17 00:00:00 2001 From: Konstantin Khorenko Date: Tue, 1 Feb 2011 17:16:29 +0300 Subject: NFSD: memory corruption due to writing beyond the stat array If nfsd fails to find an exported via NFS file in the readahead cache, it should increment corresponding nfsdstats counter (ra_depth[10]), but due to a bug it may instead write to ra_depth[11], corrupting the following field. In a kernel with NFSDv4 compiled in the corruption takes the form of an increment of a counter of the number of NFSv4 operation 0's received; since there is no operation 0, this is harmless. In a kernel with NFSDv4 disabled it corrupts whatever happens to be in the memory beyond nfsdstats. Signed-off-by: Konstantin Khorenko Cc: stable@kernel.org Signed-off-by: J. Bruce Fields --- fs/nfsd/vfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index fda3be237773..30c73f8a5791 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -808,7 +808,7 @@ nfsd_get_raparms(dev_t dev, ino_t ino) if (ra->p_count == 0) frap = rap; } - depth = nfsdstats.ra_size*11/10; + depth = nfsdstats.ra_size; if (!frap) { spin_unlock(&rab->pb_lock); return NULL; -- cgit v1.2.2 From 6b57d9c86d0ab11c091b6db2edff8b5553fd445b Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 31 Jan 2011 11:54:04 -0500 Subject: nfsd4: split up nfsd_break_deleg_cb We'll be adding some more code here soon. Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4state.c | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index d98d0213285d..ceb66170fda3 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2329,23 +2329,8 @@ nfs4_file_downgrade(struct nfs4_file *fp, unsigned int share_access) nfs4_file_put_access(fp, O_RDONLY); } -/* - * Spawn a thread to perform a recall on the delegation represented - * by the lease (file_lock) - * - * Called from break_lease() with lock_flocks() held. - * Note: we assume break_lease will only call this *once* for any given - * lease. - */ -static -void nfsd_break_deleg_cb(struct file_lock *fl) +static void nfsd_break_one_deleg(struct nfs4_delegation *dp) { - struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; - - dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl); - if (!dp) - return; - /* We're assuming the state code never drops its reference * without first removing the lease. Since we're in this lease * callback (and since the lease code is serialized by the kernel @@ -2360,15 +2345,28 @@ void nfsd_break_deleg_cb(struct file_lock *fl) /* only place dl_time is set. protected by lock_flocks*/ dp->dl_time = get_seconds(); + nfsd4_cb_recall(dp); +} + +/* + * Called from break_lease() with lock_flocks() held. + * Note: we assume break_lease will only call this *once* for any given + * lease. + */ +static void nfsd_break_deleg_cb(struct file_lock *fl) +{ + struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; + + BUG_ON(!dp); /* * We don't want the locks code to timeout the lease for us; * we'll remove it ourself if the delegation isn't returned - * in time. + * in time: */ fl->fl_break_time = 0; + nfsd_break_one_deleg(dp); dp->dl_file->fi_had_conflict = true; - nfsd4_cb_recall(dp); } static -- cgit v1.2.2 From 22d38c4c10e8344aa406897d99a35d585d2cb77d Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 31 Jan 2011 11:55:12 -0500 Subject: nfsd4: add helper function for lease setup Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index ceb66170fda3..65978a9aa877 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2639,6 +2639,26 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp) return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; } +static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag) +{ + struct file_lock *fl; + + fl = locks_alloc_lock(); + if (!fl) + return NULL; + locks_init_lock(fl); + fl->fl_lmops = &nfsd_lease_mng_ops; + fl->fl_flags = FL_LEASE; + fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; + fl->fl_end = OFFSET_MAX; + fl->fl_owner = (fl_owner_t)dp; + fl->fl_file = dp->dl_vfs_file; + BUG_ON(!fl->fl_file); + fl->fl_pid = current->tgid; + dp->dl_flock = fl; + return fl; +} + /* * Attempt to hand out a delegation. */ @@ -2684,20 +2704,9 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta goto out; } status = -ENOMEM; - fl = locks_alloc_lock(); + fl = nfs4_alloc_init_lease(dp, flag); if (!fl) goto out; - locks_init_lock(fl); - fl->fl_lmops = &nfsd_lease_mng_ops; - fl->fl_flags = FL_LEASE; - fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? 
F_RDLCK: F_WRLCK; - fl->fl_end = OFFSET_MAX; - fl->fl_owner = (fl_owner_t)dp; - fl->fl_file = find_readable_file(stp->st_file); - BUG_ON(!fl->fl_file); - fl->fl_pid = current->tgid; - dp->dl_flock = fl; - /* vfs_setlease checks to see if delegation should be handed out. * the lock_manager callback fl_change is used */ -- cgit v1.2.2 From dd239cc05f0ad9f582dd83d88a4fb5edcc57a026 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 31 Jan 2011 17:14:55 -0500 Subject: nfsd4: fix leak on allocation error Also share some common exit code. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 65978a9aa877..099d6fa64f7f 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2699,14 +2699,12 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta } dp = alloc_init_deleg(sop->so_client, stp, fh, flag); - if (dp == NULL) { - flag = NFS4_OPEN_DELEGATE_NONE; - goto out; - } + if (dp == NULL) + goto out_no_deleg; status = -ENOMEM; fl = nfs4_alloc_init_lease(dp, flag); if (!fl) - goto out; + goto out_free; /* vfs_setlease checks to see if delegation should be handed out. * the lock_manager callback fl_change is used */ @@ -2714,9 +2712,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta dprintk("NFSD: setlease failed [%d], no delegation\n", status); dp->dl_flock = NULL; locks_free_lock(fl); - unhash_delegation(dp); - flag = NFS4_OPEN_DELEGATE_NONE; - goto out; + goto out_free; } memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); @@ -2729,6 +2725,12 @@ out: && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) dprintk("NFSD: WARNING: refusing delegation reclaim\n"); open->op_delegate_type = flag; + return; +out_free: + unhash_delegation(dp); +out_no_deleg: + flag = NFS4_OPEN_DELEGATE_NONE; + goto out; } /* -- cgit v1.2.2 From edab9782b5a16abb8d139d261e81e13ef0be35a9 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 31 Jan 2011 17:58:10 -0500 Subject: nfsd4: split lease setting into separate function Splitting some code into a separate function which we'll be adding some more to. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 099d6fa64f7f..dbb2141cf88f 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -2659,6 +2659,23 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int f return fl; } +static int nfs4_setlease(struct nfs4_delegation *dp, int flag) +{ + struct file_lock *fl; + int status; + + fl = nfs4_alloc_init_lease(dp, flag); + if (!fl) + return -ENOMEM; + status = vfs_setlease(dp->dl_vfs_file, fl->fl_type, &fl); + if (status) { + dp->dl_flock = NULL; + locks_free_lock(fl); + return -ENOMEM; + } + return 0; +} + /* * Attempt to hand out a delegation. 
*/ @@ -2668,7 +2685,6 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta struct nfs4_delegation *dp; struct nfs4_stateowner *sop = stp->st_stateowner; int cb_up; - struct file_lock *fl; int status, flag = 0; cb_up = nfsd4_cb_channel_good(sop->so_client); @@ -2701,19 +2717,9 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta dp = alloc_init_deleg(sop->so_client, stp, fh, flag); if (dp == NULL) goto out_no_deleg; - status = -ENOMEM; - fl = nfs4_alloc_init_lease(dp, flag); - if (!fl) - goto out_free; - /* vfs_setlease checks to see if delegation should be handed out. - * the lock_manager callback fl_change is used - */ - if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) { - dprintk("NFSD: setlease failed [%d], no delegation\n", status); - dp->dl_flock = NULL; - locks_free_lock(fl); + status = nfs4_setlease(dp, flag); + if (status) goto out_free; - } memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); -- cgit v1.2.2 From 65bc58f5187e2ff4011ef1bd3082e83cd1b036f1 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 7 Feb 2011 15:44:12 -0500 Subject: nfsd4: remove unused deleg dprintk's. These aren't all that useful, and get in the way of the next steps. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index dbb2141cf88f..d978192838a3 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -958,8 +958,6 @@ expire_client(struct nfs4_client *clp) spin_lock(&recall_lock); while (!list_empty(&clp->cl_delegations)) { dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); - dprintk("NFSD: expire client. dp %p, fp %p\n", dp, - dp->dl_flock); list_del_init(&dp->dl_perclnt); list_move(&dp->dl_recall_lru, &reaplist); } @@ -2931,8 +2929,6 @@ nfs4_laundromat(void) test_val = u; break; } - dprintk("NFSD: purging unused delegation dp %p, fp %p\n", - dp, dp->dl_flock); list_move(&dp->dl_recall_lru, &reaplist); } spin_unlock(&recall_lock); -- cgit v1.2.2 From 5d926e8c2f46dc09f4ddde86644a5f1d0726a470 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 7 Feb 2011 16:53:46 -0500 Subject: nfsd4: modify fi_delegations under recall_lock Modify fi_delegations only under the recall_lock, allowing us to use that list on lease breaks. Also some trivial cleanup to simplify later changes. Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index d978192838a3..8b6cd3cf4835 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -277,9 +277,9 @@ nfs4_close_delegation(struct nfs4_delegation *dp) static void unhash_delegation(struct nfs4_delegation *dp) { - list_del_init(&dp->dl_perfile); list_del_init(&dp->dl_perclnt); spin_lock(&recall_lock); + list_del_init(&dp->dl_perfile); list_del_init(&dp->dl_recall_lru); spin_unlock(&recall_lock); nfs4_close_delegation(dp); @@ -2336,9 +2336,7 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp) * it's safe to take a reference: */ atomic_inc(&dp->dl_count); - spin_lock(&recall_lock); list_add_tail(&dp->dl_recall_lru, &del_recall_lru); - spin_unlock(&recall_lock); /* only place dl_time is set. 
protected by lock_flocks*/ dp->dl_time = get_seconds(); @@ -2363,8 +2361,10 @@ static void nfsd_break_deleg_cb(struct file_lock *fl) */ fl->fl_break_time = 0; - nfsd_break_one_deleg(dp); + spin_lock(&recall_lock); dp->dl_file->fi_had_conflict = true; + nfsd_break_one_deleg(dp); + spin_unlock(&recall_lock); } static -- cgit v1.2.2 From acfdf5c383b38f7f4dddae41b97c97f1ae058f49 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Mon, 31 Jan 2011 19:20:39 -0500 Subject: nfsd4: acquire only one lease per file Instead of acquiring one lease each time another client opens a file, nfsd can acquire just one lease to represent all of them, and reference count it to determine when to release it. This fixes a regression introduced by c45821d263a8a5109d69a9e8942b8d65bcd5f31a "locks: eliminate fl_mylease callback": after that patch, only the struct file * is used to determine who owns a given lease. But since we recently converted the server to share a single struct file per open, if we acquire multiple leases on the same file from nfsd, it then becomes impossible on unlocking a lease to determine which of those leases (all of whom share the same struct file *) we meant to remove. Thanks to Takashi Iwai for catching a bug in a previous version of this patch. Tested-by: Takashi Iwai Signed-off-by: J. Bruce Fields --- fs/nfsd/nfs4state.c | 95 +++++++++++++++++++++++++++++++---------------------- fs/nfsd/state.h | 5 +-- 2 files changed, 58 insertions(+), 42 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 8b6cd3cf4835..54b60bfceb8d 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -230,9 +230,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f dp->dl_client = clp; get_nfs4_file(fp); dp->dl_file = fp; - dp->dl_vfs_file = find_readable_file(fp); - get_file(dp->dl_vfs_file); - dp->dl_flock = NULL; dp->dl_type = type; dp->dl_stateid.si_boot = boot_time; dp->dl_stateid.si_stateownerid = current_delegid++; @@ -241,8 +238,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f fh_copy_shallow(&dp->dl_fh, ¤t_fh->fh_handle); dp->dl_time = 0; atomic_set(&dp->dl_count, 1); - list_add(&dp->dl_perfile, &fp->fi_delegations); - list_add(&dp->dl_perclnt, &clp->cl_delegations); INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc); return dp; } @@ -253,24 +248,18 @@ nfs4_put_delegation(struct nfs4_delegation *dp) if (atomic_dec_and_test(&dp->dl_count)) { dprintk("NFSD: freeing dp %p\n",dp); put_nfs4_file(dp->dl_file); - fput(dp->dl_vfs_file); kmem_cache_free(deleg_slab, dp); num_delegations--; } } -/* Remove the associated file_lock first, then remove the delegation. - * lease_modify() is called to remove the FS_LEASE file_lock from - * the i_flock list, eventually calling nfsd's lock_manager - * fl_release_callback. - */ -static void -nfs4_close_delegation(struct nfs4_delegation *dp) +static void nfs4_put_deleg_lease(struct nfs4_file *fp) { - dprintk("NFSD: close_delegation dp %p\n",dp); - /* XXX: do we even need this check?: */ - if (dp->dl_flock) - vfs_setlease(dp->dl_vfs_file, F_UNLCK, &dp->dl_flock); + if (atomic_dec_and_test(&fp->fi_delegees)) { + vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease); + fp->fi_lease = NULL; + fp->fi_deleg_file = NULL; + } } /* Called under the state lock. 
*/ @@ -282,7 +271,7 @@ unhash_delegation(struct nfs4_delegation *dp) list_del_init(&dp->dl_perfile); list_del_init(&dp->dl_recall_lru); spin_unlock(&recall_lock); - nfs4_close_delegation(dp); + nfs4_put_deleg_lease(dp->dl_file); nfs4_put_delegation(dp); } @@ -2076,6 +2065,7 @@ alloc_init_file(struct inode *ino) fp->fi_inode = igrab(ino); fp->fi_id = current_fileid++; fp->fi_had_conflict = false; + fp->fi_lease = NULL; memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); memset(fp->fi_access, 0, sizeof(fp->fi_access)); spin_lock(&recall_lock); @@ -2344,26 +2334,26 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp) nfsd4_cb_recall(dp); } -/* - * Called from break_lease() with lock_flocks() held. - * Note: we assume break_lease will only call this *once* for any given - * lease. - */ +/* Called from break_lease() with lock_flocks() held. */ static void nfsd_break_deleg_cb(struct file_lock *fl) { - struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner; + struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner; + struct nfs4_delegation *dp; - BUG_ON(!dp); + BUG_ON(!fp); + /* We assume break_lease is only called once per lease: */ + BUG_ON(fp->fi_had_conflict); /* * We don't want the locks code to timeout the lease for us; - * we'll remove it ourself if the delegation isn't returned + * we'll remove it ourself if a delegation isn't returned * in time: */ fl->fl_break_time = 0; spin_lock(&recall_lock); - dp->dl_file->fi_had_conflict = true; - nfsd_break_one_deleg(dp); + fp->fi_had_conflict = true; + list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) + nfsd_break_one_deleg(dp); spin_unlock(&recall_lock); } @@ -2455,13 +2445,15 @@ nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) static struct nfs4_delegation * find_delegation_file(struct nfs4_file *fp, stateid_t *stid) { - struct nfs4_delegation *dp; + struct nfs4_delegation *dp = NULL; + spin_lock(&recall_lock); list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) - return dp; + break; } - return NULL; + spin_unlock(&recall_lock); + return dp; } int share_access_to_flags(u32 share_access) @@ -2649,28 +2641,51 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int f fl->fl_flags = FL_LEASE; fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? 
F_RDLCK: F_WRLCK; fl->fl_end = OFFSET_MAX; - fl->fl_owner = (fl_owner_t)dp; - fl->fl_file = dp->dl_vfs_file; - BUG_ON(!fl->fl_file); + fl->fl_owner = (fl_owner_t)(dp->dl_file); fl->fl_pid = current->tgid; - dp->dl_flock = fl; return fl; } static int nfs4_setlease(struct nfs4_delegation *dp, int flag) { + struct nfs4_file *fp = dp->dl_file; struct file_lock *fl; int status; fl = nfs4_alloc_init_lease(dp, flag); if (!fl) return -ENOMEM; - status = vfs_setlease(dp->dl_vfs_file, fl->fl_type, &fl); + fl->fl_file = find_readable_file(fp); + list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations); + status = vfs_setlease(fl->fl_file, fl->fl_type, &fl); if (status) { - dp->dl_flock = NULL; + list_del_init(&dp->dl_perclnt); locks_free_lock(fl); return -ENOMEM; } + fp->fi_lease = fl; + fp->fi_deleg_file = fl->fl_file; + get_file(fp->fi_deleg_file); + atomic_set(&fp->fi_delegees, 1); + list_add(&dp->dl_perfile, &fp->fi_delegations); + return 0; +} + +static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag) +{ + struct nfs4_file *fp = dp->dl_file; + + if (!fp->fi_lease) + return nfs4_setlease(dp, flag); + spin_lock(&recall_lock); + if (fp->fi_had_conflict) { + spin_unlock(&recall_lock); + return -EAGAIN; + } + atomic_inc(&fp->fi_delegees); + list_add(&dp->dl_perfile, &fp->fi_delegations); + spin_unlock(&recall_lock); + list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations); return 0; } @@ -2715,7 +2730,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta dp = alloc_init_deleg(sop->so_client, stp, fh, flag); if (dp == NULL) goto out_no_deleg; - status = nfs4_setlease(dp, flag); + status = nfs4_set_delegation(dp, flag); if (status) goto out_free; @@ -2731,7 +2746,7 @@ out: open->op_delegate_type = flag; return; out_free: - unhash_delegation(dp); + nfs4_put_delegation(dp); out_no_deleg: flag = NFS4_OPEN_DELEGATE_NONE; goto out; @@ -3139,7 +3154,7 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, goto out; renew_client(dp->dl_client); if (filpp) { - *filpp = find_readable_file(dp->dl_file); + *filpp = dp->dl_file->fi_deleg_file; BUG_ON(!*filpp); } } else { /* open or lock stateid */ diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 3074656ba7bf..2d31224b07bf 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -83,8 +83,6 @@ struct nfs4_delegation { atomic_t dl_count; /* ref count */ struct nfs4_client *dl_client; struct nfs4_file *dl_file; - struct file *dl_vfs_file; - struct file_lock *dl_flock; u32 dl_type; time_t dl_time; /* For recall: */ @@ -379,6 +377,9 @@ struct nfs4_file { */ atomic_t fi_readers; atomic_t fi_writers; + struct file *fi_deleg_file; + struct file_lock *fi_lease; + atomic_t fi_delegees; struct inode *fi_inode; u32 fi_id; /* used with stateowner->so_id * for stateid_hashtbl hash */ -- cgit v1.2.2 From 83f6b0c18204f68961f58b9f69e5dba0d36056a2 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Sun, 6 Feb 2011 16:46:30 -0500 Subject: nfsd: break lease on unlink due to rename 4795bb37effb7b8fe77e2d2034545d062d3788a8 "nfsd: break lease on unlink, link, and rename", only broke the lease on the file that was being renamed, and didn't handle the case where the target path refers to an already-existing file that will be unlinked by a rename--in that case the target file should have any leases broken as well. Signed-off-by: J. 
Bruce Fields --- fs/nfsd/vfs.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'fs') diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 30c73f8a5791..da1d9701f8e4 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1742,6 +1742,13 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, goto out_dput_new; host_err = nfsd_break_lease(odentry->d_inode); + if (host_err) + goto out_drop_write; + if (ndentry->d_inode) { + host_err = nfsd_break_lease(ndentry->d_inode); + if (host_err) + goto out_drop_write; + } if (host_err) goto out_drop_write; host_err = vfs_rename(fdir, odentry, tdir, ndentry); -- cgit v1.2.2 From eb14ab8ed24a0405fd056068b28c33a1cd846024 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 10 Feb 2011 12:35:00 -0500 Subject: Btrfs: fix page->private races There is a race where btrfs_releasepage can drop the page->private contents just as alloc_extent_buffer is setting up pages for metadata. Because of how the Btrfs page flags work, this results in us skipping the crc on the page during IO. This patch sovles the race by waiting until after the extent buffer is inserted into the radix tree before it sets page private. Signed-off-by: Chris Mason --- fs/btrfs/disk-io.c | 8 ++++++-- fs/btrfs/extent_io.c | 38 +++++++++++++++++++++++++++++++++++--- 2 files changed, 41 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b36eeef19194..3e1ea3e0477e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -359,10 +359,14 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) tree = &BTRFS_I(page->mapping->host)->io_tree; - if (page->private == EXTENT_PAGE_PRIVATE) + if (page->private == EXTENT_PAGE_PRIVATE) { + WARN_ON(1); goto out; - if (!page->private) + } + if (!page->private) { + WARN_ON(1); goto out; + } len = page->private >> 2; WARN_ON(len == 0); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8862dda46ff6..0418bf2c9757 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1946,6 +1946,7 @@ void set_page_extent_mapped(struct page *page) static void set_page_extent_head(struct page *page, unsigned long len) { + WARN_ON(!PagePrivate(page)); set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2); } @@ -3195,7 +3196,13 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, } if (!PageUptodate(p)) uptodate = 0; - unlock_page(p); + + /* + * see below about how we avoid a nasty race with release page + * and why we unlock later + */ + if (i != 0) + unlock_page(p); } if (uptodate) set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); @@ -3219,9 +3226,26 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, atomic_inc(&eb->refs); spin_unlock(&tree->buffer_lock); radix_tree_preload_end(); + + /* + * there is a race where release page may have + * tried to find this extent buffer in the radix + * but failed. It will tell the VM it is safe to + * reclaim the, and it will clear the page private bit. 
+ * We must make sure to set the page private bit properly + * after the extent buffer is in the radix tree so + * it doesn't get lost + */ + set_page_extent_mapped(eb->first_page); + set_page_extent_head(eb->first_page, eb->len); + if (!page0) + unlock_page(eb->first_page); return eb; free_eb: + if (eb->first_page && !page0) + unlock_page(eb->first_page); + if (!atomic_dec_and_test(&eb->refs)) return exists; btrfs_release_extent_buffer(eb); @@ -3272,10 +3296,11 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, continue; lock_page(page); + WARN_ON(!PagePrivate(page)); + + set_page_extent_mapped(page); if (i == 0) set_page_extent_head(page, eb->len); - else - set_page_private(page, EXTENT_PAGE_PRIVATE); clear_page_dirty_for_io(page); spin_lock_irq(&page->mapping->tree_lock); @@ -3465,6 +3490,13 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, for (i = start_i; i < num_pages; i++) { page = extent_buffer_page(eb, i); + + WARN_ON(!PagePrivate(page)); + + set_page_extent_mapped(page); + if (i == 0) + set_page_extent_head(page, eb->len); + if (inc_all_pages) page_cache_get(page); if (!PageUptodate(page)) { -- cgit v1.2.2 From e3f24cc521cb7ba60ac137abd1939e4e03435e80 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Mon, 14 Feb 2011 12:52:08 -0500 Subject: Btrfs: don't release pages when we can't clear the uptodate bits Btrfs tracks uptodate state in an rbtree as well as in the page bits. This is supposed to enable us to use block sizes other than the page size, but there are a few parts still missing before that completely works. But, our readpage routine trusts this additional range based tracking of uptodateness, much in the same way the buffer head up to date bits are trusted for the other filesystems. The problem is that sometimes we need to allocate memory in order to split records in the rbtree, even when we are just clearing bits. This can be difficult when our clearing function is called GFP_ATOMIC, which can happen in the releasepage path. So, what happens today looks like this: releasepage called with GFP_ATOMIC btrfs_releasepage calls clear_extent_bit clear_extent_bit fails to allocate ram, leaving the up to date bit set btrfs_releasepage returns success The end result is the page being gone, but btrfs thinking the range is up to date. Later on if someone tries to read that same page, the btrfs readpage code will return immediately thinking the page is already up to date. This commit fixes things to fail the releasepage when we can't clear the extent state bits. It covers both data pages and metadata tree blocks. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 0418bf2c9757..e7aeba242701 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2822,9 +2822,17 @@ int try_release_extent_state(struct extent_map_tree *map, * at this point we can safely clear everything except the * locked bit and the nodatasum bit */ - clear_extent_bit(tree, start, end, + ret = clear_extent_bit(tree, start, end, ~(EXTENT_LOCKED | EXTENT_NODATASUM), 0, 0, NULL, mask); + + /* if clear_extent_bit failed for enomem reasons, + * we can't allow the release to continue. 
+ */ + if (ret < 0) + ret = 0; + else + ret = 1; } return ret; } -- cgit v1.2.2 From 6848ad6461e551849ba3c32d945d4f45e96453a6 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 14 Feb 2011 16:00:03 -0500 Subject: Btrfs: Fix balance panic Mark the cloned backref_node as checked in clone_backref_node() Signed-off-by: Yan, Zheng Signed-off-by: Chris Mason --- fs/btrfs/relocation.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 1f5556acb530..0825e4ed9447 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1157,6 +1157,7 @@ static int clone_backref_node(struct btrfs_trans_handle *trans, new_node->bytenr = dest->node->start; new_node->level = node->level; new_node->lowest = node->lowest; + new_node->checked = 1; new_node->root = dest; if (!node->lowest) { -- cgit v1.2.2 From 51788b1bdd0d68345bab0af4301e7fa429277228 Mon Sep 17 00:00:00 2001 From: Dan Rosenberg Date: Mon, 14 Feb 2011 16:04:23 -0500 Subject: btrfs: prevent heap corruption in btrfs_ioctl_space_info() Commit bf5fc093c5b625e4259203f1cee7ca73488a5620 refactored btrfs_ioctl_space_info() and introduced several security issues. space_args.space_slots is an unsigned 64-bit type controlled by a possibly unprivileged caller. The comparison as a signed int type allows providing values that are treated as negative and cause the subsequent allocation size calculation to wrap, or be truncated to 0. By providing a size that's truncated to 0, kmalloc() will return ZERO_SIZE_PTR. It's also possible to provide a value smaller than the slot count. The subsequent loop ignores the allocation size when copying data in, resulting in a heap overflow or write to ZERO_SIZE_PTR. The fix changes the slot count type and comparison typecast to u64, which prevents truncation or signedness errors, and also ensures that we don't copy more data than we've allocated in the subsequent loop. Note that zero-size allocations are no longer possible since there is already an explicit check for space_args.space_slots being 0 and truncation of this value is no longer an issue. 
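For illustration, a standalone userspace sketch (not the btrfs code itself) of how the pre-fix min_t(int, ...) cast lets a nonzero 64-bit slot count truncate to zero:

    /* Standalone sketch of the truncation hazard; min_t is simplified here
     * and space_slots is an assumed attacker-chosen value. */
    #include <stdint.h>
    #include <stdio.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
            uint64_t space_slots = 0x100000000ULL;  /* 2^32, passes the != 0 check */
            int slot_count = 4096;                  /* slots actually available */

            slot_count = min_t(int, space_slots, slot_count);
            /* (int)2^32 is 0 with a 32-bit int, so slot_count becomes 0 and the
             * later kmalloc(sizeof(*dest) * slot_count) is a zero-size
             * allocation while the copy loop still writes entries. */
            printf("slot_count = %d\n", slot_count);        /* prints 0 */
            return 0;
    }

Comparing and clamping as u64, as the patch does, keeps the user-supplied value from being reinterpreted or truncated before it bounds the allocation.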
Signed-off-by: Dan Rosenberg Signed-off-by: Josef Bacik Reviewed-by: Josef Bacik Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 02d224e8c83f..be2d4f6aaa5e 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2208,7 +2208,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) int num_types = 4; int alloc_size; int ret = 0; - int slot_count = 0; + u64 slot_count = 0; int i, c; if (copy_from_user(&space_args, @@ -2247,7 +2247,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) goto out; } - slot_count = min_t(int, space_args.space_slots, slot_count); + slot_count = min_t(u64, space_args.space_slots, slot_count); alloc_size = sizeof(*dest) * slot_count; @@ -2267,6 +2267,9 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) for (i = 0; i < num_types; i++) { struct btrfs_space_info *tmp; + if (!slot_count) + break; + info = NULL; rcu_read_lock(); list_for_each_entry_rcu(tmp, &root->fs_info->space_info, @@ -2288,7 +2291,10 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) memcpy(dest, &space, sizeof(space)); dest++; space_args.total_spaces++; + slot_count--; } + if (!slot_count) + break; } up_read(&info->groups_sem); } -- cgit v1.2.2 From 67100f255dba284bcbb5ce795355dad1cff35658 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Sun, 6 Feb 2011 19:58:21 +0000 Subject: Btrfs - Fix memory leak in btrfs_init_new_device() Memory allocated by calling kstrdup() should be freed. Signed-off-by: Ilya Dryomov Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 7cad59353b09..dadaaa8005c8 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1603,12 +1603,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) ret = find_next_devid(root, &device->devid); if (ret) { + kfree(device->name); kfree(device); goto error; } trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { + kfree(device->name); kfree(device); ret = PTR_ERR(trans); goto error; -- cgit v1.2.2 From c26a920373a983b52223eed5a13b97404d8b4158 Mon Sep 17 00:00:00 2001 From: Tsutomu Itoh Date: Mon, 14 Feb 2011 00:45:29 +0000 Subject: Btrfs: check return value of alloc_extent_map() I add the check on the return value of alloc_extent_map() to several places. In addition, alloc_extent_map() returns only the address or NULL. Therefore, check by IS_ERR() is unnecessary. So, I remove IS_ERR() checking. 
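A hedged sketch of the resulting caller pattern (illustrative fragment, not code copied from btrfs):

    /* Illustrative only: alloc_extent_map() returns a valid pointer or NULL,
     * never an ERR_PTR-encoded errno, so NULL is the only failure to test. */
    static int sketch_map_range(u64 start, u64 len)
    {
            struct extent_map *em = alloc_extent_map(GFP_NOFS);

            if (!em)                /* IS_ERR(em) here would be dead code */
                    return -ENOMEM;
            em->start = start;
            em->len = len;
            /* ... insert em into the extent map tree ... */
            return 0;
    }

On paths that cannot recover from allocation failure the patch uses BUG_ON(!em) instead of returning an error.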
Signed-off-by: Tsutomu Itoh Signed-off-by: Chris Mason --- fs/btrfs/extent-tree.c | 2 +- fs/btrfs/extent_map.c | 4 ++-- fs/btrfs/file.c | 1 + fs/btrfs/inode.c | 3 +++ 4 files changed, 7 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 565e22d77b1b..a7aaa10c5302 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6584,7 +6584,7 @@ static noinline int relocate_data_extent(struct inode *reloc_inode, u64 end = start + extent_key->offset - 1; em = alloc_extent_map(GFP_NOFS); - BUG_ON(!em || IS_ERR(em)); + BUG_ON(!em); em->start = start; em->len = extent_key->offset; diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index b0e1fce12530..2b6c12e983b3 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -51,8 +51,8 @@ struct extent_map *alloc_extent_map(gfp_t mask) { struct extent_map *em; em = kmem_cache_alloc(extent_map_cache, mask); - if (!em || IS_ERR(em)) - return em; + if (!em) + return NULL; em->in_tree = 0; em->flags = 0; em->compress_type = BTRFS_COMPRESS_NONE; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index b0ff34b96607..65338a1d14ad 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -185,6 +185,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, split = alloc_extent_map(GFP_NOFS); if (!split2) split2 = alloc_extent_map(GFP_NOFS); + BUG_ON(!split || !split2); write_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c9bc0afdbfc6..8d392ed73d57 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -644,6 +644,7 @@ retry: async_extent->ram_size - 1, 0); em = alloc_extent_map(GFP_NOFS); + BUG_ON(!em); em->start = async_extent->start; em->len = async_extent->ram_size; em->orig_start = em->start; @@ -820,6 +821,7 @@ static noinline int cow_file_range(struct inode *inode, BUG_ON(ret); em = alloc_extent_map(GFP_NOFS); + BUG_ON(!em); em->start = start; em->orig_start = em->start; ram_size = ins.offset; @@ -1169,6 +1171,7 @@ out_check: struct extent_map_tree *em_tree; em_tree = &BTRFS_I(inode)->extent_tree; em = alloc_extent_map(GFP_NOFS); + BUG_ON(!em); em->start = cur_offset; em->orig_start = em->start; em->len = num_bytes; -- cgit v1.2.2 From 844a391799c25d9ba85cbce33e4697db06083ec6 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 15 Feb 2011 00:38:26 -0500 Subject: nothing in do_follow_link() is going to see RCU Signed-off-by: Al Viro --- fs/namei.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index ec4b2d0190a8..9ce6d272f4f2 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -668,9 +668,6 @@ force_reval_path(struct path *path, struct nameidata *nd) return 0; if (!status) { - /* Don't d_invalidate in rcu-walk mode */ - if (nameidata_drop_rcu(nd)) - return -ECHILD; d_invalidate(dentry); status = -ESTALE; } @@ -777,6 +774,8 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p) int error; struct dentry *dentry = link->dentry; + BUG_ON(nd->flags & LOOKUP_RCU); + touch_atime(link->mnt, dentry); nd_set_link(nd, NULL); @@ -811,6 +810,11 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd) { void *cookie; int err = -ELOOP; + + /* We drop rcu-walk here */ + if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry)) + return -ECHILD; + if (current->link_count >= MAX_NESTED_LINKS) goto loop; if (current->total_link_count >= 40) @@ -1419,9 +1423,6 @@ exec_again: goto out_dput; if 
(inode->i_op->follow_link) { - /* We commonly drop rcu-walk here */ - if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry)) - return -ECHILD; BUG_ON(inode != next.dentry->d_inode); err = do_follow_link(&next, nd); if (err) @@ -1467,8 +1468,6 @@ last_component: break; if (inode && unlikely(inode->i_op->follow_link) && (lookup_flags & LOOKUP_FOLLOW)) { - if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry)) - return -ECHILD; BUG_ON(inode != next.dentry->d_inode); err = do_follow_link(&next, nd); if (err) -- cgit v1.2.2 From 24643087e748bf192f1182766716e522dc1c972f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 15 Feb 2011 01:26:22 -0500 Subject: in do_lookup() split RCU and non-RCU cases of need_revalidate and use unlikely() instead of gotos, for fsck sake... Signed-off-by: Al Viro --- fs/namei.c | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 9ce6d272f4f2..7609bacc7046 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1259,9 +1259,15 @@ static int do_lookup(struct nameidata *nd, struct qstr *name, return -ECHILD; nd->seq = seq; - if (dentry->d_flags & DCACHE_OP_REVALIDATE) - goto need_revalidate; -done2: + if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { + dentry = do_revalidate(dentry, nd); + if (!dentry) + goto need_lookup; + if (IS_ERR(dentry)) + goto fail; + if (!(nd->flags & LOOKUP_RCU)) + goto done; + } path->mnt = mnt; path->dentry = dentry; if (likely(__follow_mount_rcu(nd, path, inode, false))) @@ -1274,8 +1280,13 @@ done2: if (!dentry) goto need_lookup; found: - if (dentry->d_flags & DCACHE_OP_REVALIDATE) - goto need_revalidate; + if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { + dentry = do_revalidate(dentry, nd); + if (!dentry) + goto need_lookup; + if (IS_ERR(dentry)) + goto fail; + } done: path->mnt = mnt; path->dentry = dentry; @@ -1317,16 +1328,6 @@ need_lookup: mutex_unlock(&dir->i_mutex); goto found; -need_revalidate: - dentry = do_revalidate(dentry, nd); - if (!dentry) - goto need_lookup; - if (IS_ERR(dentry)) - goto fail; - if (nd->flags & LOOKUP_RCU) - goto done2; - goto done; - fail: return PTR_ERR(dentry); } -- cgit v1.2.2 From f5e1c1c1afc1d979e2ac6a24cc99ba7143639f4d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 15 Feb 2011 01:32:55 -0500 Subject: split do_revalidate() into RCU and non-RCU cases fixing oopsen in lookup_one_len() Signed-off-by: Al Viro --- fs/namei.c | 47 ++++++++++++++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 17 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 7609bacc7046..a98f7f141780 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -592,12 +592,10 @@ static int d_revalidate(struct dentry *dentry, struct nameidata *nd) return status; } -static inline struct dentry * +static struct dentry * do_revalidate(struct dentry *dentry, struct nameidata *nd) { - int status; - - status = d_revalidate(dentry, nd); + int status = d_revalidate(dentry, nd); if (unlikely(status <= 0)) { /* * The dentry failed validation. @@ -606,24 +604,39 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd) * to return a fail status. 
*/ if (status < 0) { - /* If we're in rcu-walk, we don't have a ref */ - if (!(nd->flags & LOOKUP_RCU)) - dput(dentry); + dput(dentry); dentry = ERR_PTR(status); - - } else { - /* Don't d_invalidate in rcu-walk mode */ - if (nameidata_dentry_drop_rcu_maybe(nd, dentry)) - return ERR_PTR(-ECHILD); - if (!d_invalidate(dentry)) { - dput(dentry); - dentry = NULL; - } + } else if (!d_invalidate(dentry)) { + dput(dentry); + dentry = NULL; } } return dentry; } +static inline struct dentry * +do_revalidate_rcu(struct dentry *dentry, struct nameidata *nd) +{ + int status = dentry->d_op->d_revalidate(dentry, nd); + if (likely(status > 0)) + return dentry; + if (status == -ECHILD) { + if (nameidata_dentry_drop_rcu(nd, dentry)) + return ERR_PTR(-ECHILD); + return do_revalidate(dentry, nd); + } + if (status < 0) + return ERR_PTR(status); + /* Don't d_invalidate in rcu-walk mode */ + if (nameidata_dentry_drop_rcu(nd, dentry)) + return ERR_PTR(-ECHILD); + if (!d_invalidate(dentry)) { + dput(dentry); + dentry = NULL; + } + return dentry; +} + static inline int need_reval_dot(struct dentry *dentry) { if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE))) @@ -1260,7 +1273,7 @@ static int do_lookup(struct nameidata *nd, struct qstr *name, nd->seq = seq; if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { - dentry = do_revalidate(dentry, nd); + dentry = do_revalidate_rcu(dentry, nd); if (!dentry) goto need_lookup; if (IS_ERR(dentry)) -- cgit v1.2.2 From f60aef7ec625236a6366722bb1be7b37596bf0ae Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 15 Feb 2011 01:35:28 -0500 Subject: drop out of RCU in return_reval ... thus killing the need to handle drop-from-RCU in d_revalidate() Signed-off-by: Al Viro --- fs/namei.c | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index a98f7f141780..10635d329175 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -571,25 +571,9 @@ void release_open_intent(struct nameidata *nd) } } -/* - * Call d_revalidate and handle filesystems that request rcu-walk - * to be dropped. This may be called and return in rcu-walk mode, - * regardless of success or error. If -ECHILD is returned, the caller - * must return -ECHILD back up the path walk stack so path walk may - * be restarted in ref-walk mode. - */ -static int d_revalidate(struct dentry *dentry, struct nameidata *nd) +static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd) { - int status; - - status = dentry->d_op->d_revalidate(dentry, nd); - if (status == -ECHILD) { - if (nameidata_dentry_drop_rcu(nd, dentry)) - return status; - status = dentry->d_op->d_revalidate(dentry, nd); - } - - return status; + return dentry->d_op->d_revalidate(dentry, nd); } static struct dentry * @@ -617,7 +601,7 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd) static inline struct dentry * do_revalidate_rcu(struct dentry *dentry, struct nameidata *nd) { - int status = dentry->d_op->d_revalidate(dentry, nd); + int status = d_revalidate(dentry, nd); if (likely(status > 0)) return dentry; if (status == -ECHILD) { @@ -1517,12 +1501,15 @@ return_reval: * We may need to check the cached dentry for staleness. 
*/ if (need_reval_dot(nd->path.dentry)) { + if (nameidata_drop_rcu_last_maybe(nd)) + return -ECHILD; /* Note: we do not d_invalidate() */ err = d_revalidate(nd->path.dentry, nd); if (!err) err = -ESTALE; if (err < 0) break; + return 0; } return_base: if (nameidata_drop_rcu_last_maybe(nd)) -- cgit v1.2.2 From 4e924a4f53a0e1ea060bd50695a12a238b250322 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 15 Feb 2011 01:42:59 -0500 Subject: get rid of nameidata_dentry_drop_rcu() calling nameidata_drop_rcu() can't happen anymore and didn't work right anyway Signed-off-by: Al Viro --- fs/namei.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 10635d329175..9e701e28a329 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -455,14 +455,6 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry struct fs_struct *fs = current->fs; struct dentry *parent = nd->path.dentry; - /* - * It can be possible to revalidate the dentry that we started - * the path walk with. force_reval_path may also revalidate the - * dentry already committed to the nameidata. - */ - if (unlikely(parent == dentry)) - return nameidata_drop_rcu(nd); - BUG_ON(!(nd->flags & LOOKUP_RCU)); if (nd->root.mnt) { spin_lock(&fs->lock); -- cgit v1.2.2 From 261cd298a8c363d7985e3482946edb4bfedacf98 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 15 Feb 2011 09:43:32 +0100 Subject: s390: remove task_show_regs task_show_regs used to be a debugging aid in the early bringup days of Linux on s390. /proc//status is a world readable file, it is not a good idea to show the registers of a process. The only correct fix is to remove task_show_regs. Reported-by: Al Viro Signed-off-by: Martin Schwidefsky Signed-off-by: Linus Torvalds --- fs/proc/array.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'fs') diff --git a/fs/proc/array.c b/fs/proc/array.c index df2b703b9d0f..7c99c1cf7e5c 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -353,9 +353,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, task_cap(m, task); task_cpus_allowed(m, task); cpuset_task_status_allowed(m, task); -#if defined(CONFIG_S390) - task_show_regs(m, task); -#endif task_context_switch_counts(m, task); return 0; } -- cgit v1.2.2 From 58a69cb47ec6991bf006a3e5d202e8571b0327a4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 16 Feb 2011 09:25:31 +0100 Subject: workqueue, freezer: unify spelling of 'freeze' + 'able' to 'freezable' There are two spellings in use for 'freeze' + 'able' - 'freezable' and 'freezeable'. The former is the more prominent one. The latter is mostly used by workqueue and in a few other odd places. Unify the spelling to 'freezable'. Signed-off-by: Tejun Heo Reported-by: Alan Stern Acked-by: "Rafael J. Wysocki" Acked-by: Greg Kroah-Hartman Acked-by: Dmitry Torokhov Cc: David Woodhouse Cc: Alex Dubov Cc: "David S. 
Miller" Cc: Steven Whitehouse --- fs/gfs2/glock.c | 4 ++-- fs/gfs2/main.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 08a8beb152e6..7cd9a5a68d59 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1779,11 +1779,11 @@ int __init gfs2_glock_init(void) #endif glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | - WQ_HIGHPRI | WQ_FREEZEABLE, 0); + WQ_HIGHPRI | WQ_FREEZABLE, 0); if (IS_ERR(glock_workqueue)) return PTR_ERR(glock_workqueue); gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", - WQ_MEM_RECLAIM | WQ_FREEZEABLE, + WQ_MEM_RECLAIM | WQ_FREEZABLE, 0); if (IS_ERR(gfs2_delete_workqueue)) { destroy_workqueue(glock_workqueue); diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index ebef7ab6e17e..85ba027d1c4d 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -144,7 +144,7 @@ static int __init init_gfs2_fs(void) error = -ENOMEM; gfs_recovery_wq = alloc_workqueue("gfs_recovery", - WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0); + WQ_MEM_RECLAIM | WQ_FREEZABLE, 0); if (!gfs_recovery_wq) goto fail_wq; -- cgit v1.2.2 From 3abb17e82f08628b59e20d8cbcb55e2204180f69 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 16 Feb 2011 08:56:55 -0800 Subject: vfs: fix BUG_ON() in fs/namei.c:1461 When Al moved the nameidata_dentry_drop_rcu_maybe() call into the do_follow_link function in commit 844a391799c2 ("nothing in do_follow_link() is going to see RCU"), he mistakenly left the BUG_ON(inode != path->dentry->d_inode); behind. Which would otherwise be ok, but that BUG_ON() really needs to be _after_ dropping RCU, since the dentry isn't necessarily stable otherwise. So complete the code movement in that commit, and move the BUG_ON() into do_follow_link() too. This means that we need to pass in 'inode' as an argument (just for this one use), but that's a small thing. And eventually we may be confident enough in our path lookup that we can just remove the BUG_ON() and the unnecessary inode argument. Reported-and-tested-by: Eric Dumazet Acked-by: Al Viro Signed-off-by: Linus Torvalds --- fs/namei.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/namei.c b/fs/namei.c index 9e701e28a329..0087cf9c2c6b 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -795,7 +795,7 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p) * Without that kind of total limit, nasty chains of consecutive * symlinks can cause almost arbitrarily long lookups. 
*/ -static inline int do_follow_link(struct path *path, struct nameidata *nd) +static inline int do_follow_link(struct inode *inode, struct path *path, struct nameidata *nd) { void *cookie; int err = -ELOOP; @@ -803,6 +803,7 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd) /* We drop rcu-walk here */ if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry)) return -ECHILD; + BUG_ON(inode != path->dentry->d_inode); if (current->link_count >= MAX_NESTED_LINKS) goto loop; @@ -1413,8 +1414,7 @@ exec_again: goto out_dput; if (inode->i_op->follow_link) { - BUG_ON(inode != next.dentry->d_inode); - err = do_follow_link(&next, nd); + err = do_follow_link(inode, &next, nd); if (err) goto return_err; nd->inode = nd->path.dentry->d_inode; @@ -1458,8 +1458,7 @@ last_component: break; if (inode && unlikely(inode->i_op->follow_link) && (lookup_flags & LOOKUP_FOLLOW)) { - BUG_ON(inode != next.dentry->d_inode); - err = do_follow_link(&next, nd); + err = do_follow_link(inode, &next, nd); if (err) goto return_err; nd->inode = nd->path.dentry->d_inode; -- cgit v1.2.2 From 91435650c233b93e0da389db74f4b2c11c5ad2d4 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 16 Feb 2011 13:10:41 -0500 Subject: Btrfs: put ENOSPC debugging under a mount option ENOSPC in btrfs is getting to the point where the extra debugging isn't required. I've put it under mount -o enospc_debug just in case someone is having difficult problems. Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 1 + fs/btrfs/extent-tree.c | 2 +- fs/btrfs/super.c | 7 ++++++- 3 files changed, 8 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 72195378bef9..6297701bc19c 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1254,6 +1254,7 @@ struct btrfs_root { #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) +#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a7aaa10c5302..d375fc04a065 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5377,7 +5377,7 @@ again: num_bytes, data, 1); goto again; } - if (ret == -ENOSPC) { + if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { struct btrfs_space_info *sinfo; sinfo = __find_space_info(root->fs_info, data); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0209b5fc772c..db0a827252bd 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -155,7 +155,8 @@ enum { Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, Opt_compress_type, Opt_compress_force, Opt_compress_force_type, Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, - Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err, + Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, + Opt_enospc_debug, Opt_err, }; static match_table_t tokens = { @@ -184,6 +185,7 @@ static match_table_t tokens = { {Opt_space_cache, "space_cache"}, {Opt_clear_cache, "clear_cache"}, {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, + {Opt_enospc_debug, "enospc_debug"}, {Opt_err, NULL}, }; @@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) case Opt_user_subvol_rm_allowed: btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); break; + case Opt_enospc_debug: + btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); + break; case Opt_err: 
printk(KERN_INFO "btrfs: unrecognized mount option " "'%s'\n", p); -- cgit v1.2.2 From c87f08ca44e83b2c8d28f63f9c33f3a270a04bbe Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 16 Feb 2011 13:57:04 -0500 Subject: Btrfs: allow balance to explicitly allocate chunks as it relocates Btrfs device shrinking and balancing ends up reallocating all the blocks in order to allow COW to move them to new destinations. It is somewhat awkward in terms of ENOSPC because most of the enospc code is built around the idea that some operation on a reference counted tree triggers allocations in the non-reference counted trees. This commit changes the balancing code to deal with enospc by trying to allocate a new chunk. If that allocation succeeds, we go ahead and retry whatever failed due to enospc. Signed-off-by: Chris Mason --- fs/btrfs/ctree.h | 2 ++ fs/btrfs/extent-tree.c | 7 +++++++ fs/btrfs/relocation.c | 13 ++++++++++++- 3 files changed, 21 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 6297701bc19c..28188a786da0 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2219,6 +2219,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end); int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, u64 num_bytes); +int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 type); /* ctree.c */ int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d375fc04a065..100e409e9053 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8066,6 +8066,13 @@ out: return ret; } +int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 type) +{ + u64 alloc_flags = get_alloc_profile(root, type); + return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); +} + /* * helper to account the unused space of all the readonly block group in the * list. takes mirrors into account. 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 0825e4ed9447..31ade5802ae8 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -3654,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) u32 item_size; int ret; int err = 0; + int progress = 0; path = btrfs_alloc_path(); if (!path) @@ -3666,9 +3667,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) } while (1) { + progress++; trans = btrfs_start_transaction(rc->extent_root, 0); BUG_ON(IS_ERR(trans)); - +restart: if (update_backref_cache(trans, &rc->backref_cache)) { btrfs_end_transaction(trans, rc->extent_root); continue; @@ -3781,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) } } } + if (trans && progress && err == -ENOSPC) { + ret = btrfs_force_chunk_alloc(trans, rc->extent_root, + rc->block_group->flags); + if (ret == 0) { + err = 0; + progress = 0; + goto restart; + } + } btrfs_release_path(rc->extent_root, path); clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, -- cgit v1.2.2 From b4dc2b8c694ead005b828f5fb7fa1134db5b6275 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 16 Feb 2011 06:06:34 +0000 Subject: Btrfs: Fix BTRFS_IOC_SUBVOL_SETFLAGS ioctl - Check user-specified flags correctly - Check the inode owership - Search root item in root tree but not fs tree Reported-by: Dan Rosenberg Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/ioctl.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index be2d4f6aaa5e..5fdb2abc4fa7 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1071,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, if (copy_from_user(&flags, arg, sizeof(flags))) return -EFAULT; - if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC) + if (flags & BTRFS_SUBVOL_CREATE_ASYNC) return -EINVAL; if (flags & ~BTRFS_SUBVOL_RDONLY) return -EOPNOTSUPP; + if (!is_owner_or_cap(inode)) + return -EACCES; + down_write(&root->fs_info->subvol_sem); /* nothing to do */ @@ -1097,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, goto out_reset; } - ret = btrfs_update_root(trans, root, + ret = btrfs_update_root(trans, root->fs_info->tree_root, &root->root_key, &root->root_item); btrfs_commit_transaction(trans, root); -- cgit v1.2.2 From ca9b688c1c9a21635cfc8af8b68565b154185196 Mon Sep 17 00:00:00 2001 From: Li Zefan Date: Wed, 16 Feb 2011 06:06:41 +0000 Subject: Btrfs: Avoid accessing unmapped kernel address When decompressing a chunk of data, we'll copy the data out to a working buffer if the data is stored in more than one page, otherwise we'll use the mapped page directly to avoid memory copy. In the latter case, we'll end up accessing the kernel address after we've unmapped the page in a corner case. 
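To make the corner case concrete, here is a minimal userspace sketch of the deferred-unmap pattern the fix introduces, using mmap()/munmap() in place of kmap()/kunmap(). The flag names mirror the patch; everything else is a stand-in rather than the btrfs code.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *page = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char out[32];
	const char *buf;
	int may_late_unmap = 0, need_unmap = 0;

	if (page == MAP_FAILED)
		return 1;
	strcpy(page, "compressed bytes");

	buf = page;		/* fast path: read directly out of the mapping */
	may_late_unmap = 1;	/* so the mapping has to outlive the read */

	/* advancing to the next input page: defer the unmap instead of doing it now */
	if (may_late_unmap)
		need_unmap = 1;
	else
		munmap(page, len);	/* only safe when buf does not alias the mapping */

	snprintf(out, sizeof(out), "%s", buf);	/* the read that used to hit an unmapped page */

	if (need_unmap)
		munmap(page, len);	/* now the mapping can safely go away */

	printf("%s\n", out);
	return 0;
}
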
Reported-by: Juan Francisco Cantero Hurtado Signed-off-by: Li Zefan Signed-off-by: Chris Mason --- fs/btrfs/lzo.c | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index cc9b450399df..a178f5ebea78 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws, unsigned long tot_out; unsigned long tot_len; char *buf; + bool may_late_unmap, need_unmap; data_in = kmap(pages_in[0]); tot_len = read_compress_length(data_in); @@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws, tot_in += in_len; working_bytes = in_len; + may_late_unmap = need_unmap = false; /* fast path: avoid using the working buffer */ if (in_page_bytes_left >= in_len) { buf = data_in + in_offset; bytes = in_len; + may_late_unmap = true; goto cont; } @@ -329,14 +332,17 @@ cont: if (working_bytes == 0 && tot_in >= tot_len) break; - kunmap(pages_in[page_in_index]); - page_in_index++; - if (page_in_index >= total_pages_in) { + if (page_in_index + 1 >= total_pages_in) { ret = -1; - data_in = NULL; goto done; } - data_in = kmap(pages_in[page_in_index]); + + if (may_late_unmap) + need_unmap = true; + else + kunmap(pages_in[page_in_index]); + + data_in = kmap(pages_in[++page_in_index]); in_page_bytes_left = PAGE_CACHE_SIZE; in_offset = 0; @@ -346,6 +352,8 @@ cont: out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, &out_len); + if (need_unmap) + kunmap(pages_in[page_in_index - 1]); if (ret != LZO_E_OK) { printk(KERN_WARNING "btrfs decompress failed\n"); ret = -1; @@ -363,8 +371,7 @@ cont: break; } done: - if (data_in) - kunmap(pages_in[page_in_index]); + kunmap(pages_in[page_in_index]); return ret; } -- cgit v1.2.2 From 9b3517e9136824346227b7b04f8f7ea1f3a726cc Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Tue, 15 Feb 2011 18:14:25 +0000 Subject: Btrfs: make btrfs_rm_device() fail gracefully If shrinking done as part of the online device removal fails add that device back to the allocation list and increment the rw_devices counter. This fixes two bugs: 1) we could have a perfectly good device out of alloc list for no good reason; 2) in the btrfs consisting of two devices, failure in btrfs_rm_device() could lead to a situation where it was impossible to remove any of the devices because of the "unable to remove the only writeable device" error. 
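A minimal sketch of the undo-on-error shape this adds, in self-contained userspace C; the device struct, shrink() and the counter are stand-ins for the btrfs objects:

#include <stdio.h>

struct dev { int writeable; int on_alloc_list; };
static int rw_devices = 2;

static int shrink(struct dev *d) { (void)d; return -1; /* simulate failure */ }

static int remove_device(struct dev *d)
{
	int ret;

	if (d->writeable) {		/* step 1: take it out of the alloc list */
		d->on_alloc_list = 0;
		rw_devices--;
	}

	ret = shrink(d);		/* step 2 may fail... */
	if (ret)
		goto error_undo;
	return 0;

error_undo:				/* ...so step 1 has to be reverted */
	if (d->writeable) {
		d->on_alloc_list = 1;
		rw_devices++;
	}
	return ret;
}

int main(void)
{
	struct dev d = { .writeable = 1, .on_alloc_list = 1 };

	printf("remove: %d, rw_devices back to %d, still on alloc list: %d\n",
	       remove_device(&d), rw_devices, d.on_alloc_list);
	return 0;
}
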
Signed-off-by: Ilya Dryomov Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index dadaaa8005c8..f31c33119bb6 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1337,11 +1337,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) ret = btrfs_shrink_device(device, 0); if (ret) - goto error_brelse; + goto error_undo; ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); if (ret) - goto error_brelse; + goto error_undo; device->in_fs_metadata = 0; @@ -1415,6 +1415,13 @@ out: mutex_unlock(&root->fs_info->volume_mutex); mutex_unlock(&uuid_mutex); return ret; +error_undo: + if (device->writeable) { + list_add(&device->dev_alloc_list, + &root->fs_info->fs_devices->alloc_list); + root->fs_info->fs_devices->rw_devices++; + } + goto error_brelse; } /* -- cgit v1.2.2 From fb01aa85b8b29c1a4e1f4a28ea54175de6bf7559 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Tue, 15 Feb 2011 18:12:57 +0000 Subject: Btrfs: set FMODE_EXCL in btrfs_device->mode This fixes a bug introduced in d4d77629, where the device added online (and therefore initialized via btrfs_init_new_device()) would be left with the positive bdev->bd_holders after unmount. Since d4d77629 we no longer OR FMODE_EXCL explicitly on blkdev_put(), set it in btrfs_device->mode. Signed-off-by: Ilya Dryomov Signed-off-by: Chris Mason --- fs/btrfs/volumes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index f31c33119bb6..94334d952280 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1639,7 +1639,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) device->dev_root = root->fs_info->dev_root; device->bdev = bdev; device->in_fs_metadata = 1; - device->mode = 0; + device->mode = FMODE_EXCL; set_blocksize(device->bdev, 4096); if (seeding_dev) { -- cgit v1.2.2 From 47c85291d3dd1a51501555000b90f8e281a0458e Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Wed, 16 Feb 2011 13:08:35 +1100 Subject: nfsd: correctly handle return value from nfsd_map_name_to_* These functions return an nfs status, not a host_err. So don't try to convert before returning. This is a regression introduced by 3c726023402a2f3b28f49b9d90ebf9e71151157d; I fixed up two of the callers, but missed these two. Cc: stable@kernel.org Reported-by: Herbert Poetzl Signed-off-by: NeilBrown Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4xdr.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 956629b9cdc9..1275b8655070 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -317,8 +317,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, READ_BUF(dummy32); len += (XDR_QUADLEN(dummy32) << 2); READMEM(buf, dummy32); - if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) - goto out_nfserr; + if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) + return status; iattr->ia_valid |= ATTR_UID; } if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) { @@ -328,8 +328,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, READ_BUF(dummy32); len += (XDR_QUADLEN(dummy32) << 2); READMEM(buf, dummy32); - if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) - goto out_nfserr; + if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) + return status; iattr->ia_valid |= ATTR_GID; } if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) { -- cgit v1.2.2 From e51900f7d38cbcfb481d84567fd92540e7e1d23a Mon Sep 17 00:00:00 2001 From: Chuck Ebbert Date: Wed, 16 Feb 2011 18:11:53 -0500 Subject: block: revert block_dev read-only check This reverts commit 75f1dc0d076d ("block: check bdev_read_only() from blkdev_get()"). That commit added stricter checking to make sure devices that were being used read-only were actually opened in that mode. It turns out that the change breaks a bunch of kernel code that opens block devices. Affected systems include dm, md, and the loop device. Because strict checking for read-only opens of block devices was not done before this, the code that opens the devices was opening them read-write even if they were being used read-only. Auditing all that code will take time, and new userspace packages for dm, mdadm, etc. will also be required. Signed-off-by: Chuck Ebbert Signed-off-by: Linus Torvalds --- fs/block_dev.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 333a7bb4cb9c..4fb8a3431531 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1215,12 +1215,6 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) res = __blkdev_get(bdev, mode, 0); - /* __blkdev_get() may alter read only status, check it afterwards */ - if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { - __blkdev_put(bdev, mode, 0); - res = -EACCES; - } - if (whole) { /* finish claiming */ mutex_lock(&bdev->bd_mutex); @@ -1298,6 +1292,11 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, if (err) return ERR_PTR(err); + if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) { + blkdev_put(bdev, mode); + return ERR_PTR(-EACCES); + } + return bdev; } EXPORT_SYMBOL(blkdev_get_by_path); -- cgit v1.2.2 From 9616125611ee47693186533d76e403856a36b3c8 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Wed, 16 Feb 2011 09:34:16 -0500 Subject: cifs: fix handling of scopeid in cifs_convert_address MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The code finds, the '%' sign in an ipv6 address and copies that to a buffer allocated on the stack. It then ignores that buffer, and passes 'pct' to simple_strtoul(), which doesn't work right because we're comparing 'endp' against a completely different string. Fix it by passing the correct pointer. 
While we're at it, this is a good candidate for conversion to strict_strtoul as well. Cc: stable@kernel.org Cc: David Howells Reported-by: Björn JACKE Signed-off-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/netmisc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index 8d9189f64477..79f641eeda30 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -170,7 +170,7 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len) { int rc, alen, slen; const char *pct; - char *endp, scope_id[13]; + char scope_id[13]; struct sockaddr_in *s4 = (struct sockaddr_in *) dst; struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst; @@ -197,9 +197,9 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len) memcpy(scope_id, pct + 1, slen); scope_id[slen] = '\0'; - s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0); - if (endp != scope_id + slen) - return 0; + rc = strict_strtoul(scope_id, 0, + (unsigned long *)&s6->sin6_scope_id); + rc = (rc == 0) ? 1 : 0; } return rc; -- cgit v1.2.2 From fa7ea87a057958a8b7926c1a60a3ca6d696328ed Mon Sep 17 00:00:00 2001 From: Timo Warns Date: Thu, 17 Feb 2011 22:27:40 +0100 Subject: fs/partitions: Validate map_count in Mac partition tables Validate number of blocks in map and remove redundant variable. Signed-off-by: Timo Warns Cc: stable@kernel.org Signed-off-by: Linus Torvalds --- fs/partitions/mac.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c index 68d6a216ee79..11f688bd76c5 100644 --- a/fs/partitions/mac.c +++ b/fs/partitions/mac.c @@ -29,10 +29,9 @@ static inline void mac_fix_string(char *stg, int len) int mac_partition(struct parsed_partitions *state) { - int slot = 1; Sector sect; unsigned char *data; - int blk, blocks_in_map; + int slot, blocks_in_map; unsigned secsize; #ifdef CONFIG_PPC_PMAC int found_root = 0; @@ -59,10 +58,14 @@ int mac_partition(struct parsed_partitions *state) put_dev_sector(sect); return 0; /* not a MacOS disk */ } - strlcat(state->pp_buf, " [mac]", PAGE_SIZE); blocks_in_map = be32_to_cpu(part->map_count); - for (blk = 1; blk <= blocks_in_map; ++blk) { - int pos = blk * secsize; + if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) { + put_dev_sector(sect); + return 0; + } + strlcat(state->pp_buf, " [mac]", PAGE_SIZE); + for (slot = 1; slot <= blocks_in_map; ++slot) { + int pos = slot * secsize; put_dev_sector(sect); data = read_part_sector(state, pos/512, §); if (!data) @@ -113,13 +116,11 @@ int mac_partition(struct parsed_partitions *state) } if (goodness > found_root_goodness) { - found_root = blk; + found_root = slot; found_root_goodness = goodness; } } #endif /* CONFIG_PPC_PMAC */ - - ++slot; } #ifdef CONFIG_PPC_PMAC if (found_root_goodness) -- cgit v1.2.2 From 8787c7a3e0e3f1aa21856d6b6cd6880cc93497e9 Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Thu, 17 Feb 2011 18:51:24 -0600 Subject: eCryptfs: Revert "dont call lookup_one_len to avoid NULL nameidata" This reverts commit 21edad32205e97dc7ccb81a85234c77e760364c8 and commit 93c3fe40c279f002906ad14584c30671097d4394, which fixed a regression by the former. Al Viro pointed out bypassed dcache lookups in ecryptfs_new_lower_dentry(), misuse of vfs_path_lookup() in ecryptfs_lookup_one_lower() and a dislike of passing nameidata to the lower filesystem. 
Reported-by: Al Viro Signed-off-by: Tyler Hicks --- fs/ecryptfs/inode.c | 106 ++++++---------------------------------------------- 1 file changed, 12 insertions(+), 94 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index bd33f87a1907..fc44823fea3a 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -348,75 +348,6 @@ out: return rc; } -/** - * ecryptfs_new_lower_dentry - * @name: The name of the new dentry. - * @lower_dir_dentry: Parent directory of the new dentry. - * @nd: nameidata from last lookup. - * - * Create a new dentry or get it from lower parent dir. - */ -static struct dentry * -ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry, - struct nameidata *nd) -{ - struct dentry *new_dentry; - struct dentry *tmp; - struct inode *lower_dir_inode; - - lower_dir_inode = lower_dir_dentry->d_inode; - - tmp = d_alloc(lower_dir_dentry, name); - if (!tmp) - return ERR_PTR(-ENOMEM); - - mutex_lock(&lower_dir_inode->i_mutex); - new_dentry = lower_dir_inode->i_op->lookup(lower_dir_inode, tmp, nd); - mutex_unlock(&lower_dir_inode->i_mutex); - - if (!new_dentry) - new_dentry = tmp; - else - dput(tmp); - - return new_dentry; -} - - -/** - * ecryptfs_lookup_one_lower - * @ecryptfs_dentry: The eCryptfs dentry that we are looking up - * @lower_dir_dentry: lower parent directory - * @name: lower file name - * - * Get the lower dentry from vfs. If lower dentry does not exist yet, - * create it. - */ -static struct dentry * -ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry, - struct dentry *lower_dir_dentry, struct qstr *name) -{ - struct nameidata nd; - struct vfsmount *lower_mnt; - int err; - - lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt( - ecryptfs_dentry->d_parent)); - err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd); - mntput(lower_mnt); - - if (!err) { - /* we dont need the mount */ - mntput(nd.path.mnt); - return nd.path.dentry; - } - if (err != -ENOENT) - return ERR_PTR(err); - - /* create a new lower dentry */ - return ecryptfs_new_lower_dentry(name, lower_dir_dentry, &nd); -} - /** * ecryptfs_lookup * @ecryptfs_dir_inode: The eCryptfs directory inode @@ -434,7 +365,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, size_t encrypted_and_encoded_name_size; struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; struct dentry *lower_dir_dentry, *lower_dentry; - struct qstr lower_name; int rc = 0; if ((ecryptfs_dentry->d_name.len == 1 @@ -444,20 +374,14 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, goto out_d_drop; } lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); - lower_name.name = ecryptfs_dentry->d_name.name; - lower_name.len = ecryptfs_dentry->d_name.len; - lower_name.hash = ecryptfs_dentry->d_name.hash; - if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { - rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, - lower_dir_dentry->d_inode, &lower_name); - if (rc < 0) - goto out_d_drop; - } - lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry, - lower_dir_dentry, &lower_name); + mutex_lock(&lower_dir_dentry->d_inode->i_mutex); + lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name, + lower_dir_dentry, + ecryptfs_dentry->d_name.len); + mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); if (IS_ERR(lower_dentry)) { rc = PTR_ERR(lower_dentry); - ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " + ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " "[%d] on 
lower_dentry = [%s]\n", __func__, rc, encrypted_and_encoded_name); goto out_d_drop; @@ -479,20 +403,14 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, "filename; rc = [%d]\n", __func__, rc); goto out_d_drop; } - lower_name.name = encrypted_and_encoded_name; - lower_name.len = encrypted_and_encoded_name_size; - lower_name.hash = full_name_hash(lower_name.name, lower_name.len); - if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { - rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, - lower_dir_dentry->d_inode, &lower_name); - if (rc < 0) - goto out_d_drop; - } - lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry, - lower_dir_dentry, &lower_name); + mutex_lock(&lower_dir_dentry->d_inode->i_mutex); + lower_dentry = lookup_one_len(encrypted_and_encoded_name, + lower_dir_dentry, + encrypted_and_encoded_name_size); + mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); if (IS_ERR(lower_dentry)) { rc = PTR_ERR(lower_dentry); - ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " + ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " "[%d] on lower_dentry = [%s]\n", __func__, rc, encrypted_and_encoded_name); goto out_d_drop; -- cgit v1.2.2 From 97d79b403ef03f729883246208ef5d8a2ebc4d68 Mon Sep 17 00:00:00 2001 From: Yehuda Sadeh Date: Tue, 18 Jan 2011 13:37:28 -0800 Subject: ceph: keep reference to parent inode on ceph_dentry When creating a new dentry we now hold a reference to the parent inode in the ceph_dentry. This is required due to the new RCU changes from 949854d0, which set dentry->d_parent to NULL in d_kill before calling the ->release() callback. If/when that behavior is changed, we can revert this hack. Signed-off-by: Yehuda Sadeh Signed-off-by: Sage Weil --- fs/ceph/dir.c | 5 ++++- fs/ceph/super.h | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 562f9884a4d9..6bfaa6a4ec47 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -60,6 +60,7 @@ int ceph_init_dentry(struct dentry *dentry) } di->dentry = dentry; di->lease_session = NULL; + di->parent_inode = igrab(dentry->d_parent->d_inode); dentry->d_fsdata = di; dentry->d_time = jiffies; ceph_dentry_lru_add(dentry); @@ -1025,7 +1026,7 @@ static void ceph_dentry_release(struct dentry *dentry) u64 snapid = CEPH_NOSNAP; if (!IS_ROOT(dentry)) { - parent_inode = dentry->d_parent->d_inode; + parent_inode = di->parent_inode; if (parent_inode) snapid = ceph_snap(parent_inode); } @@ -1050,6 +1051,8 @@ static void ceph_dentry_release(struct dentry *dentry) kmem_cache_free(ceph_dentry_cachep, di); dentry->d_fsdata = NULL; } + if (parent_inode) + iput(parent_inode); } static int ceph_snapdir_d_revalidate(struct dentry *dentry, diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 6e0826695112..c01aa646b407 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -207,6 +207,7 @@ struct ceph_dentry_info { struct dentry *dentry; u64 time; u64 offset; + struct inode *parent_inode; }; struct ceph_inode_xattrs_info { -- cgit v1.2.2 From 705773a6656bba66f2a80a44ddaacf9620df8a59 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 3 Feb 2011 14:16:19 +0100 Subject: ocfs2: Fix estimate of necessary credits for mkdir In the rare case that INLINE_DATA, INDEX_DIR, QUOTA, XATTR features are disabled and both the allocation of the directory inode and the allocation of the first directory block need to relink allocation group, there need not be enough credits reserved in a transaction. Fix the estimate. 
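For a concrete sense of the arithmetic, a small sketch of the two estimates side by side; the value 3 used below for the group-relink cost is a placeholder for illustration only, the real OCFS2_SUBALLOC_ALLOC constant is defined in the ocfs2 headers:

#include <stdio.h>

#define SUBALLOC_ALLOC 3			/* placeholder, not the real value */
#define DIR_LINK_CREDITS_OLD (1 + 2 + 1)	/* data block + 2 bitmap updates + dx_root */
#define DIR_LINK_CREDITS_NEW (1 + SUBALLOC_ALLOC + 1)	/* covers a group relink too */

int main(void)
{
	printf("old estimate: %d credits\n", DIR_LINK_CREDITS_OLD);
	printf("new estimate: %d credits (enough when the allocator relinks a group)\n",
	       DIR_LINK_CREDITS_NEW);
	return 0;
}
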
CC: Mark Fasheh Signed-off-by: Jan Kara Signed-off-by: Joel Becker --- fs/ocfs2/journal.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 43e56b97f9c0..6180da1e37e6 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -405,9 +405,9 @@ static inline int ocfs2_remove_extent_credits(struct super_block *sb) ocfs2_quota_trans_credits(sb); } -/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe + - * bitmap block for the new bit) dx_root update for free list */ -#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1) +/* data block for new dir/symlink, allocation of directory block, dx_root + * update for free list */ +#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1) static inline int ocfs2_add_dir_index_credits(struct super_block *sb) { -- cgit v1.2.2 From acf3bb007e5636ef4c17505affb0974175108553 Mon Sep 17 00:00:00 2001 From: Tristan Ye Date: Fri, 21 Jan 2011 18:20:18 +0800 Subject: Ocfs2/refcounttree: Fix a bug for refcounttree to writeback clusters in a right number. Current refcounttree codes actually didn't writeback the new pages out in write-back mode, due to a bug of always passing a ZERO number of clusters to 'ocfs2_cow_sync_writeback', the patch tries to pass a proper one in. Signed-off-by: Tristan Ye Cc: stable@kernel.org Signed-off-by: Joel Becker --- fs/ocfs2/refcounttree.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index b5f9160e93e9..19ebc5aad391 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -3228,7 +3228,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, u32 num_clusters, unsigned int e_flags) { int ret, delete, index, credits = 0; - u32 new_bit, new_len; + u32 new_bit, new_len, orig_num_clusters; unsigned int set_len; struct ocfs2_super *osb = OCFS2_SB(sb); handle_t *handle; @@ -3261,6 +3261,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, goto out; } + orig_num_clusters = num_clusters; + while (num_clusters) { ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, p_cluster, num_clusters, @@ -3348,7 +3350,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, * in write-back mode. */ if (context->get_clusters == ocfs2_di_get_clusters) { - ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters); + ret = ocfs2_cow_sync_writeback(sb, context, cpos, + orig_num_clusters); if (ret) mlog_errno(ret); } -- cgit v1.2.2 From 52c303c56c3638944b5f733e3961dc58eb8c7270 Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Mon, 31 Jan 2011 11:31:04 -0800 Subject: ocfs2: Check heartbeat mode for kernel stacks only Commit 2c442719e90a44a6982c033d69df4aae4b167cfa added some checks for proper heartbeat mode when the o2cb stack is running. Unfortunately, it didn't take into account that a userpsace stack could be running. Fix this by only doing the check if o2cb is in use. This patch allows userspace stacks to mount the fs again. 
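A compact userspace sketch of the rule this implements: exactly one heartbeat mode, enforced only for the classic o2cb stack, with a population count standing in for hweight32(). The flag values and stack names here are illustrative:

#include <stdio.h>
#include <string.h>

#define HB_LOCAL  (1u << 0)
#define HB_GLOBAL (1u << 1)
#define HB_NONE   (1u << 2)
#define CLASSIC_STACK "o2cb"

static int heartbeat_opts_valid(const char *stack, unsigned int opts)
{
	int user_stack = strcmp(stack, CLASSIC_STACK) != 0;
	unsigned int hb = opts & (HB_LOCAL | HB_GLOBAL | HB_NONE);

	if (user_stack)
		return 1;				/* userspace stacks pick their own mode */
	return __builtin_popcount(hb) == 1;		/* o2cb: exactly one mode must be set */
}

int main(void)
{
	printf("%d\n", heartbeat_opts_valid("o2cb", HB_LOCAL));		/* 1 */
	printf("%d\n", heartbeat_opts_valid("o2cb", HB_LOCAL | HB_GLOBAL));	/* 0 */
	printf("%d\n", heartbeat_opts_valid("pcmk", 0));			/* 1: check skipped */
	return 0;
}
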
Cc: stable@kernel.org Signed-off-by: Mark Fasheh Signed-off-by: Joel Becker --- fs/ocfs2/super.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 38f986d2447e..36c423fb0635 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1316,7 +1316,7 @@ static int ocfs2_parse_options(struct super_block *sb, struct mount_options *mopt, int is_remount) { - int status; + int status, user_stack = 0; char *p; u32 tmp; @@ -1459,6 +1459,15 @@ static int ocfs2_parse_options(struct super_block *sb, memcpy(mopt->cluster_stack, args[0].from, OCFS2_STACK_LABEL_LEN); mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; + /* + * Open code the memcmp here as we don't have + * an osb to pass to + * ocfs2_userspace_stack(). + */ + if (memcmp(mopt->cluster_stack, + OCFS2_CLASSIC_CLUSTER_STACK, + OCFS2_STACK_LABEL_LEN)) + user_stack = 1; break; case Opt_inode64: mopt->mount_opt |= OCFS2_MOUNT_INODE64; @@ -1514,13 +1523,16 @@ static int ocfs2_parse_options(struct super_block *sb, } } - /* Ensure only one heartbeat mode */ - tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | - OCFS2_MOUNT_HB_NONE); - if (hweight32(tmp) != 1) { - mlog(ML_ERROR, "Invalid heartbeat mount options\n"); - status = 0; - goto bail; + if (user_stack == 0) { + /* Ensure only one heartbeat mode */ + tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | + OCFS2_MOUNT_HB_GLOBAL | + OCFS2_MOUNT_HB_NONE); + if (hweight32(tmp) != 1) { + mlog(ML_ERROR, "Invalid heartbeat mount options\n"); + status = 0; + goto bail; + } } status = 1; -- cgit v1.2.2 From 70b8902199003b098fde86d1db02e7465115a02c Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Thu, 17 Feb 2011 17:35:20 -0600 Subject: eCryptfs: Handle NULL nameidata pointers Allow for NULL nameidata pointers in eCryptfs create, lookup, and d_revalidate functions. 
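The shape of the fix, reduced to a minimal userspace sketch: swap the lower objects into the context only when the caller actually passed one, and restore afterwards under the same condition. struct ctx, upper_op() and lower_op() are stand-ins, not eCryptfs code:

#include <stdio.h>

struct ctx { const char *dentry; const char *mnt; };

static int lower_op(struct ctx *c)
{
	printf("lower op sees: %s\n", c ? c->dentry : "(no context)");
	return 0;
}

static int upper_op(struct ctx *c, const char *lower_dentry, const char *lower_mnt)
{
	const char *dentry_save = NULL, *mnt_save = NULL;
	int rc;

	if (c) {			/* swap in the lower objects only if we have a context */
		dentry_save = c->dentry;
		mnt_save = c->mnt;
		c->dentry = lower_dentry;
		c->mnt = lower_mnt;
	}
	rc = lower_op(c);
	if (c) {			/* and restore under the same condition */
		c->dentry = dentry_save;
		c->mnt = mnt_save;
	}
	return rc;
}

int main(void)
{
	struct ctx c = { "upper-dentry", "upper-mnt" };

	upper_op(&c, "lower-dentry", "lower-mnt");
	upper_op(NULL, "lower-dentry", "lower-mnt");	/* the case that used to oops */
	return 0;
}
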
Signed-off-by: Tyler Hicks --- fs/ecryptfs/dentry.c | 22 +++++++++++++--------- fs/ecryptfs/ecryptfs_kernel.h | 3 +-- fs/ecryptfs/inode.c | 30 +++++++++++++++--------------- 3 files changed, 29 insertions(+), 26 deletions(-) (limited to 'fs') diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c index 6fc4f319b550..534c1d46e69e 100644 --- a/fs/ecryptfs/dentry.c +++ b/fs/ecryptfs/dentry.c @@ -46,24 +46,28 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd) { struct dentry *lower_dentry; struct vfsmount *lower_mnt; - struct dentry *dentry_save; - struct vfsmount *vfsmount_save; + struct dentry *dentry_save = NULL; + struct vfsmount *vfsmount_save = NULL; int rc = 1; - if (nd->flags & LOOKUP_RCU) + if (nd && nd->flags & LOOKUP_RCU) return -ECHILD; lower_dentry = ecryptfs_dentry_to_lower(dentry); lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) goto out; - dentry_save = nd->path.dentry; - vfsmount_save = nd->path.mnt; - nd->path.dentry = lower_dentry; - nd->path.mnt = lower_mnt; + if (nd) { + dentry_save = nd->path.dentry; + vfsmount_save = nd->path.mnt; + nd->path.dentry = lower_dentry; + nd->path.mnt = lower_mnt; + } rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); - nd->path.dentry = dentry_save; - nd->path.mnt = vfsmount_save; + if (nd) { + nd->path.dentry = dentry_save; + nd->path.mnt = vfsmount_save; + } if (dentry->d_inode) { struct inode *lower_inode = ecryptfs_inode_to_lower(dentry->d_inode); diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index dbc84ed96336..e00753496e3e 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -632,8 +632,7 @@ int ecryptfs_interpose(struct dentry *hidden_dentry, u32 flags); int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, struct dentry *lower_dentry, - struct inode *ecryptfs_dir_inode, - struct nameidata *ecryptfs_nd); + struct inode *ecryptfs_dir_inode); int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, size_t *decrypted_name_size, struct dentry *ecryptfs_dentry, diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index fc44823fea3a..eb0d267ee715 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -74,16 +74,20 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode, unsigned int flags_save; int rc; - dentry_save = nd->path.dentry; - vfsmount_save = nd->path.mnt; - flags_save = nd->flags; - nd->path.dentry = lower_dentry; - nd->path.mnt = lower_mnt; - nd->flags &= ~LOOKUP_OPEN; + if (nd) { + dentry_save = nd->path.dentry; + vfsmount_save = nd->path.mnt; + flags_save = nd->flags; + nd->path.dentry = lower_dentry; + nd->path.mnt = lower_mnt; + nd->flags &= ~LOOKUP_OPEN; + } rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); - nd->path.dentry = dentry_save; - nd->path.mnt = vfsmount_save; - nd->flags = flags_save; + if (nd) { + nd->path.dentry = dentry_save; + nd->path.mnt = vfsmount_save; + nd->flags = flags_save; + } return rc; } @@ -241,8 +245,7 @@ out: */ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, struct dentry *lower_dentry, - struct inode *ecryptfs_dir_inode, - struct nameidata *ecryptfs_nd) + struct inode *ecryptfs_dir_inode) { struct dentry *lower_dir_dentry; struct vfsmount *lower_mnt; @@ -290,8 +293,6 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, goto out; if (special_file(lower_inode->i_mode)) goto out; - if (!ecryptfs_nd) - goto out; /* Released in this function */ 
page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER); if (!page_virt) { @@ -417,8 +418,7 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, } lookup_and_interpose: rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry, - ecryptfs_dir_inode, - ecryptfs_nd); + ecryptfs_dir_inode); goto out; out_d_drop: d_drop(ecryptfs_dentry); -- cgit v1.2.2 From 323ef68faf1bbd9b1e66aea268fd09d358d7e8ab Mon Sep 17 00:00:00 2001 From: Andy Whitcroft Date: Wed, 16 Feb 2011 04:49:59 +0000 Subject: ecryptfs: read on a directory should return EISDIR if not supported read() calls against a file descriptor connected to a directory are incorrectly returning EINVAL rather than EISDIR: [EISDIR] [XSI] [Option Start] The fildes argument refers to a directory and the implementation does not allow the directory to be read using read() or pread(). The readdir() function should be used instead. [Option End] This occurs because we do not have a .read operation defined for ecryptfs directories. Connect this up to generic_read_dir(). BugLink: http://bugs.launchpad.net/bugs/719691 Signed-off-by: Andy Whitcroft Signed-off-by: Tyler Hicks --- fs/ecryptfs/file.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 81e10e6a9443..7d1050e254f9 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -317,6 +317,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) const struct file_operations ecryptfs_dir_fops = { .readdir = ecryptfs_readdir, + .read = generic_read_dir, .unlocked_ioctl = ecryptfs_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ecryptfs_compat_ioctl, -- cgit v1.2.2 From 55f9cf6bbaa682958a7dd2755f883b768270c3ce Mon Sep 17 00:00:00 2001 From: Tyler Hicks Date: Tue, 11 Jan 2011 12:43:42 -0600 Subject: eCryptfs: Copy up lower inode attrs in getattr The lower filesystem may do some type of inode revalidation during a getattr call. eCryptfs should take advantage of that by copying the lower inode attributes to the eCryptfs inode after a call to vfs_getattr() on the lower inode. I originally wrote this fix while working on eCryptfs on nfsv3 support, but discovered it also fixed an eCryptfs on ext4 nanosecond timestamp bug that was reported. https://bugs.launchpad.net/bugs/613873 Cc: Signed-off-by: Tyler Hicks --- fs/ecryptfs/inode.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index eb0d267ee715..b592938a84bc 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -1010,6 +1010,8 @@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), ecryptfs_dentry_to_lower(dentry), &lower_stat); if (!rc) { + fsstack_copy_attr_all(dentry->d_inode, + ecryptfs_inode_to_lower(dentry->d_inode)); generic_fillattr(dentry->d_inode, stat); stat->blocks = lower_stat.blocks; } -- cgit v1.2.2 From 5e640927a597a7c3e72b61e8bce74c22e906de65 Mon Sep 17 00:00:00 2001 From: Shirish Pargaonkar Date: Thu, 17 Feb 2011 14:38:31 -0600 Subject: cifs: Fix regression in LANMAN (LM) auth code LANMAN response length was changed to 16 bytes instead of 24 bytes. Revert it back to 24 bytes. 
Signed-off-by: Shirish Pargaonkar CC: stable@kernel.org Signed-off-by: Steve French --- fs/cifs/sess.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'fs') diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 1adc9625a344..16765703131b 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -656,13 +656,13 @@ ssetup_ntlmssp_authenticate: if (type == LANMAN) { #ifdef CONFIG_CIFS_WEAK_PW_HASH - char lnm_session_key[CIFS_SESS_KEY_SIZE]; + char lnm_session_key[CIFS_AUTH_RESP_SIZE]; pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; /* no capabilities flags in old lanman negotiation */ - pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); + pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); /* Calculate hash with password and copy into bcc_ptr. * Encryption Key (stored as in cryptkey) gets used if the @@ -675,8 +675,8 @@ ssetup_ntlmssp_authenticate: true : false, lnm_session_key); ses->flags |= CIFS_SES_LANMAN; - memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE); - bcc_ptr += CIFS_SESS_KEY_SIZE; + memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); + bcc_ptr += CIFS_AUTH_RESP_SIZE; /* can not sign if LANMAN negotiated so no need to calculate signing key? but what if server -- cgit v1.2.2 From eed9e8307e01d6d8d6170afcb2f00e1a471b87d4 Mon Sep 17 00:00:00 2001 From: Steve French Date: Mon, 21 Feb 2011 22:31:47 +0000 Subject: [CIFS] update cifs version Update version to 1.71 so we can more easily spot modules with the last two fixes Signed-off-by: Steve French --- fs/cifs/cifsfs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 4a3330235d55..a9371b6578c0 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -127,5 +127,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* EXPERIMENTAL */ -#define CIFS_VERSION "1.70" +#define CIFS_VERSION "1.71" #endif /* _CIFSFS_H */ -- cgit v1.2.2 From 361821854b71fc3a53c9e17701538247bddbd4ba Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 20 Feb 2011 20:08:35 -0800 Subject: Docbook: add fs/eventfd.c and fix typos in it Add fs/eventfd.c to filesystems docbook. Make typo corrections in fs/eventfd.c. Signed-off-by: Randy Dunlap Cc: Davide Libenzi Signed-off-by: Linus Torvalds --- fs/eventfd.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/eventfd.c b/fs/eventfd.c index e0194b3e14d6..d9a591773919 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c @@ -99,7 +99,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_get); * @ctx: [in] Pointer to eventfd context. * * The eventfd context reference must have been previously acquired either - * with eventfd_ctx_get() or eventfd_ctx_fdget()). + * with eventfd_ctx_get() or eventfd_ctx_fdget(). */ void eventfd_ctx_put(struct eventfd_ctx *ctx) { @@ -146,9 +146,9 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue. * @ctx: [in] Pointer to eventfd context. * @wait: [in] Wait queue to be removed. - * @cnt: [out] Pointer to the 64bit conter value. + * @cnt: [out] Pointer to the 64-bit counter value. * - * Returns zero if successful, or the following error codes: + * Returns %0 if successful, or the following error codes: * * -EAGAIN : The operation would have blocked. 
* @@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue); * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero. * @ctx: [in] Pointer to eventfd context. * @no_wait: [in] Different from zero if the operation should not block. - * @cnt: [out] Pointer to the 64bit conter value. + * @cnt: [out] Pointer to the 64-bit counter value. * - * Returns zero if successful, or the following error codes: + * Returns %0 if successful, or the following error codes: * - * -EAGAIN : The operation would have blocked but @no_wait was nonzero. + * -EAGAIN : The operation would have blocked but @no_wait was non-zero. * -ERESTARTSYS : A signal interrupted the wait operation. * * If @no_wait is zero, the function might sleep until the eventfd internal -- cgit v1.2.2 From 3a3675b7f23f83ca8c67c9c2b6edf707fd28d1ba Mon Sep 17 00:00:00 2001 From: Dan Rosenberg Date: Mon, 14 Feb 2011 13:45:28 +0000 Subject: xfs: prevent leaking uninitialized stack memory in FSGEOMETRY_V1 The FSGEOMETRY_V1 ioctl (and its compat equivalent) calls out to xfs_fs_geometry() with a version number of 3. This code path does not fill in the logsunit member of the passed xfs_fsop_geom_t, leading to the leaking of four bytes of uninitialized stack data to potentially unprivileged callers. v2 switches to memset() to avoid future issues if structure members change, on suggestion of Dave Chinner. Signed-off-by: Dan Rosenberg Reviewed-by: Eugene Teo Signed-off-by: Alex Elder --- fs/xfs/xfs_fsops.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'fs') diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index cec89dd5d7d2..85668efb3e3e 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -53,6 +53,9 @@ xfs_fs_geometry( xfs_fsop_geom_t *geo, int new_version) { + + memset(geo, 0, sizeof(*geo)); + geo->blocksize = mp->m_sb.sb_blocksize; geo->rtextsize = mp->m_sb.sb_rextsize; geo->agblocks = mp->m_sb.sb_agblocks; -- cgit v1.2.2 From be715140b5c3baf8ab6708060cfab80bef279d18 Mon Sep 17 00:00:00 2001 From: Lukas Czerner Date: Tue, 15 Feb 2011 17:07:36 +0000 Subject: xfs: check if device support discard in xfs_ioc_trim() Right now we, are relying on the fact that when we attempt to actually do the discard, blkdev_issue_discar() returns -EOPNOTSUPP and the user is informed that the device does not support discard. However, in the case where the we do not hit any suitable free extent to trim in FITRIM code, it will finish without any error. This is very confusing, because it seems that FITRIM was successful even though the device does not actually supports discard. Solution: Check for the discard support before attempt to search for free extents. 
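From the caller's side, the behaviour change can be seen with a small FITRIM test program: with this patch the ioctl fails with EOPNOTSUPP up front instead of reporting success after trimming nothing. The mount point path below is just an example:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(void)
{
	struct fstrim_range range = { .start = 0, .len = ~0ULL, .minlen = 0 };
	int fd = open("/mnt/xfs", O_RDONLY);	/* example mount point */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, FITRIM, &range) < 0)
		/* EOPNOTSUPP now clearly means "no discard support on this device" */
		fprintf(stderr, "FITRIM: %s\n", strerror(errno));
	else
		printf("FITRIM ok, range.len reported back as %llu\n",
		       (unsigned long long)range.len);
	close(fd);
	return 0;
}
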
Signed-off-by: Lukas Czerner Signed-off-by: Alex Elder --- fs/xfs/linux-2.6/xfs_discard.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c index 05201ae719e5..d61611c88012 100644 --- a/fs/xfs/linux-2.6/xfs_discard.c +++ b/fs/xfs/linux-2.6/xfs_discard.c @@ -152,6 +152,8 @@ xfs_ioc_trim( if (!capable(CAP_SYS_ADMIN)) return -XFS_ERROR(EPERM); + if (!blk_queue_discard(q)) + return -XFS_ERROR(EOPNOTSUPP); if (copy_from_user(&range, urange, sizeof(range))) return -XFS_ERROR(EFAULT); -- cgit v1.2.2 From ec29ed5b407d618a8128f5942aade9e1758aa14b Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Wed, 23 Feb 2011 16:23:20 -0500 Subject: Btrfs: fix fiemap bugs with delalloc The Btrfs fiemap code wasn't properly returning delalloc extents, so applications that trust fiemap to decide if there are holes in the file see holes instead of delalloc. This reworks the btrfs fiemap code, adding a get_extent helper that searches for delalloc ranges and also adding a helper for extent_fiemap that skips past holes in the file. Signed-off-by: Chris Mason --- fs/btrfs/extent_io.c | 138 ++++++++++++++++++++++++++++++++++++--------------- fs/btrfs/extent_io.h | 2 +- fs/btrfs/inode.c | 126 +++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 224 insertions(+), 42 deletions(-) (limited to 'fs') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e7aeba242701..ff45b80d90f0 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode, */ u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, u64 max_bytes, - unsigned long bits) + unsigned long bits, int contig) { struct rb_node *node; struct extent_state *state; u64 cur_start = *start; u64 total_bytes = 0; + u64 last = 0; int found = 0; if (search_end <= cur_start) { @@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree, state = rb_entry(node, struct extent_state, rb_node); if (state->start > search_end) break; - if (state->end >= cur_start && (state->state & bits)) { + if (contig && found && state->start > last + 1) + break; + if (state->end >= cur_start && (state->state & bits) == bits) { total_bytes += min(search_end, state->end) + 1 - max(cur_start, state->start); if (total_bytes >= max_bytes) @@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree, *start = state->start; found = 1; } + last = state->end; + } else if (contig && found) { + break; } node = rb_next(node); if (!node) @@ -2912,6 +2918,46 @@ out: return sector; } +/* + * helper function for fiemap, which doesn't want to see any holes. 
+ * This maps until we find something past 'last' + */ +static struct extent_map *get_extent_skip_holes(struct inode *inode, + u64 offset, + u64 last, + get_extent_t *get_extent) +{ + u64 sectorsize = BTRFS_I(inode)->root->sectorsize; + struct extent_map *em; + u64 len; + + if (offset >= last) + return NULL; + + while(1) { + len = last - offset; + if (len == 0) + break; + len = (len + sectorsize - 1) & ~(sectorsize - 1); + em = get_extent(inode, NULL, 0, offset, len, 0); + if (!em || IS_ERR(em)) + return em; + + /* if this isn't a hole return it */ + if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) && + em->block_start != EXTENT_MAP_HOLE) { + return em; + } + + /* this is a hole, advance to the next extent */ + offset = extent_map_end(em); + free_extent_map(em); + if (offset >= last) + break; + } + return NULL; +} + int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len, get_extent_t *get_extent) { @@ -2921,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u32 flags = 0; u32 found_type; u64 last; + u64 last_for_get_extent = 0; u64 disko = 0; + u64 isize = i_size_read(inode); struct btrfs_key found_key; struct extent_map *em = NULL; struct extent_state *cached_state = NULL; struct btrfs_path *path; struct btrfs_file_extent_item *item; int end = 0; - u64 em_start = 0, em_len = 0; + u64 em_start = 0; + u64 em_len = 0; + u64 em_end = 0; unsigned long emflags; - int hole = 0; if (len == 0) return -EINVAL; @@ -2940,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, return -ENOMEM; path->leave_spinning = 1; + /* + * lookup the last file extent. We're not using i_size here + * because there might be preallocation past i_size + */ ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, path, inode->i_ino, -1, 0); if (ret < 0) { @@ -2953,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); found_type = btrfs_key_type(&found_key); - /* No extents, just return */ + /* No extents, but there might be delalloc bits */ if (found_key.objectid != inode->i_ino || found_type != BTRFS_EXTENT_DATA_KEY) { - btrfs_free_path(path); - return 0; + /* have to trust i_size as the end */ + last = (u64)-1; + last_for_get_extent = isize; + } else { + /* + * remember the start of the last extent. There are a + * bunch of different factors that go into the length of the + * extent, so its much less complex to remember where it started + */ + last = found_key.offset; + last_for_get_extent = last + 1; } - last = found_key.offset; btrfs_free_path(path); + /* + * we might have some extents allocated but more delalloc past those + * extents. 
so, we trust isize unless the start of the last extent is + * beyond isize + */ + if (last < isize) { + last = (u64)-1; + last_for_get_extent = isize; + } + lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, &cached_state, GFP_NOFS); - em = get_extent(inode, NULL, 0, off, max - off, 0); + + em = get_extent_skip_holes(inode, off, last_for_get_extent, + get_extent); if (!em) goto out; if (IS_ERR(em)) { @@ -2973,19 +3046,14 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, } while (!end) { - hole = 0; - off = em->start + em->len; + off = extent_map_end(em); if (off >= max) end = 1; - if (em->block_start == EXTENT_MAP_HOLE) { - hole = 1; - goto next; - } - em_start = em->start; em_len = em->len; - + em_end = extent_map_end(em); + emflags = em->flags; disko = 0; flags = 0; @@ -3004,37 +3072,29 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) flags |= FIEMAP_EXTENT_ENCODED; -next: - emflags = em->flags; free_extent_map(em); em = NULL; - if (!end) { - em = get_extent(inode, NULL, 0, off, max - off, 0); - if (!em) - goto out; - if (IS_ERR(em)) { - ret = PTR_ERR(em); - goto out; - } - emflags = em->flags; - } - - if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) { + if ((em_start >= last) || em_len == (u64)-1 || + (last == (u64)-1 && isize <= em_end)) { flags |= FIEMAP_EXTENT_LAST; end = 1; } - if (em_start == last) { + /* now scan forward to see if this is really the last extent. */ + em = get_extent_skip_holes(inode, off, last_for_get_extent, + get_extent); + if (IS_ERR(em)) { + ret = PTR_ERR(em); + goto out; + } + if (!em) { flags |= FIEMAP_EXTENT_LAST; end = 1; } - - if (!hole) { - ret = fiemap_fill_next_extent(fieinfo, em_start, disko, - em_len, flags); - if (ret) - goto out_free; - } + ret = fiemap_fill_next_extent(fieinfo, em_start, disko, + em_len, flags); + if (ret) + goto out_free; } out_free: free_extent_map(em); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 7083cfafd061..9318dfefd59c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -191,7 +191,7 @@ void extent_io_exit(void); u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, - u64 max_bytes, unsigned long bits); + u64 max_bytes, unsigned long bits, int contig); void free_extent_state(struct extent_state *state); int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8d392ed73d57..44b926646e33 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1913,7 +1913,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start) private = 0; if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, - (u64)-1, 1, EXTENT_DIRTY)) { + (u64)-1, 1, EXTENT_DIRTY, 0)) { ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start, &private_failure); if (ret == 0) { @@ -5282,6 +5282,128 @@ out: return em; } +struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, + size_t pg_offset, u64 start, u64 len, + int create) +{ + struct extent_map *em; + struct extent_map *hole_em = NULL; + u64 range_start = start; + u64 end; + u64 found; + u64 found_end; + int err = 0; + + em = btrfs_get_extent(inode, page, pg_offset, start, len, create); + if (IS_ERR(em)) + return em; + if (em) { + /* + * if our em maps to a hole, there might + * actually be delalloc bytes behind it + */ + if (em->block_start != EXTENT_MAP_HOLE) + return em; + else + hole_em = em; + } + + /* check to see if 
we've wrapped (len == -1 or similar) */ + end = start + len; + if (end < start) + end = (u64)-1; + else + end -= 1; + + em = NULL; + + /* ok, we didn't find anything, lets look for delalloc */ + found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, + end, len, EXTENT_DELALLOC, 1); + found_end = range_start + found; + if (found_end < range_start) + found_end = (u64)-1; + + /* + * we didn't find anything useful, return + * the original results from get_extent() + */ + if (range_start > end || found_end <= start) { + em = hole_em; + hole_em = NULL; + goto out; + } + + /* adjust the range_start to make sure it doesn't + * go backwards from the start they passed in + */ + range_start = max(start,range_start); + found = found_end - range_start; + + if (found > 0) { + u64 hole_start = start; + u64 hole_len = len; + + em = alloc_extent_map(GFP_NOFS); + if (!em) { + err = -ENOMEM; + goto out; + } + /* + * when btrfs_get_extent can't find anything it + * returns one huge hole + * + * make sure what it found really fits our range, and + * adjust to make sure it is based on the start from + * the caller + */ + if (hole_em) { + u64 calc_end = extent_map_end(hole_em); + + if (calc_end <= start || (hole_em->start > end)) { + free_extent_map(hole_em); + hole_em = NULL; + } else { + hole_start = max(hole_em->start, start); + hole_len = calc_end - hole_start; + } + } + em->bdev = NULL; + if (hole_em && range_start > hole_start) { + /* our hole starts before our delalloc, so we + * have to return just the parts of the hole + * that go until the delalloc starts + */ + em->len = min(hole_len, + range_start - hole_start); + em->start = hole_start; + em->orig_start = hole_start; + /* + * don't adjust block start at all, + * it is fixed at EXTENT_MAP_HOLE + */ + em->block_start = hole_em->block_start; + em->block_len = hole_len; + } else { + em->start = range_start; + em->len = found; + em->orig_start = range_start; + em->block_start = EXTENT_MAP_DELALLOC; + em->block_len = found; + } + } else if (hole_em) { + return hole_em; + } +out: + + free_extent_map(hole_em); + if (err) { + free_extent_map(em); + return ERR_PTR(err); + } + return em; +} + static struct extent_map *btrfs_new_extent_direct(struct inode *inode, u64 start, u64 len) { @@ -6104,7 +6226,7 @@ out: static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, __u64 start, __u64 len) { - return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent); + return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); } int btrfs_readpage(struct file *file, struct page *page) -- cgit v1.2.2 From 2aa15890f3c191326678f1bd68af61ec6b8753ec Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Wed, 23 Feb 2011 13:49:47 +0100 Subject: mm: prevent concurrent unmap_mapping_range() on the same inode Michael Leun reported that running parallel opens on a fuse filesystem can trigger a "kernel BUG at mm/truncate.c:475" Gurudas Pai reported the same bug on NFS. The reason is, unmap_mapping_range() is not prepared for more than one concurrent invocation per inode. For example: thread1: going through a big range, stops in the middle of a vma and stores the restart address in vm_truncate_count. thread2: comes in with a small (e.g. single page) unmap request on the same vma, somewhere before restart_address, finds that the vma was already unmapped up to the restart address and happily returns without doing anything. 
Another scenario would be two big unmap requests, both having to restart the unmapping and each one setting vm_truncate_count to its own value. This could go on forever without any of them being able to finish. Truncate and hole punching already serialize with i_mutex. Other callers of unmap_mapping_range() do not, and it's difficult to get i_mutex protection for all callers. In particular ->d_revalidate(), which calls invalidate_inode_pages2_range() in fuse, may be called with or without i_mutex. This patch adds a new mutex to 'struct address_space' to prevent running multiple concurrent unmap_mapping_range() on the same mapping. [ We'll hopefully get rid of all this with the upcoming mm preemptibility series by Peter Zijlstra, the "mm: Remove i_mmap_mutex lockbreak" patch in particular. But that is for 2.6.39 ] Signed-off-by: Miklos Szeredi Reported-by: Michael Leun Reported-by: Gurudas Pai Tested-by: Gurudas Pai Acked-by: Hugh Dickins Cc: stable@kernel.org Signed-off-by: Linus Torvalds --- fs/gfs2/main.c | 9 +-------- fs/inode.c | 22 +++++++++++++++------- fs/nilfs2/btnode.c | 5 ----- fs/nilfs2/btnode.h | 1 - fs/nilfs2/mdt.c | 4 ++-- fs/nilfs2/page.c | 13 ------------- fs/nilfs2/page.h | 1 - fs/nilfs2/super.c | 2 +- 8 files changed, 19 insertions(+), 38 deletions(-) (limited to 'fs') diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index 85ba027d1c4d..72c31a315d96 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -59,14 +59,7 @@ static void gfs2_init_gl_aspace_once(void *foo) struct address_space *mapping = (struct address_space *)(gl + 1); gfs2_init_glock_once(gl); - memset(mapping, 0, sizeof(*mapping)); - INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); - spin_lock_init(&mapping->tree_lock); - spin_lock_init(&mapping->i_mmap_lock); - INIT_LIST_HEAD(&mapping->private_list); - spin_lock_init(&mapping->private_lock); - INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); - INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); + address_space_init_once(mapping); } /** diff --git a/fs/inode.c b/fs/inode.c index da85e56378f3..9c2b795ccc93 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -295,6 +295,20 @@ static void destroy_inode(struct inode *inode) call_rcu(&inode->i_rcu, i_callback); } +void address_space_init_once(struct address_space *mapping) +{ + memset(mapping, 0, sizeof(*mapping)); + INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); + spin_lock_init(&mapping->tree_lock); + spin_lock_init(&mapping->i_mmap_lock); + INIT_LIST_HEAD(&mapping->private_list); + spin_lock_init(&mapping->private_lock); + INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); + INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); + mutex_init(&mapping->unmap_mutex); +} +EXPORT_SYMBOL(address_space_init_once); + /* * These are initializations that only need to be done * once, because the fields are idempotent across use @@ -308,13 +322,7 @@ void inode_init_once(struct inode *inode) INIT_LIST_HEAD(&inode->i_devices); INIT_LIST_HEAD(&inode->i_wb_list); INIT_LIST_HEAD(&inode->i_lru); - INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC); - spin_lock_init(&inode->i_data.tree_lock); - spin_lock_init(&inode->i_data.i_mmap_lock); - INIT_LIST_HEAD(&inode->i_data.private_list); - spin_lock_init(&inode->i_data.private_lock); - INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap); - INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear); + address_space_init_once(&inode->i_data); i_size_ordered_init(inode); #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&inode->i_fsnotify_marks); diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 388e9e8f5286..85f7baa15f5d 100644 --- 
a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -35,11 +35,6 @@ #include "btnode.h" -void nilfs_btnode_cache_init_once(struct address_space *btnc) -{ - nilfs_mapping_init_once(btnc); -} - static const struct address_space_operations def_btnode_aops = { .sync_page = block_sync_page, }; diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h index 79037494f1e0..1b8ebd888c28 100644 --- a/fs/nilfs2/btnode.h +++ b/fs/nilfs2/btnode.h @@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt { struct buffer_head *newbh; }; -void nilfs_btnode_cache_init_once(struct address_space *); void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *); void nilfs_btnode_cache_clear(struct address_space *); struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 6a0e2a189f60..a0babd2bff6a 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -454,9 +454,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode, struct backing_dev_info *bdi = inode->i_sb->s_bdi; INIT_LIST_HEAD(&shadow->frozen_buffers); - nilfs_mapping_init_once(&shadow->frozen_data); + address_space_init_once(&shadow->frozen_data); nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops); - nilfs_mapping_init_once(&shadow->frozen_btnodes); + address_space_init_once(&shadow->frozen_btnodes); nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops); mi->mi_shadow = shadow; return 0; diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 0c432416cfef..a585b35fd6bc 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -492,19 +492,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page, return nc; } -void nilfs_mapping_init_once(struct address_space *mapping) -{ - memset(mapping, 0, sizeof(*mapping)); - INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); - spin_lock_init(&mapping->tree_lock); - INIT_LIST_HEAD(&mapping->private_list); - spin_lock_init(&mapping->private_lock); - - spin_lock_init(&mapping->i_mmap_lock); - INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); - INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); -} - void nilfs_mapping_init(struct address_space *mapping, struct backing_dev_info *bdi, const struct address_space_operations *aops) diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h index 622df27cd891..2a00953ebd5f 100644 --- a/fs/nilfs2/page.h +++ b/fs/nilfs2/page.h @@ -61,7 +61,6 @@ void nilfs_free_private_page(struct page *); int nilfs_copy_dirty_pages(struct address_space *, struct address_space *); void nilfs_copy_back_pages(struct address_space *, struct address_space *); void nilfs_clear_dirty_pages(struct address_space *); -void nilfs_mapping_init_once(struct address_space *mapping); void nilfs_mapping_init(struct address_space *mapping, struct backing_dev_info *bdi, const struct address_space_operations *aops); diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 58fd707174e1..1673b3d99842 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -1279,7 +1279,7 @@ static void nilfs_inode_init_once(void *obj) #ifdef CONFIG_NILFS_XATTR init_rwsem(&ii->xattr_sem); #endif - nilfs_btnode_cache_init_once(&ii->i_btnode_cache); + address_space_init_once(&ii->i_btnode_cache); ii->i_bmap = &ii->i_bmap_data; inode_init_once(&ii->vfs_inode); } -- cgit v1.2.2 From 93b270f76e7ef3b81001576860c2701931cdc78b Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Thu, 24 Feb 2011 17:25:47 +1100 Subject: Fix over-zealous flush_disk when changing device size. There are two cases when we call flush_disk. 
In one, the device has disappeared (check_disk_change) so any data we hold becomes irrelevant. In the other, the device has changed size (check_disk_size_change) so data we hold may be irrelevant. In both cases it makes sense to discard any 'clean' buffers, so they will be read back from the device if needed. In the former case it makes sense to discard 'dirty' buffers as there will never be anywhere safe to write the data. In the second case it *does*not* make sense to discard dirty buffers as that will lead to file system corruption when you simply enlarge the containing device. flush_disk calls __invalidate_device. __invalidate_device calls both invalidate_inodes and invalidate_bdev. invalidate_inodes *does* discard I_DIRTY inodes and this does lead to fs corruption. invalidate_bdev *does*not* discard dirty pages, but I don't really care about that at present. So this patch adds a flag to __invalidate_device (calling it __invalidate_device2) to indicate whether dirty buffers should be killed, and this is passed to invalidate_inodes which can choose to skip dirty inodes. flush_disk then passes true from check_disk_change and false from check_disk_size_change. dm avoids tripping over this problem by calling i_size_write directly rather than using check_disk_size_change. md does use check_disk_size_change and so is affected. This regression was introduced by commit 608aeef17a which causes check_disk_size_change to call flush_disk, so it is suitable for any kernel since 2.6.27. Cc: stable@kernel.org Acked-by: Jeff Moyer Cc: Andrew Patterson Cc: Jens Axboe Signed-off-by: NeilBrown --- fs/block_dev.c | 12 ++++++------ fs/inode.c | 9 ++++++++- fs/internal.h | 2 +- 3 files changed, 15 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 333a7bb4cb9c..5e23152d04ad 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -927,9 +927,9 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); * when a disk has been changed -- either by a media change or online * resize. */ -static void flush_disk(struct block_device *bdev) +static void flush_disk(struct block_device *bdev, bool kill_dirty) { - if (__invalidate_device(bdev)) { + if (__invalidate_device(bdev, kill_dirty)) { char name[BDEVNAME_SIZE] = ""; if (bdev->bd_disk) @@ -966,7 +966,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev) "%s: detected capacity change from %lld to %lld\n", name, bdev_size, disk_size); i_size_write(bdev->bd_inode, disk_size); - flush_disk(bdev); + flush_disk(bdev, false); } } EXPORT_SYMBOL(check_disk_size_change); @@ -1019,7 +1019,7 @@ int check_disk_change(struct block_device *bdev) if (!(events & DISK_EVENT_MEDIA_CHANGE)) return 0; - flush_disk(bdev); + flush_disk(bdev, true); if (bdops->revalidate_disk) bdops->revalidate_disk(bdev->bd_disk); return 1; @@ -1601,7 +1601,7 @@ fail: } EXPORT_SYMBOL(lookup_bdev); -int __invalidate_device(struct block_device *bdev) +int __invalidate_device(struct block_device *bdev, bool kill_dirty) { struct super_block *sb = get_super(bdev); int res = 0; @@ -1614,7 +1614,7 @@ int __invalidate_device(struct block_device *bdev) * hold).
*/ shrink_dcache_sb(sb); - res = invalidate_inodes(sb); + res = invalidate_inodes(sb, kill_dirty); drop_super(sb); } invalidate_bdev(bdev); diff --git a/fs/inode.c b/fs/inode.c index da85e56378f3..c50d7feb87b1 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -540,11 +540,14 @@ void evict_inodes(struct super_block *sb) /** * invalidate_inodes - attempt to free all inodes on a superblock * @sb: superblock to operate on + * @kill_dirty: flag to guide handling of dirty inodes * * Attempts to free all inodes for a given superblock. If there were any * busy inodes return a non-zero value, else zero. + * If @kill_dirty is set, discard dirty inodes too, otherwise treat + * them as busy. */ -int invalidate_inodes(struct super_block *sb) +int invalidate_inodes(struct super_block *sb, bool kill_dirty) { int busy = 0; struct inode *inode, *next; @@ -556,6 +559,10 @@ int invalidate_inodes(struct super_block *sb) list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) continue; + if (inode->i_state & I_DIRTY && !kill_dirty) { + busy = 1; + continue; + } if (atomic_read(&inode->i_count)) { busy = 1; continue; diff --git a/fs/internal.h b/fs/internal.h index 0663568b1247..9b976b57d7fe 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -112,4 +112,4 @@ extern void release_open_intent(struct nameidata *); */ extern int get_nr_dirty_inodes(void); extern void evict_inodes(struct super_block *); -extern int invalidate_inodes(struct super_block *); +extern int invalidate_inodes(struct super_block *, bool); -- cgit v1.2.2 From bf9faa2aa30e2ebf30287536712ed2717bb47002 Mon Sep 17 00:00:00 2001 From: "J. R. Okajima" Date: Wed, 23 Feb 2011 16:59:49 +0900 Subject: Unlock vfsmount_lock in do_umount By commit b3e19d9 ("fs: scale mntget/mntput", 2011-01-07), vfsmount_lock was introduced around testing mnt_count. Fix the mis-typed 'unlock'. Signed-off-by: J. R. Okajima Acked-by: Al Viro Signed-off-by: Al Viro --- fs/namespace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/namespace.c b/fs/namespace.c index 7b0b95371696..d1edf26025dc 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1244,7 +1244,7 @@ static int do_umount(struct vfsmount *mnt, int flags) */ br_write_lock(vfsmount_lock); if (mnt_get_count(mnt) != 2) { - br_write_lock(vfsmount_lock); + br_write_unlock(vfsmount_lock); return -EBUSY; } br_write_unlock(vfsmount_lock); -- cgit v1.2.2 From e7407d1619713f4b1fdff3a485e1bd8e77bd480d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Thu, 24 Feb 2011 09:56:32 +0100 Subject: block: bd_link_disk_holder() should hold on to holder_dir The new implementation of bd_link_disk_holder() added by 49731baa41d (block: restore multiple bd_link_disk_holder() support) didn't get an extra reference for the holder_dir kobject of the slave bdev; however, bdev kills holder_dir on removal, not release, so if the slave bdev is removed while there are holder links, the holder_dir will be destroyed while there still are holder links, which leads to an oops later when bd_unlink_disk_holder() tries to remove those links. Make bd_link_disk_holder() grab an extra reference for the slave's holder_dir and put it in bd_unlink_disk_holder().
Signed-off-by: Tejun Heo Reported-by: "Hawrylewicz Czarnowski, Przemyslaw" Tested-by: "Hawrylewicz Czarnowski, Przemyslaw" Cc: Neil Brown Cc: Jens Axboe Signed-off-by: Linus Torvalds --- fs/block_dev.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index 4fb8a3431531..94d41db62004 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -873,6 +873,11 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); if (ret) goto out_del; + /* + * bdev could be deleted beneath us which would implicitly destroy + * the holder directory. Hold on to it. + */ + kobject_get(bdev->bd_part->holder_dir); list_add(&holder->list, &bdev->bd_holder_disks); goto out_unlock; @@ -909,6 +914,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); del_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); + kobject_put(bdev->bd_part->holder_dir); list_del_init(&holder->list); kfree(holder); } -- cgit v1.2.2 From 5a18ec176c934ca1bc9dc61580a5e0e90a9b5733 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Fri, 25 Feb 2011 14:44:58 +0100 Subject: fuse: fix hang of single threaded fuseblk filesystem Single threaded NTFS-3G could get stuck if a delayed RELEASE reply triggered a DESTROY request via path_put(). Fix this by a) making RELEASE requests synchronous, whenever possible, on fuseblk filesystems b) if not possible (triggered by an asynchronous read/write) then do the path_put() in a separate thread with schedule_work(). Reported-by: Oliver Neukum Cc: stable@kernel.org Signed-off-by: Miklos Szeredi --- fs/fuse/file.c | 52 +++++++++++++++++++++++++++++++++++++++++++++------- fs/fuse/fuse_i.h | 6 +++++- 2 files changed, 50 insertions(+), 8 deletions(-) (limited to 'fs') diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 95da1bc1c826..9e0832dbb1e3 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff) return ff; } +static void fuse_release_async(struct work_struct *work) +{ + struct fuse_req *req; + struct fuse_conn *fc; + struct path path; + + req = container_of(work, struct fuse_req, misc.release.work); + path = req->misc.release.path; + fc = get_fuse_conn(path.dentry->d_inode); + + fuse_put_request(fc, req); + path_put(&path); +} + static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) { - path_put(&req->misc.release.path); + if (fc->destroy_req) { + /* + * If this is a fuseblk mount, then it's possible that + * releasing the path will result in releasing the + * super block and sending the DESTROY request. If + * the server is single threaded, this would hang. + * For this reason do the path_put() in a separate + * thread. 
+ */ + atomic_inc(&req->count); + INIT_WORK(&req->misc.release.work, fuse_release_async); + schedule_work(&req->misc.release.work); + } else { + path_put(&req->misc.release.path); + } } -static void fuse_file_put(struct fuse_file *ff) +static void fuse_file_put(struct fuse_file *ff, bool sync) { if (atomic_dec_and_test(&ff->count)) { struct fuse_req *req = ff->reserved_req; - req->end = fuse_release_end; - fuse_request_send_background(ff->fc, req); + if (sync) { + fuse_request_send(ff->fc, req); + path_put(&req->misc.release.path); + fuse_put_request(ff->fc, req); + } else { + req->end = fuse_release_end; + fuse_request_send_background(ff->fc, req); + } kfree(ff); } } @@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode) * Normally this will send the RELEASE request, however if * some asynchronous READ or WRITE requests are outstanding, * the sending will be delayed. + * + * Make the release synchronous if this is a fuseblk mount, + * synchronous RELEASE is allowed (and desirable) in this case + * because the server can be trusted not to screw up. */ - fuse_file_put(ff); + fuse_file_put(ff, ff->fc->destroy_req != NULL); } static int fuse_open(struct inode *inode, struct file *file) @@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) page_cache_release(page); } if (req->ff) - fuse_file_put(req->ff); + fuse_file_put(req->ff, false); } static void fuse_send_readpages(struct fuse_req *req, struct file *file) @@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf, static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) { __free_page(req->pages[0]); - fuse_file_put(req->ff); + fuse_file_put(req->ff, false); } static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index ae5744a2f9e9..d4286947bc2c 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -21,6 +21,7 @@ #include #include #include +#include /** Max number of pages that can be used in a single read request */ #define FUSE_MAX_PAGES_PER_REQ 32 @@ -262,7 +263,10 @@ struct fuse_req { /** Data for asynchronous requests */ union { struct { - struct fuse_release_in in; + union { + struct fuse_release_in in; + struct work_struct work; + }; struct path path; } release; struct fuse_init_in init_in; -- cgit v1.2.2 From 8d56addd70c7c0626502569e22cc8fce49ae39f5 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Fri, 25 Feb 2011 14:44:58 +0100 Subject: fuse: fix truncate after open Commit e1181ee6 "vfs: pass struct file to do_truncate on O_TRUNC opens" broke the behavior of open(O_TRUNC|O_RDONLY) in fuse. Fuse assumed that when called from open, a truncate() will be done, not an ftruncate(). Fix by restoring the old behavior, based on the ATTR_OPEN flag. 
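The regressed sequence is easy to reproduce from user space; a minimal sketch (the path is made up, and the interesting case is a fuse filesystem without atomic_o_trunc, so the kernel itself must perform the truncation):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        /* assumed path on a fuse mount, for illustration only; the O_TRUNC
         * handling reaches fuse's setattr path with ATTR_OPEN set */
        int fd = open("/mnt/fuse/file", O_RDONLY | O_TRUNC);

        if (fd >= 0)
                close(fd);
        return 0;
}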
Signed-off-by: Miklos Szeredi --- fs/fuse/dir.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index bfed8447ed80..83543b5ff941 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1283,8 +1283,11 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr, if (err) return err; - if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc) - return 0; + if (attr->ia_valid & ATTR_OPEN) { + if (fc->atomic_o_trunc) + return 0; + file = NULL; + } if (attr->ia_valid & ATTR_SIZE) is_truncate = true; -- cgit v1.2.2 From f129ccc9231c95513a1227ca9da876beeb03e577 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Fri, 25 Feb 2011 15:33:02 +0000 Subject: afs: Fix oops in afs_unlink_writeback I'm seeing the following oops when testing afs: Unable to handle kernel paging request for data at address 0x00000008 ... NIP [c0000000003393b0] .afs_unlink_writeback+0x38/0xc0 LR [c00000000033987c] .afs_put_writeback+0x98/0xec Call Trace: [c00000000345f600] [c00000000033987c] .afs_put_writeback+0x98/0xec [c00000000345f690] [c00000000033ae80] .afs_write_begin+0x6a4/0x75c [c00000000345f790] [c00000000012b77c] .generic_file_buffered_write+0x148/0x320 [c00000000345f8d0] [c00000000012e1b8] .__generic_file_aio_write+0x37c/0x3e4 [c00000000345f9d0] [c00000000012e2a8] .generic_file_aio_write+0x88/0xfc [c00000000345fa90] [c0000000003390a8] .afs_file_write+0x10c/0x178 [c00000000345fb40] [c000000000188788] .do_sync_write+0xc4/0x128 [c00000000345fcc0] [c000000000189658] .vfs_write+0xe8/0x1d8 [c00000000345fd70] [c000000000189884] .SyS_write+0x68/0xb0 [c00000000345fe30] [c000000000008564] syscall_exit+0x0/0x40 afs_write_begin hits an error and calls afs_unlink_writeback. In there we do list_del_init on an uninitialised list. The patch below initialises ->link when creating the afs_writeback struct. Signed-off-by: Anton Blanchard Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- fs/afs/write.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/afs/write.c b/fs/afs/write.c index 15690bb1d3b5..789b3afb3423 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -140,6 +140,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping, candidate->first = candidate->last = index; candidate->offset_first = from; candidate->to_last = to; + INIT_LIST_HEAD(&candidate->link); candidate->usage = 1; candidate->state = AFS_WBACK_PENDING; init_waitqueue_head(&candidate->waitq); -- cgit v1.2.2 From 22bacca48a1755f79b7e0f192ddb9fbb7fc6e64e Mon Sep 17 00:00:00 2001 From: Davide Libenzi Date: Fri, 25 Feb 2011 14:44:12 -0800 Subject: epoll: prevent creating circular epoll structures In several places, an epoll fd can call another file's ->f_op->poll() method with ep->mtx held. This is in general unsafe, because that other file could itself be an epoll fd that contains the original epoll fd. The code defends against this possibility in its own ->poll() method using ep_call_nested, but there are several other unsafe calls to ->poll elsewhere that can be made to deadlock. 
For example, the following simple program causes the call in ep_insert to recursively call the original fd's ->poll, leading to deadlock: #include <unistd.h> #include <sys/epoll.h> int main(void) { int e1, e2, p[2]; struct epoll_event evt = { .events = EPOLLIN }; e1 = epoll_create(1); e2 = epoll_create(2); pipe(p); epoll_ctl(e2, EPOLL_CTL_ADD, e1, &evt); epoll_ctl(e1, EPOLL_CTL_ADD, p[0], &evt); write(p[1], p, sizeof p); epoll_ctl(e1, EPOLL_CTL_ADD, e2, &evt); return 0; } On insertion, check whether the inserted file is itself a struct epoll, and if so, do a recursive walk to detect whether inserting this file would create a loop of epoll structures, which could lead to deadlock. [nelhage@ksplice.com: Use epmutex to serialize concurrent inserts] Signed-off-by: Davide Libenzi Signed-off-by: Nelson Elhage Reported-by: Nelson Elhage Tested-by: Nelson Elhage Cc: [2.6.34+, possibly earlier] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/eventpoll.c | 95 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) (limited to 'fs') diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 267d0ada4541..4a09af9e9a63 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -63,6 +63,13 @@ * cleanup path and it is also acquired by eventpoll_release_file() * if a file has been pushed inside an epoll set and it is then * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL). + * It is also acquired when inserting an epoll fd onto another epoll + * fd. We do this so that we walk the epoll tree and ensure that this + * insertion does not create a cycle of epoll file descriptors, which + * could lead to deadlock. We need a global mutex to prevent two + * simultaneous inserts (A into B and B into A) from racing and + * constructing a cycle without either insert observing that it is + * going to. * It is possible to drop the "ep->mtx" and to use the global * mutex "epmutex" (together with "ep->lock") to have it working, * but having "ep->mtx" will make the interface more scalable. @@ -224,6 +231,9 @@ static long max_user_watches __read_mostly; */ static DEFINE_MUTEX(epmutex); +/* Used to check for epoll file descriptor inclusion loops */ +static struct nested_calls poll_loop_ncalls; + /* Used for safe wake up implementation */ static struct nested_calls poll_safewake_ncalls; @@ -1198,6 +1208,62 @@ retry: return res; } +/** + * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested() + * API, to verify that adding an epoll file inside another + * epoll structure, does not violate the constraints, in + * terms of closed loops, or too deep chains (which can + * result in excessive stack usage). + * + * @priv: Pointer to the epoll file to be currently checked. + * @cookie: Original cookie for this call. This is the top-of-the-chain epoll + * data structure pointer. + * @call_nests: Current dept of the @ep_call_nested() call stack. + * + * Returns: Returns zero if adding the epoll @file inside current epoll + * structure @ep does not violate the constraints, or -1 otherwise.
+ */ +static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) +{ + int error = 0; + struct file *file = priv; + struct eventpoll *ep = file->private_data; + struct rb_node *rbp; + struct epitem *epi; + + mutex_lock(&ep->mtx); + for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { + epi = rb_entry(rbp, struct epitem, rbn); + if (unlikely(is_file_epoll(epi->ffd.file))) { + error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ep_loop_check_proc, epi->ffd.file, + epi->ffd.file->private_data, current); + if (error != 0) + break; + } + } + mutex_unlock(&ep->mtx); + + return error; +} + +/** + * ep_loop_check - Performs a check to verify that adding an epoll file (@file) + * another epoll file (represented by @ep) does not create + * closed loops or too deep chains. + * + * @ep: Pointer to the epoll private data structure. + * @file: Pointer to the epoll file to be checked. + * + * Returns: Returns zero if adding the epoll @file inside current epoll + * structure @ep does not violate the constraints, or -1 otherwise. + */ +static int ep_loop_check(struct eventpoll *ep, struct file *file) +{ + return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ep_loop_check_proc, file, ep, current); +} + /* * Open an eventpoll file descriptor. */ @@ -1246,6 +1312,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, struct epoll_event __user *, event) { int error; + int did_lock_epmutex = 0; struct file *file, *tfile; struct eventpoll *ep; struct epitem *epi; @@ -1287,6 +1354,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, */ ep = file->private_data; + /* + * When we insert an epoll file descriptor, inside another epoll file + * descriptor, there is the change of creating closed loops, which are + * better be handled here, than in more critical paths. + * + * We hold epmutex across the loop check and the insert in this case, in + * order to prevent two separate inserts from racing and each doing the + * insert "at the same time" such that ep_loop_check passes on both + * before either one does the insert, thereby creating a cycle. + */ + if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) { + mutex_lock(&epmutex); + did_lock_epmutex = 1; + error = -ELOOP; + if (ep_loop_check(ep, tfile) != 0) + goto error_tgt_fput; + } + + mutex_lock(&ep->mtx); /* @@ -1322,6 +1408,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, mutex_unlock(&ep->mtx); error_tgt_fput: + if (unlikely(did_lock_epmutex)) + mutex_unlock(&epmutex); + fput(tfile); error_fput: fput(file); @@ -1441,6 +1530,12 @@ static int __init eventpoll_init(void) EP_ITEM_COST; BUG_ON(max_user_watches < 0); + /* + * Initialize the structure used to perform epoll file descriptor + * inclusion loops checks. + */ + ep_nested_calls_init(&poll_loop_ncalls); + /* Initialize the structure used to perform safe poll wait head wake ups */ ep_nested_calls_init(&poll_safewake_ncalls); -- cgit v1.2.2 From 294f6cf48666825d23c9372ef37631232746e40d Mon Sep 17 00:00:00 2001 From: Timo Warns Date: Fri, 25 Feb 2011 14:44:21 -0800 Subject: ldm: corrupted partition table can cause kernel oops The kernel automatically evaluates partition tables of storage devices. The code for evaluating LDM partitions (in fs/partitions/ldm.c) contains a bug that causes a kernel oops on certain corrupted LDM partitions. A kernel subsystem seems to crash, because, after the oops, the kernel no longer recognizes newly connected storage devices. The patch changes ldm_parse_vmdb() to Validate the value of vblk_size. 
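The check matters because the VBLK reading code later uses vblk_size as a divisor when working out how many records fit in a 512-byte sector, so a crafted table reporting vblk_size == 0 ends in a divide-by-zero oops. A simplified sketch of the validation and the computation it protects (an illustration, not an exact quote of ldm.c):

#include <stdio.h>

int main(void)
{
        unsigned int vblk_size = 0;     /* value as read from a crafted table */

        if (vblk_size == 0) {           /* the validation this patch adds */
                fprintf(stderr, "Illegal VBLK size\n");
                return 1;
        }
        /* without the check above, a zero size would divide by zero here */
        printf("%u records per 512-byte sector\n", 512 / vblk_size);
        return 0;
}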
Signed-off-by: Timo Warns Cc: Eugene Teo Acked-by: Richard Russon Cc: Harvey Harrison Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/partitions/ldm.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'fs') diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c index 789c625c7aa5..b10e3540d5b7 100644 --- a/fs/partitions/ldm.c +++ b/fs/partitions/ldm.c @@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm) } vm->vblk_size = get_unaligned_be32(data + 0x08); + if (vm->vblk_size == 0) { + ldm_error ("Illegal VBLK size"); + return false; + } + vm->vblk_offset = get_unaligned_be32(data + 0x0C); vm->last_vblk_seq = get_unaligned_be32(data + 0x04); -- cgit v1.2.2 From 3bd9a5d734c7cc7533b27abf451416c7f50095a7 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 25 Feb 2011 14:44:26 -0800 Subject: aio: fix rcu ioctx lookup aio-dio-invalidate-failure GPFs in aio_put_req from io_submit. lookup_ioctx doesn't implement the rcu lookup pattern properly. rcu_read_lock does not prevent refcount going to zero, so we might take a refcount on a zero count ioctx. Fix the bug by atomically testing for zero refcount before incrementing. [jack@suse.cz: added comment into the code] Reviewed-by: Jeff Moyer Signed-off-by: Nick Piggin Signed-off-by: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/aio.c | 35 ++++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index fc557a3be0a9..b4dd668fbccc 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -239,15 +239,23 @@ static void __put_ioctx(struct kioctx *ctx) call_rcu(&ctx->rcu_head, ctx_rcu_free); } -#define get_ioctx(kioctx) do { \ - BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ - atomic_inc(&(kioctx)->users); \ -} while (0) -#define put_ioctx(kioctx) do { \ - BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ - if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \ - __put_ioctx(kioctx); \ -} while (0) +static inline void get_ioctx(struct kioctx *kioctx) +{ + BUG_ON(atomic_read(&kioctx->users) <= 0); + atomic_inc(&kioctx->users); +} + +static inline int try_get_ioctx(struct kioctx *kioctx) +{ + return atomic_inc_not_zero(&kioctx->users); +} + +static inline void put_ioctx(struct kioctx *kioctx) +{ + BUG_ON(atomic_read(&kioctx->users) <= 0); + if (unlikely(atomic_dec_and_test(&kioctx->users))) + __put_ioctx(kioctx); +} /* ioctx_alloc * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. @@ -601,8 +609,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) rcu_read_lock(); hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { - if (ctx->user_id == ctx_id && !ctx->dead) { - get_ioctx(ctx); + /* + * RCU protects us against accessing freed memory but + * we have to be careful not to get a reference when the + * reference count already dropped to 0 (ctx->dead test + * is unreliable because of races). + */ + if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){ ret = ctx; break; } -- cgit v1.2.2 From 7137c6bd455234bcb7560fd829e6ee49cae5fed6 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Fri, 25 Feb 2011 14:44:27 -0800 Subject: aio: fix race between io_destroy() and io_submit() A race can occur when io_submit() races with io_destroy(): CPU1 CPU2 io_submit() do_io_submit() ... ctx = lookup_ioctx(ctx_id); io_destroy() Now do_io_submit() holds the last reference to ctx. ... 
queue new AIO put_ioctx(ctx) - frees ctx with active AIOs We solve this issue by checking whether ctx is being destroyed in AIO submission path after adding new AIO to ctx. Then we are guaranteed that either io_destroy() waits for new AIO or we see that ctx is being destroyed and bail out. Cc: Nick Piggin Reviewed-by: Jeff Moyer Signed-off-by: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/aio.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'fs') diff --git a/fs/aio.c b/fs/aio.c index b4dd668fbccc..26869cde3953 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1642,6 +1642,23 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, goto out_put_req; spin_lock_irq(&ctx->ctx_lock); + /* + * We could have raced with io_destroy() and are currently holding a + * reference to ctx which should be destroyed. We cannot submit IO + * since ctx gets freed as soon as io_submit() puts its reference. The + * check here is reliable: io_destroy() sets ctx->dead before waiting + * for outstanding IO and the barrier between these two is realized by + * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we + * increment ctx->reqs_active before checking for ctx->dead and the + * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we + * don't see ctx->dead set here, io_destroy() waits for our IO to + * finish. + */ + if (ctx->dead) { + spin_unlock_irq(&ctx->ctx_lock); + ret = -EINVAL; + goto out_put_req; + } aio_run_iocb(req); if (!list_empty(&ctx->run_list)) { /* drain the run list */ -- cgit v1.2.2 From e6eb5ce1b202ac9cdcfda5be559c9b9d8ec7542c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 26 Feb 2011 10:54:00 -0800 Subject: fs/block_dev.c: fix new kernel-doc warning Fix new kernel-doc warning in fs/block_dev.c: Warning(fs/block_dev.c:937): No description found for parameter 'kill_dirty' Signed-off-by: Randy Dunlap Signed-off-by: Linus Torvalds --- fs/block_dev.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/block_dev.c b/fs/block_dev.c index f05bf16cd979..889287019599 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -928,6 +928,7 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); * flush_disk - invalidates all buffer-cache entries on a disk * * @bdev: struct block device to be flushed + * @kill_dirty: flag to guide handling of dirty inodes * * Invalidates all buffer-cache entries on a disk. It should be called * when a disk has been changed -- either by a media change or online -- cgit v1.2.2 From 72746ac643928f6c3113b5aa783d8ea1b13949d2 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Mon, 28 Feb 2011 13:41:11 +0900 Subject: nilfs2: fix regression that i-flag is not set on changeless checkpoints According to the report from Jiro SEKIBA titled "regression in 2.6.37?" (Message-Id: <8739n8vs1f.wl%jir@sekiba.com>), on 2.6.37 and later kernels, lscp command no longer displays "i" flag on checkpoints that snapshot operations or garbage collection created. This is a regression of nilfs2 checkpointing function, and it's critical since it broke behavior of a part of nilfs2 applications. For instance, snapshot manager of TimeBrowse gets to create meaningless snapshots continuously; snapshot creation triggers another checkpoint, but applications cannot distinguish whether the new checkpoint contains meaningful changes or not without the i-flag. This patch fixes the regression and brings that application behavior back to normal. 
Reported-by: Jiro SEKIBA Signed-off-by: Ryusuke Konishi Tested-by: Ryusuke Konishi Tested-by: Jiro SEKIBA Cc: stable [2.6.37] --- fs/nilfs2/segment.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 55ebae5c7f39..2de9f636792a 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -430,7 +430,8 @@ static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci, nilfs_segctor_map_segsum_entry( sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); - if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) + if (NILFS_I(inode)->i_root && + !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); /* skip finfo */ } -- cgit v1.2.2 From af24ee9ea8d532e16883251a6684dfa1be8eec29 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 1 Mar 2011 17:50:00 +0000 Subject: xfs: zero proper structure size for geometry calls Commit 493f3358cb289ccf716c5a14fa5bb52ab75943e5 added this call to xfs_fs_geometry() in order to avoid passing kernel stack data back to user space: + memset(geo, 0, sizeof(*geo)); Unfortunately, one of the callers of that function passes the address of a smaller data type, cast to fit the type that xfs_fs_geometry() requires. As a result, this can happen: Kernel panic - not syncing: stack-protector: Kernel stack is corrupted in: f87aca93 Pid: 262, comm: xfs_fsr Not tainted 2.6.38-rc6-493f3358cb2+ #1 Call Trace: [] ? panic+0x50/0x150 [] ? __stack_chk_fail+0x10/0x18 [] ? xfs_ioc_fsgeometry_v1+0x56/0x5d [xfs] Fix this by fixing that one caller to pass the right type and then copy out the subset it is interested in. Note: This patch is an alternative to one originally proposed by Eric Sandeen. Reported-by: Jeffrey Hundstad Signed-off-by: Alex Elder Reviewed-by: Eric Sandeen Tested-by: Jeffrey Hundstad --- fs/xfs/linux-2.6/xfs_ioctl.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'fs') diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index f5e2a19e0f8e..0ca0e3c024d7 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -695,14 +695,19 @@ xfs_ioc_fsgeometry_v1( xfs_mount_t *mp, void __user *arg) { - xfs_fsop_geom_v1_t fsgeo; + xfs_fsop_geom_t fsgeo; int error; - error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3); + error = xfs_fs_geometry(mp, &fsgeo, 3); if (error) return -error; - if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) + /* + * Caller should have passed an argument of type + * xfs_fsop_geom_v1_t. This is a proper subset of the + * xfs_fsop_geom_t that xfs_fs_geometry() fills in. + */ + if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t))) return -XFS_ERROR(EFAULT); return 0; } -- cgit v1.2.2 From e8a80c6f769dd4622d8b211b398452158ee60c0b Mon Sep 17 00:00:00 2001 From: Josh Hunt Date: Thu, 24 Feb 2011 11:48:22 +0100 Subject: ext2: Fix link count corruption under heavy link+rename load vfs_rename_other() does not lock renamed inode with i_mutex. Thus changing i_nlink in a non-atomic manner (which happens in ext2_rename()) can corrupt it as reported and analyzed by Josh. In fact, there is no good reason to mess with i_nlink of the moved file. We did it presumably to simulate linking into the new directory and unlinking from an old one. But the practical effect of this is disputable because fsck can possibly treat file as being properly linked into both directories without writing any error which is confusing. 
So we just stop increment-decrement games with i_nlink which also fixes the corruption. CC: stable@kernel.org CC: Al Viro Signed-off-by: Josh Hunt Signed-off-by: Jan Kara --- fs/ext2/namei.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 2e1d8341d827..adb91855ccd0 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); ext2_set_link(new_dir, new_de, new_page, old_inode, 1); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) @@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, if (new_dir->i_nlink >= EXT2_LINK_MAX) goto out_dir; } - inode_inc_link_count(old_inode); err = ext2_add_link(new_dentry, old_inode); - if (err) { - inode_dec_link_count(old_inode); + if (err) goto out_dir; - } if (dir_de) inode_inc_link_count(new_dir); } @@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, /* * Like most other Unix systems, set the ctime for inodes on a * rename. - * inode_dec_link_count() will mark the inode dirty. */ old_inode->i_ctime = CURRENT_TIME_SEC; + mark_inode_dirty(old_inode); ext2_delete_entry (old_de, old_page); - inode_dec_link_count(old_inode); if (dir_de) { if (old_dir != new_dir) -- cgit v1.2.2 From 8aaccf7fa2a2f148db1edbe7b09e3119c3f910cf Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Mon, 14 Feb 2011 22:34:22 +0100 Subject: of/flattree: Drop an uninteresting message to pr_debug level This message looks like an error (which it isn't) when booting with a flattened device tree. Remove the message from normal kernel builds. 
Signed-off-by: Paul Bolle Signed-off-by: Grant Likely --- fs/proc/proc_devtree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c index d9396a4fc7ff..927cbd115e53 100644 --- a/fs/proc/proc_devtree.c +++ b/fs/proc/proc_devtree.c @@ -233,7 +233,7 @@ void __init proc_device_tree_init(void) return; root = of_find_node_by_path("/"); if (root == NULL) { - printk(KERN_ERR "/proc/device-tree: can't find root\n"); + pr_debug("/proc/device-tree: can't find root\n"); return; } proc_device_tree_add_node(root, proc_device_tree); -- cgit v1.2.2 From 4787d45fa76b97e224a8299086bb5fb496275796 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 2 Mar 2011 09:38:45 -0500 Subject: sysv: i_nlink races in rename() Signed-off-by: Al Viro --- fs/sysv/namei.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index b427b1208c26..e474fbcf8bde 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -245,7 +245,6 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, new_de = sysv_find_entry(new_dentry, &new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); sysv_set_link(new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) @@ -257,18 +256,15 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) goto out_dir; } - inode_inc_link_count(old_inode); err = sysv_add_link(new_dentry, old_inode); - if (err) { - inode_dec_link_count(old_inode); + if (err) goto out_dir; - } if (dir_de) inode_inc_link_count(new_dir); } sysv_delete_entry(old_de, old_page); - inode_dec_link_count(old_inode); + mark_inode_dirty(old_inode); if (dir_de) { sysv_set_link(dir_de, dir_page, new_dir); -- cgit v1.2.2 From 37750cdda36721fa7fa816f5f58258d2c013b248 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 2 Mar 2011 09:40:21 -0500 Subject: ufs: i_nlink races in rename() Signed-off-by: Al Viro --- fs/ufs/namei.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 12f39b9e4437..d6f681535eb8 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -306,7 +306,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); ufs_set_link(new_dir, new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) @@ -318,12 +317,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_dir->i_nlink >= UFS_LINK_MAX) goto out_dir; } - inode_inc_link_count(old_inode); err = ufs_add_link(new_dentry, old_inode); - if (err) { - inode_dec_link_count(old_inode); + if (err) goto out_dir; - } if (dir_de) inode_inc_link_count(new_dir); } @@ -331,12 +327,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, /* * Like most other Unix systems, set the ctime for inodes on a * rename. - * inode_dec_link_count() will mark the inode dirty. 
*/ old_inode->i_ctime = CURRENT_TIME_SEC; ufs_delete_entry(old_dir, old_de, old_page); - inode_dec_link_count(old_inode); + mark_inode_dirty(old_inode); if (dir_de) { ufs_set_link(old_inode, dir_de, dir_page, new_dir); -- cgit v1.2.2 From 6f88049caf56022c773272e03ddfa8cf79867059 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 2 Mar 2011 09:41:38 -0500 Subject: minix: i_nlink races in rename() Signed-off-by: Al Viro --- fs/minix/namei.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/minix/namei.c b/fs/minix/namei.c index ce7337ddfdbf..6e6777f1b4b2 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -213,7 +213,6 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, new_de = minix_find_entry(new_dentry, &new_page); if (!new_de) goto out_dir; - inode_inc_link_count(old_inode); minix_set_link(new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME_SEC; if (dir_de) @@ -225,18 +224,15 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, if (new_dir->i_nlink >= info->s_link_max) goto out_dir; } - inode_inc_link_count(old_inode); err = minix_add_link(new_dentry, old_inode); - if (err) { - inode_dec_link_count(old_inode); + if (err) goto out_dir; - } if (dir_de) inode_inc_link_count(new_dir); } minix_delete_entry(old_de, old_page); - inode_dec_link_count(old_inode); + mark_inode_dirty(old_inode); if (dir_de) { minix_set_link(dir_de, dir_page, new_dir); -- cgit v1.2.2 From 30eb43d31478f0fca28423623f3ec6af13f845fa Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 2 Mar 2011 12:01:13 -0500 Subject: nilfs2: i_nlink races in rename() Signed-off-by: Al Viro --- fs/nilfs2/namei.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'fs') diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 98034271cd02..161791d26458 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -397,7 +397,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); if (!new_de) goto out_dir; - inc_nlink(old_inode); nilfs_set_link(new_dir, new_de, new_page, old_inode); nilfs_mark_inode_dirty(new_dir); new_inode->i_ctime = CURRENT_TIME; @@ -411,13 +410,9 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_dir->i_nlink >= NILFS_LINK_MAX) goto out_dir; } - inc_nlink(old_inode); err = nilfs_add_link(new_dentry, old_inode); - if (err) { - drop_nlink(old_inode); - nilfs_mark_inode_dirty(old_inode); + if (err) goto out_dir; - } if (dir_de) { inc_nlink(new_dir); nilfs_mark_inode_dirty(new_dir); @@ -431,7 +426,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, old_inode->i_ctime = CURRENT_TIME; nilfs_delete_entry(old_de, old_page); - drop_nlink(old_inode); if (dir_de) { nilfs_set_link(old_inode, dir_de, dir_page, new_dir); -- cgit v1.2.2 From babfe56046885749b6a90a3c4409219a1f16cf48 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 2 Mar 2011 16:42:38 -0500 Subject: exofs: i_nlink races in rename() Signed-off-by: Al Viro --- fs/exofs/namei.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c index 264e95d02830..4d70db110cfc 100644 --- a/fs/exofs/namei.c +++ b/fs/exofs/namei.c @@ -272,7 +272,6 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, new_de = exofs_find_entry(new_dir, new_dentry, &new_page); if (!new_de) goto out_dir; - 
inode_inc_link_count(old_inode); err = exofs_set_link(new_dir, new_de, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME; if (dir_de) @@ -286,12 +285,9 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, if (new_dir->i_nlink >= EXOFS_LINK_MAX) goto out_dir; } - inode_inc_link_count(old_inode); err = exofs_add_link(new_dentry, old_inode); - if (err) { - inode_dec_link_count(old_inode); + if (err) goto out_dir; - } if (dir_de) inode_inc_link_count(new_dir); } @@ -299,7 +295,7 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, old_inode->i_ctime = CURRENT_TIME; exofs_delete_entry(old_de, old_page); - inode_dec_link_count(old_inode); + mark_inode_dirty(old_inode); if (dir_de) { err = exofs_set_link(old_inode, dir_de, dir_page, new_dir); -- cgit v1.2.2 From 99890a3be1ee67346300f1e0a873006588760f2a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 2 Mar 2011 09:35:13 -0500 Subject: fix reiserfs mkdir() breakage if directory has so many subdirectories that its link count is set to 1 (i.e. "can't tell accurately") and reiserfs_new_inode() fails, we shouldn't decrement the parent's link count in cleanup path; that's what DEC_DIR_INODE_NLINK() is for. As it is, we end up with parent suddenly getting zero i_nlink, with very unpleasant effects. Signed-off-by: Al Viro --- fs/reiserfs/namei.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs') diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index ba5f51ec3458..68fdf45cc6c9 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -771,7 +771,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, dentry, inode, &security); if (retval) { - dir->i_nlink--; + DEC_DIR_INODE_NLINK(dir) goto out_failed; } -- cgit v1.2.2 From 810c1b2e48d32a8605928c3609262d94853c3a76 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 2 Mar 2011 10:15:26 -0500 Subject: udf: fix i_nlink limit (256 << sizeof(x)) - 1 is not the maximal possible value of x... In reality, the maximal allowed value for UDF FileLinkCount is 65535. 
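To make the arithmetic concrete, here is a minimal user-space sketch (illustrative only, not part of any patch; the variable names are made up): the old expression shifts by sizeof(x) rather than 8 * sizeof(x), so it evaluates to 1023 for a 2-byte counter and 4095 for a 4-byte one, nowhere near the 16-bit FileLinkCount ceiling of 65535.

#include <stdio.h>

int main(void)
{
        unsigned short link16;  /* stand-in for a 16-bit on-disk link count */
        unsigned int link32;    /* stand-in for the wider in-core i_nlink   */

        /* the old test: (256 << sizeof(x)) - 1 */
        printf("%d\n", (256 << sizeof(link16)) - 1);    /* 1023 */
        printf("%d\n", (256 << sizeof(link32)) - 1);    /* 4095 */

        /* the real UDF FileLinkCount maximum */
        printf("%d\n", 0xffff);                         /* 65535 */
        return 0;
}

The patch below therefore drops the open-coded expression in favour of a named constant, UDF_MAX_LINKS = 0xffff.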
Signed-off-by: Al Viro --- fs/udf/namei.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'fs') diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 2be0f9eb86d2..b7c338d5e9df 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -32,6 +32,8 @@ #include #include +enum { UDF_MAX_LINKS = 0xffff }; + static inline int udf_match(int len1, const unsigned char *name1, int len2, const unsigned char *name2) { @@ -650,7 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode) struct udf_inode_info *iinfo; err = -EMLINK; - if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) + if (dir->i_nlink >= UDF_MAX_LINKS) goto out; err = -EIO; @@ -1034,9 +1036,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, struct fileIdentDesc cfi, *fi; int err; - if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { + if (inode->i_nlink >= UDF_MAX_LINKS) return -EMLINK; - } fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) { @@ -1131,9 +1132,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, goto end_rename; retval = -EMLINK; - if (!new_inode && - new_dir->i_nlink >= - (256 << sizeof(new_dir->i_nlink)) - 1) + if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS) goto end_rename; } if (!nfi) { -- cgit v1.2.2 From 69102e9b4b61f56a26717659ec2e572a6b18458d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 2 Mar 2011 23:46:51 -0500 Subject: hfs: fix rename() over non-empty directory merge hfs_unlink() and hfs_rmdir(), while we are at it. Signed-off-by: Al Viro --- fs/hfs/dir.c | 50 +++++++++++++------------------------------------- 1 file changed, 13 insertions(+), 37 deletions(-) (limited to 'fs') diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c index afa66aaa2237..b4d70b13be92 100644 --- a/fs/hfs/dir.c +++ b/fs/hfs/dir.c @@ -238,46 +238,22 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) } /* - * hfs_unlink() + * hfs_remove() * - * This is the unlink() entry in the inode_operations structure for - * regular HFS directories. The purpose is to delete an existing - * file, given the inode for the parent directory and the name - * (and its length) of the existing file. - */ -static int hfs_unlink(struct inode *dir, struct dentry *dentry) -{ - struct inode *inode; - int res; - - inode = dentry->d_inode; - res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); - if (res) - return res; - - drop_nlink(inode); - hfs_delete_inode(inode); - inode->i_ctime = CURRENT_TIME_SEC; - mark_inode_dirty(inode); - - return res; -} - -/* - * hfs_rmdir() + * This serves as both unlink() and rmdir() in the inode_operations + * structure for regular HFS directories. The purpose is to delete + * an existing child, given the inode for the parent directory and + * the name (and its length) of the existing directory. * - * This is the rmdir() entry in the inode_operations structure for - * regular HFS directories. The purpose is to delete an existing - * directory, given the inode for the parent directory and the name - * (and its length) of the existing directory. + * HFS does not have hardlinks, so both rmdir and unlink set the + * link count to 0. The only difference is the emptiness check. 
*/ -static int hfs_rmdir(struct inode *dir, struct dentry *dentry) +static int hfs_remove(struct inode *dir, struct dentry *dentry) { - struct inode *inode; + struct inode *inode = dentry->d_inode; int res; - inode = dentry->d_inode; - if (inode->i_size != 2) + if (S_ISDIR(inode->i_mode) && inode->i_size != 2) return -ENOTEMPTY; res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); if (res) @@ -307,7 +283,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry, /* Unlink destination if it already exists */ if (new_dentry->d_inode) { - res = hfs_unlink(new_dir, new_dentry); + res = hfs_remove(new_dir, new_dentry); if (res) return res; } @@ -332,9 +308,9 @@ const struct file_operations hfs_dir_operations = { const struct inode_operations hfs_dir_inode_operations = { .create = hfs_create, .lookup = hfs_lookup, - .unlink = hfs_unlink, + .unlink = hfs_remove, .mkdir = hfs_mkdir, - .rmdir = hfs_rmdir, + .rmdir = hfs_remove, .rename = hfs_rename, .setattr = hfs_inode_setattr, }; -- cgit v1.2.2 From 9bde178d052418af0b8e0f12932cf02ab4764c9d Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 28 Feb 2011 09:47:37 -0800 Subject: Revert "ceph: keep reference to parent inode on ceph_dentry" This reverts commit 97d79b403ef03f729883246208ef5d8a2ebc4d68. This fails to account for d_parent changes due to rename or disconnected dentries due to submounts or NFS reexports. Signed-off-by: Sage Weil --- fs/ceph/dir.c | 5 +---- fs/ceph/super.h | 1 - 2 files changed, 1 insertion(+), 5 deletions(-) (limited to 'fs') diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index f0aef787a102..0bc68de8edd7 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -60,7 +60,6 @@ int ceph_init_dentry(struct dentry *dentry) } di->dentry = dentry; di->lease_session = NULL; - di->parent_inode = igrab(dentry->d_parent->d_inode); dentry->d_fsdata = di; dentry->d_time = jiffies; ceph_dentry_lru_add(dentry); @@ -1034,7 +1033,7 @@ static void ceph_dentry_release(struct dentry *dentry) u64 snapid = CEPH_NOSNAP; if (!IS_ROOT(dentry)) { - parent_inode = di->parent_inode; + parent_inode = dentry->d_parent->d_inode; if (parent_inode) snapid = ceph_snap(parent_inode); } @@ -1059,8 +1058,6 @@ static void ceph_dentry_release(struct dentry *dentry) kmem_cache_free(ceph_dentry_cachep, di); dentry->d_fsdata = NULL; } - if (parent_inode) - iput(parent_inode); } static int ceph_snapdir_d_revalidate(struct dentry *dentry, diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 88fcaa21b801..20b907d76ae2 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -207,7 +207,6 @@ struct ceph_dentry_info { struct dentry *dentry; u64 time; u64 offset; - struct inode *parent_inode; }; struct ceph_inode_xattrs_info { -- cgit v1.2.2 From b545cc1505eb49247071ce9f4092665de788ca00 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 28 Feb 2011 12:46:46 -0800 Subject: ceph: do not set I_COMPLETE Do not set the I_COMPLETE flag on directories until we resolve races with dcache pruning. 
Signed-off-by: Sage Weil --- fs/ceph/dir.c | 2 +- fs/ceph/inode.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 0bc68de8edd7..9b4f9d9947b3 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -409,7 +409,7 @@ more: spin_lock(&inode->i_lock); if (ci->i_release_count == fi->dir_release_count) { dout(" marking %p complete\n", inode); - ci->i_ceph_flags |= CEPH_I_COMPLETE; + /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */ ci->i_max_offset = filp->f_pos; } spin_unlock(&inode->i_lock); diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 5625463aa479..193bfa5e9cbd 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -707,7 +707,7 @@ static int fill_inode(struct inode *inode, (issued & CEPH_CAP_FILE_EXCL) == 0 && (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { dout(" marking %p complete (empty)\n", inode); - ci->i_ceph_flags |= CEPH_I_COMPLETE; + /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */ ci->i_max_offset = 2; } break; -- cgit v1.2.2 From 16a8b70a5a757db513f036bbcc73309f6c507d81 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 28 Feb 2011 12:49:15 -0800 Subject: ceph: do not clear I_COMPLETE from d_release First, this was racy anyway: d_release isn't called until well after the dentry is unhashed. Second, this runs afoul of the recent dcache change that clears d_parent prior to calling d_release (949854d0), causing a NULL pointer dereference. Signed-off-by: Sage Weil --- fs/ceph/dir.c | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) (limited to 'fs') diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 9b4f9d9947b3..196fd4c62db7 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -1029,28 +1029,8 @@ out_touch: static void ceph_dentry_release(struct dentry *dentry) { struct ceph_dentry_info *di = ceph_dentry(dentry); - struct inode *parent_inode = NULL; - u64 snapid = CEPH_NOSNAP; - if (!IS_ROOT(dentry)) { - parent_inode = dentry->d_parent->d_inode; - if (parent_inode) - snapid = ceph_snap(parent_inode); - } - dout("dentry_release %p parent %p\n", dentry, parent_inode); - if (parent_inode && snapid != CEPH_SNAPDIR) { - struct ceph_inode_info *ci = ceph_inode(parent_inode); - - spin_lock(&parent_inode->i_lock); - if (ci->i_shared_gen == di->lease_shared_gen || - snapid <= CEPH_MAXSNAP) { - dout(" clearing %p complete (d_release)\n", - parent_inode); - ci->i_ceph_flags &= ~CEPH_I_COMPLETE; - ci->i_release_count++; - } - spin_unlock(&parent_inode->i_lock); - } + dout("dentry_release %p\n", dentry); if (di) { ceph_dentry_lru_del(dentry); if (di->lease_session) -- cgit v1.2.2 From 455cec0abff563574cca432ced49f734117ca113 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 3 Mar 2011 13:44:35 -0800 Subject: ceph: no .snap inside of snapped namespace Otherwise you can do things like # mkdir .snap/foo # cd .snap/foo/.snap # ls Signed-off-by: Sage Weil --- fs/ceph/dir.c | 1 + 1 file changed, 1 insertion(+) (limited to 'fs') diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 196fd4c62db7..099a58615b90 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -496,6 +496,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, /* .snap dir? 
*/ if (err == -ENOENT && + ceph_snap(parent) == CEPH_NOSNAP && strcmp(dentry->d_name.name, fsc->mount_options->snapdir_name) == 0) { struct inode *inode = ceph_get_snapdir(parent); -- cgit v1.2.2 From e9e3d724e2145f5039b423c290ce2b2c3d8f94bc Mon Sep 17 00:00:00 2001 From: Neil Horman Date: Fri, 4 Mar 2011 19:26:03 -0500 Subject: nfs4: Ensure that ACL pages sent over NFS were not allocated from the slab (v3) The "bad_page()" page allocator sanity check was reported recently (call chain as follows): bad_page+0x69/0x91 free_hot_cold_page+0x81/0x144 skb_release_data+0x5f/0x98 __kfree_skb+0x11/0x1a tcp_ack+0x6a3/0x1868 tcp_rcv_established+0x7a6/0x8b9 tcp_v4_do_rcv+0x2a/0x2fa tcp_v4_rcv+0x9a2/0x9f6 do_timer+0x2df/0x52c ip_local_deliver+0x19d/0x263 ip_rcv+0x539/0x57c netif_receive_skb+0x470/0x49f :virtio_net:virtnet_poll+0x46b/0x5c5 net_rx_action+0xac/0x1b3 __do_softirq+0x89/0x133 call_softirq+0x1c/0x28 do_softirq+0x2c/0x7d do_IRQ+0xec/0xf5 default_idle+0x0/0x50 ret_from_intr+0x0/0xa default_idle+0x29/0x50 cpu_idle+0x95/0xb8 start_kernel+0x220/0x225 _sinittext+0x22f/0x236 It occurs because an skb with a fraglist was freed from the tcp retransmit queue when it was acked, but a page on that fraglist had PG_Slab set (indicating it was allocated from the slab allocator), which means the free path above can't safely free it via put_page. We tracked this back to an nfsv4 setacl operation, in which the nfs code attempted to convert the passed-in buffer to an array of pages in __nfs4_proc_set_acl, which gets used by the skb->frags list in xs_sendpages. __nfs4_proc_set_acl just converts each page in the buffer to a page struct via virt_to_page, but the vfs allocates the buffer via kmalloc, meaning the PG_slab bit is set. We can't create a buffer with kmalloc and free it later in the tcp ack path with put_page, so we need to either: 1) ensure that when we create the list of pages, no page struct has PG_Slab set, or 2) not use a page list to send this data. Given that these buffers can be multiple pages and arbitrarily sized, I think (1) is the right way to go. I've written the below patch to allocate a page from the buddy allocator directly and copy the data over to it. This ensures that we have a put_page-freeable page for every entry that winds up on an skb frag list, so it can be safely freed when the frame is acked. We do a put_page on each entry after the rpc_call_sync call so as to drop our own reference count to the page, leaving only the ref count taken by tcp_sendpages. This way the data will be properly freed when the ack comes in. Successfully tested by myself to solve the above oops. Note, as this is the result of a setacl operation that exceeded a page of data, I think this amounts to a local DoS triggerable by an unprivileged user, so I'm CCing security on this as well. 
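For illustration only, here is a minimal, hypothetical module sketch (not part of this patch; the module and symbol names are invented): with the common SLAB/SLUB configurations a kmalloc() buffer is backed by a slab-owned page, so handing virt_to_page() of it to code that will later put_page() it is exactly the kind of mistake the bad_page() check catches.

/*
 * Illustrative sketch only; prints whether the page backing a kmalloc()
 * buffer is slab-owned.  Behaviour depends on the configured allocator.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>

static int __init slabpage_demo_init(void)
{
        void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        struct page *page;

        if (!buf)
                return -ENOMEM;

        page = virt_to_page(buf);
        pr_info("PageSlab() on a kmalloc'ed buffer: %d\n", PageSlab(page));
        /* put_page(page) here would be the bug: the page is slab-owned. */

        kfree(buf);
        return 0;
}

static void __exit slabpage_demo_exit(void)
{
}

module_init(slabpage_demo_init);
module_exit(slabpage_demo_exit);
MODULE_LICENSE("GPL");

buf_to_pages_noslab() in the patch below avoids this by allocating each page with alloc_page() and copying the data into it, so every page the network stack sees is an ordinary refcounted page-allocator page that can safely be released with put_page().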
Signed-off-by: Neil Horman CC: Trond Myklebust CC: security@kernel.org CC: Jeff Layton Signed-off-by: Linus Torvalds --- fs/nfs/nfs4proc.c | 44 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) (limited to 'fs') diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 78936a8f40ab..1ff76acc7e98 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -51,6 +51,7 @@ #include #include #include +#include #include "nfs4_fs.h" #include "delegation.h" @@ -3252,6 +3253,35 @@ static void buf_to_pages(const void *buf, size_t buflen, } } +static int buf_to_pages_noslab(const void *buf, size_t buflen, + struct page **pages, unsigned int *pgbase) +{ + struct page *newpage, **spages; + int rc = 0; + size_t len; + spages = pages; + + do { + len = min(PAGE_CACHE_SIZE, buflen); + newpage = alloc_page(GFP_KERNEL); + + if (newpage == NULL) + goto unwind; + memcpy(page_address(newpage), buf, len); + buf += len; + buflen -= len; + *pages++ = newpage; + rc++; + } while (buflen != 0); + + return rc; + +unwind: + for(; rc > 0; rc--) + __free_page(spages[rc-1]); + return -ENOMEM; +} + struct nfs4_cached_acl { int cached; size_t len; @@ -3420,13 +3450,23 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl .rpc_argp = &arg, .rpc_resp = &res, }; - int ret; + int ret, i; if (!nfs4_server_supports_acls(server)) return -EOPNOTSUPP; + i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); + if (i < 0) + return i; nfs_inode_return_delegation(inode); - buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); ret = nfs4_call_sync(server, &msg, &arg, &res, 1); + + /* + * Free each page after tx, so the only ref left is + * held by the network stack + */ + for (; i > 0; i--) + put_page(pages[i-1]); + /* * Acl update can result in inode attribute update. * so mark the attribute cache invalid. -- cgit v1.2.2