author    David S. Miller <davem@davemloft.net>  2010-12-27 01:37:05 -0500
committer David S. Miller <davem@davemloft.net>  2010-12-27 01:37:05 -0500
commit    17f7f4d9fcce8f1b75b5f735569309dee7665968 (patch)
tree      14d7e49ca0053a0fcab3c33b5023bf3f90c5c08a /fs
parent    041110a439e21cd40709ead4ffbfa8034619ad77 (diff)
parent    d7c1255a3a21e98bdc64df8ccf005a174d7e6289 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	net/ipv4/fib_frontend.c
Diffstat (limited to 'fs')
-rw-r--r--  fs/autofs4/root.c | 12
-rw-r--r--  fs/block_dev.c | 1
-rw-r--r--  fs/btrfs/compression.c | 15
-rw-r--r--  fs/btrfs/ctree.h | 6
-rw-r--r--  fs/btrfs/disk-io.c | 41
-rw-r--r--  fs/btrfs/export.c | 78
-rw-r--r--  fs/btrfs/extent-tree.c | 77
-rw-r--r--  fs/btrfs/extent_io.c | 77
-rw-r--r--  fs/btrfs/extent_io.h | 3
-rw-r--r--  fs/btrfs/file.c | 99
-rw-r--r--  fs/btrfs/free-space-cache.c | 12
-rw-r--r--  fs/btrfs/inode.c | 299
-rw-r--r--  fs/btrfs/ioctl.c | 87
-rw-r--r--  fs/btrfs/ioctl.h | 14
-rw-r--r--  fs/btrfs/ordered-data.c | 67
-rw-r--r--  fs/btrfs/ordered-data.h | 3
-rw-r--r--  fs/btrfs/orphan.c | 6
-rw-r--r--  fs/btrfs/super.c | 43
-rw-r--r--  fs/btrfs/transaction.c | 5
-rw-r--r--  fs/btrfs/tree-log.c | 21
-rw-r--r--  fs/btrfs/volumes.c | 20
-rw-r--r--  fs/btrfs/volumes.h | 2
-rw-r--r--  fs/ceph/addr.c | 6
-rw-r--r--  fs/ceph/caps.c | 17
-rw-r--r--  fs/ceph/dir.c | 23
-rw-r--r--  fs/ceph/file.c | 65
-rw-r--r--  fs/ceph/inode.c | 50
-rw-r--r--  fs/ceph/ioctl.h | 2
-rw-r--r--  fs/ceph/locks.c | 94
-rw-r--r--  fs/ceph/mds_client.c | 49
-rw-r--r--  fs/ceph/mds_client.h | 33
-rw-r--r--  fs/ceph/super.h | 4
-rw-r--r--  fs/cifs/Kconfig | 8
-rw-r--r--  fs/cifs/Makefile | 4
-rw-r--r--  fs/cifs/README | 9
-rw-r--r--  fs/cifs/cifs_fs_sb.h | 1
-rw-r--r--  fs/cifs/cifsacl.c | 51
-rw-r--r--  fs/cifs/cifsacl.h | 4
-rw-r--r--  fs/cifs/cifsfs.c | 5
-rw-r--r--  fs/cifs/cifsglob.h | 12
-rw-r--r--  fs/cifs/cifsproto.h | 11
-rw-r--r--  fs/cifs/cifssmb.c | 183
-rw-r--r--  fs/cifs/connect.c | 47
-rw-r--r--  fs/cifs/dns_resolve.c | 2
-rw-r--r--  fs/cifs/file.c | 6
-rw-r--r--  fs/cifs/fscache.c | 12
-rw-r--r--  fs/cifs/inode.c | 57
-rw-r--r--  fs/cifs/readdir.c | 41
-rw-r--r--  fs/cifs/xattr.c | 55
-rw-r--r--  fs/compat.c | 28
-rw-r--r--  fs/compat_ioctl.c | 1
-rw-r--r--  fs/ecryptfs/super.c | 1
-rw-r--r--  fs/exec.c | 41
-rw-r--r--  fs/ext3/super.c | 1
-rw-r--r--  fs/ext4/ext4.h | 1
-rw-r--r--  fs/ext4/inode.c | 5
-rw-r--r--  fs/ext4/ioctl.c | 24
-rw-r--r--  fs/ext4/namei.c | 2
-rw-r--r--  fs/ext4/page-io.c | 4
-rw-r--r--  fs/ext4/resize.c | 5
-rw-r--r--  fs/ext4/super.c | 23
-rw-r--r--  fs/fuse/file.c | 82
-rw-r--r--  fs/gfs2/export.c | 46
-rw-r--r--  fs/gfs2/glock.c | 21
-rw-r--r--  fs/gfs2/inode.c | 152
-rw-r--r--  fs/gfs2/inode.h | 4
-rw-r--r--  fs/gfs2/quota.c | 15
-rw-r--r--  fs/gfs2/rgrp.c | 91
-rw-r--r--  fs/ioctl.c | 40
-rw-r--r--  fs/ioprio.c | 31
-rw-r--r--  fs/jbd2/journal.c | 16
-rw-r--r--  fs/lockd/clntlock.c | 1
-rw-r--r--  fs/lockd/clntproc.c | 1
-rw-r--r--  fs/lockd/host.c | 11
-rw-r--r--  fs/lockd/svc4proc.c | 1
-rw-r--r--  fs/lockd/svclock.c | 1
-rw-r--r--  fs/lockd/svcproc.c | 1
-rw-r--r--  fs/locks.c | 1
-rw-r--r--  fs/logfs/journal.c | 2
-rw-r--r--  fs/logfs/readwrite.c | 3
-rw-r--r--  fs/namei.c | 3
-rw-r--r--  fs/namespace.c | 1
-rw-r--r--  fs/ncpfs/dir.c | 1
-rw-r--r--  fs/ncpfs/file.c | 1
-rw-r--r--  fs/ncpfs/inode.c | 1
-rw-r--r--  fs/ncpfs/ioctl.c | 1
-rw-r--r--  fs/nfs/callback.c | 1
-rw-r--r--  fs/nfs/delegation.c | 1
-rw-r--r--  fs/nfs/dir.c | 220
-rw-r--r--  fs/nfs/direct.c | 2
-rw-r--r--  fs/nfs/file.c | 2
-rw-r--r--  fs/nfs/inode.c | 1
-rw-r--r--  fs/nfs/internal.h | 9
-rw-r--r--  fs/nfs/mount_clnt.c | 4
-rw-r--r--  fs/nfs/nfs2xdr.c | 8
-rw-r--r--  fs/nfs/nfs3xdr.c | 8
-rw-r--r--  fs/nfs/nfs4proc.c | 13
-rw-r--r--  fs/nfs/nfs4xdr.c | 8
-rw-r--r--  fs/nfs/pagelist.c | 4
-rw-r--r--  fs/nfs/read.c | 1
-rw-r--r--  fs/nfs/super.c | 13
-rw-r--r--  fs/nfs/write.c | 3
-rw-r--r--  fs/nfsd/nfs3xdr.c | 6
-rw-r--r--  fs/nfsd/nfs4state.c | 8
-rw-r--r--  fs/nfsd/xdr4.h | 21
-rw-r--r--  fs/nilfs2/dat.c | 2
-rw-r--r--  fs/nilfs2/gcinode.c | 9
-rw-r--r--  fs/nilfs2/ioctl.c | 16
-rw-r--r--  fs/notify/fanotify/fanotify.c | 6
-rw-r--r--  fs/notify/fanotify/fanotify_user.c | 81
-rw-r--r--  fs/notify/inotify/inotify_user.c | 1
-rw-r--r--  fs/ocfs2/aops.c | 7
-rw-r--r--  fs/ocfs2/aops.h | 23
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 14
-rw-r--r--  fs/ocfs2/cluster/masklog.c | 3
-rw-r--r--  fs/ocfs2/cluster/masklog.h | 15
-rw-r--r--  fs/ocfs2/dcache.c | 1
-rw-r--r--  fs/ocfs2/dir.c | 4
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 40
-rw-r--r--  fs/ocfs2/file.c | 15
-rw-r--r--  fs/ocfs2/ocfs2.h | 6
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 2
-rw-r--r--  fs/ocfs2/stack_user.c | 2
-rw-r--r--  fs/ocfs2/super.c | 1
-rw-r--r--  fs/pipe.c | 14
-rw-r--r--  fs/proc/base.c | 2
-rw-r--r--  fs/proc/inode.c | 1
-rw-r--r--  fs/proc/task_mmu.c | 3
-rw-r--r--  fs/read_write.c | 1
-rw-r--r--  fs/reiserfs/inode.c | 1
-rw-r--r--  fs/reiserfs/ioctl.c | 8
-rw-r--r--  fs/reiserfs/journal.c | 1
-rw-r--r--  fs/reiserfs/super.c | 1
-rw-r--r--  fs/reiserfs/xattr_acl.c | 6
-rw-r--r--  fs/splice.c | 24
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 94
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 35
-rw-r--r--  fs/xfs/xfs_bmap.c | 85
-rw-r--r--  fs/xfs/xfs_bmap.h | 5
-rw-r--r--  fs/xfs/xfs_dfrag.c | 13
-rw-r--r--  fs/xfs/xfs_error.c | 3
-rw-r--r--  fs/xfs/xfs_error.h | 5
-rw-r--r--  fs/xfs/xfs_inode_item.c | 31
-rw-r--r--  fs/xfs/xfs_rename.c | 1
145 files changed, 2171 insertions, 1249 deletions
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index d5c1401f003..d34896cfb19 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -980,19 +980,11 @@ static int autofs4_root_ioctl_unlocked(struct inode *inode, struct file *filp,
 	}
 }
 
-static DEFINE_MUTEX(autofs4_ioctl_mutex);
-
 static long autofs4_root_ioctl(struct file *filp,
 			       unsigned int cmd, unsigned long arg)
 {
-	long ret;
 	struct inode *inode = filp->f_dentry->d_inode;
-
-	mutex_lock(&autofs4_ioctl_mutex);
-	ret = autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
-	mutex_unlock(&autofs4_ioctl_mutex);
-
-	return ret;
+	return autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
 }
 
 #ifdef CONFIG_COMPAT
@@ -1002,13 +994,11 @@ static long autofs4_root_compat_ioctl(struct file *filp,
 	struct inode *inode = filp->f_path.dentry->d_inode;
 	int ret;
 
-	mutex_lock(&autofs4_ioctl_mutex);
 	if (cmd == AUTOFS_IOC_READY || cmd == AUTOFS_IOC_FAIL)
 		ret = autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
 	else
 		ret = autofs4_root_ioctl_unlocked(inode, filp, cmd,
 			(unsigned long)compat_ptr(arg));
-	mutex_unlock(&autofs4_ioctl_mutex);
 
 	return ret;
 }
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 06e8ff12b97..4230252fd68 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -11,7 +11,6 @@
 #include <linux/slab.h>
 #include <linux/kmod.h>
 #include <linux/major.h>
-#include <linux/smp_lock.h>
 #include <linux/device_cgroup.h>
 #include <linux/highmem.h>
 #include <linux/blkdev.h>
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 7845d1f7d1d..b50bc4bd5c5 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -91,23 +91,10 @@ static inline int compressed_bio_size(struct btrfs_root *root,
 static struct bio *compressed_bio_alloc(struct block_device *bdev,
 					u64 first_byte, gfp_t gfp_flags)
 {
-	struct bio *bio;
 	int nr_vecs;
 
 	nr_vecs = bio_get_nr_vecs(bdev);
-	bio = bio_alloc(gfp_flags, nr_vecs);
-
-	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
-		while (!bio && (nr_vecs /= 2))
-			bio = bio_alloc(gfp_flags, nr_vecs);
-	}
-
-	if (bio) {
-		bio->bi_size = 0;
-		bio->bi_bdev = bdev;
-		bio->bi_sector = first_byte >> 9;
-	}
-	return bio;
+	return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags);
 }
 
 static int check_compressed_csum(struct inode *inode,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8db9234f6b4..af52f6d7a4d 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -808,9 +808,9 @@ struct btrfs_block_group_cache {
 	int extents_thresh;
 	int free_extents;
 	int total_bitmaps;
-	int ro:1;
-	int dirty:1;
-	int iref:1;
+	unsigned int ro:1;
+	unsigned int dirty:1;
+	unsigned int iref:1;
 
 	int disk_cache_state;
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index fb827d0d718..51d2e4de34e 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -28,6 +28,7 @@
28#include <linux/freezer.h> 28#include <linux/freezer.h>
29#include <linux/crc32c.h> 29#include <linux/crc32c.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/migrate.h>
31#include "compat.h" 32#include "compat.h"
32#include "ctree.h" 33#include "ctree.h"
33#include "disk-io.h" 34#include "disk-io.h"
@@ -355,6 +356,8 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
355 ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE, 356 ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
356 btrfs_header_generation(eb)); 357 btrfs_header_generation(eb));
357 BUG_ON(ret); 358 BUG_ON(ret);
359 WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));
360
358 found_start = btrfs_header_bytenr(eb); 361 found_start = btrfs_header_bytenr(eb);
359 if (found_start != start) { 362 if (found_start != start) {
360 WARN_ON(1); 363 WARN_ON(1);
@@ -693,6 +696,27 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
693 __btree_submit_bio_done); 696 __btree_submit_bio_done);
694} 697}
695 698
699#ifdef CONFIG_MIGRATION
700static int btree_migratepage(struct address_space *mapping,
701 struct page *newpage, struct page *page)
702{
703 /*
704 * we can't safely write a btree page from here,
705 * we haven't done the locking hook
706 */
707 if (PageDirty(page))
708 return -EAGAIN;
709 /*
710 * Buffers may be managed in a filesystem specific way.
711 * We must have no buffers or drop them.
712 */
713 if (page_has_private(page) &&
714 !try_to_release_page(page, GFP_KERNEL))
715 return -EAGAIN;
716 return migrate_page(mapping, newpage, page);
717}
718#endif
719
696static int btree_writepage(struct page *page, struct writeback_control *wbc) 720static int btree_writepage(struct page *page, struct writeback_control *wbc)
697{ 721{
698 struct extent_io_tree *tree; 722 struct extent_io_tree *tree;
@@ -707,8 +731,7 @@ static int btree_writepage(struct page *page, struct writeback_control *wbc)
707 } 731 }
708 732
709 redirty_page_for_writepage(wbc, page); 733 redirty_page_for_writepage(wbc, page);
710 eb = btrfs_find_tree_block(root, page_offset(page), 734 eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE);
711 PAGE_CACHE_SIZE);
712 WARN_ON(!eb); 735 WARN_ON(!eb);
713 736
714 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); 737 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
@@ -799,6 +822,9 @@ static const struct address_space_operations btree_aops = {
799 .releasepage = btree_releasepage, 822 .releasepage = btree_releasepage,
800 .invalidatepage = btree_invalidatepage, 823 .invalidatepage = btree_invalidatepage,
801 .sync_page = block_sync_page, 824 .sync_page = block_sync_page,
825#ifdef CONFIG_MIGRATION
826 .migratepage = btree_migratepage,
827#endif
802}; 828};
803 829
804int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize, 830int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
@@ -981,7 +1007,10 @@ static int find_and_setup_root(struct btrfs_root *tree_root,
981 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item)); 1007 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
982 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item), 1008 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
983 blocksize, generation); 1009 blocksize, generation);
984 BUG_ON(!root->node); 1010 if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) {
1011 free_extent_buffer(root->node);
1012 return -EIO;
1013 }
985 root->commit_root = btrfs_root_node(root); 1014 root->commit_root = btrfs_root_node(root);
986 return 0; 1015 return 0;
987} 1016}
@@ -1538,10 +1567,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1538 GFP_NOFS); 1567 GFP_NOFS);
1539 struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), 1568 struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
1540 GFP_NOFS); 1569 GFP_NOFS);
1541 struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root), 1570 struct btrfs_root *tree_root = btrfs_sb(sb);
1542 GFP_NOFS); 1571 struct btrfs_fs_info *fs_info = tree_root->fs_info;
1543 struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
1544 GFP_NOFS);
1545 struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), 1572 struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
1546 GFP_NOFS); 1573 GFP_NOFS);
1547 struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), 1574 struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 951ef09b82f..659f532d26a 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -166,7 +166,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
 static struct dentry *btrfs_get_parent(struct dentry *child)
 {
 	struct inode *dir = child->d_inode;
-	static struct dentry *dentry;
+	struct dentry *dentry;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
@@ -232,9 +232,85 @@ fail:
 	return ERR_PTR(ret);
 }
 
+static int btrfs_get_name(struct dentry *parent, char *name,
+			  struct dentry *child)
+{
+	struct inode *inode = child->d_inode;
+	struct inode *dir = parent->d_inode;
+	struct btrfs_path *path;
+	struct btrfs_root *root = BTRFS_I(dir)->root;
+	struct btrfs_inode_ref *iref;
+	struct btrfs_root_ref *rref;
+	struct extent_buffer *leaf;
+	unsigned long name_ptr;
+	struct btrfs_key key;
+	int name_len;
+	int ret;
+
+	if (!dir || !inode)
+		return -EINVAL;
+
+	if (!S_ISDIR(dir->i_mode))
+		return -EINVAL;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	path->leave_spinning = 1;
+
+	if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+		key.objectid = BTRFS_I(inode)->root->root_key.objectid;
+		key.type = BTRFS_ROOT_BACKREF_KEY;
+		key.offset = (u64)-1;
+		root = root->fs_info->tree_root;
+	} else {
+		key.objectid = inode->i_ino;
+		key.offset = dir->i_ino;
+		key.type = BTRFS_INODE_REF_KEY;
+	}
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0) {
+		btrfs_free_path(path);
+		return ret;
+	} else if (ret > 0) {
+		if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+			path->slots[0]--;
+		} else {
+			btrfs_free_path(path);
+			return -ENOENT;
+		}
+	}
+	leaf = path->nodes[0];
+
+	if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+		rref = btrfs_item_ptr(leaf, path->slots[0],
+				     struct btrfs_root_ref);
+		name_ptr = (unsigned long)(rref + 1);
+		name_len = btrfs_root_ref_name_len(leaf, rref);
+	} else {
+		iref = btrfs_item_ptr(leaf, path->slots[0],
+				     struct btrfs_inode_ref);
+		name_ptr = (unsigned long)(iref + 1);
+		name_len = btrfs_inode_ref_name_len(leaf, iref);
+	}
+
+	read_extent_buffer(leaf, name, name_ptr, name_len);
+	btrfs_free_path(path);
+
+	/*
+	 * have to add the null termination to make sure that reconnect_path
+	 * gets the right len for strlen
+	 */
+	name[name_len] = '\0';
+
+	return 0;
+}
+
 const struct export_operations btrfs_export_ops = {
 	.encode_fh	= btrfs_encode_fh,
 	.fh_to_dentry	= btrfs_fh_to_dentry,
 	.fh_to_parent	= btrfs_fh_to_parent,
 	.get_parent	= btrfs_get_parent,
+	.get_name	= btrfs_get_name,
 };
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0c097f3aec4..227e5815d83 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -429,6 +429,7 @@ err:
429 429
430static int cache_block_group(struct btrfs_block_group_cache *cache, 430static int cache_block_group(struct btrfs_block_group_cache *cache,
431 struct btrfs_trans_handle *trans, 431 struct btrfs_trans_handle *trans,
432 struct btrfs_root *root,
432 int load_cache_only) 433 int load_cache_only)
433{ 434{
434 struct btrfs_fs_info *fs_info = cache->fs_info; 435 struct btrfs_fs_info *fs_info = cache->fs_info;
@@ -442,9 +443,12 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
442 443
443 /* 444 /*
444 * We can't do the read from on-disk cache during a commit since we need 445 * We can't do the read from on-disk cache during a commit since we need
445 * to have the normal tree locking. 446 * to have the normal tree locking. Also if we are currently trying to
447 * allocate blocks for the tree root we can't do the fast caching since
448 * we likely hold important locks.
446 */ 449 */
447 if (!trans->transaction->in_commit) { 450 if (!trans->transaction->in_commit &&
451 (root && root != root->fs_info->tree_root)) {
448 spin_lock(&cache->lock); 452 spin_lock(&cache->lock);
449 if (cache->cached != BTRFS_CACHE_NO) { 453 if (cache->cached != BTRFS_CACHE_NO) {
450 spin_unlock(&cache->lock); 454 spin_unlock(&cache->lock);
@@ -2741,6 +2745,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2741 struct btrfs_root *root = block_group->fs_info->tree_root; 2745 struct btrfs_root *root = block_group->fs_info->tree_root;
2742 struct inode *inode = NULL; 2746 struct inode *inode = NULL;
2743 u64 alloc_hint = 0; 2747 u64 alloc_hint = 0;
2748 int dcs = BTRFS_DC_ERROR;
2744 int num_pages = 0; 2749 int num_pages = 0;
2745 int retries = 0; 2750 int retries = 0;
2746 int ret = 0; 2751 int ret = 0;
@@ -2795,6 +2800,8 @@ again:
2795 2800
2796 spin_lock(&block_group->lock); 2801 spin_lock(&block_group->lock);
2797 if (block_group->cached != BTRFS_CACHE_FINISHED) { 2802 if (block_group->cached != BTRFS_CACHE_FINISHED) {
2803 /* We're not cached, don't bother trying to write stuff out */
2804 dcs = BTRFS_DC_WRITTEN;
2798 spin_unlock(&block_group->lock); 2805 spin_unlock(&block_group->lock);
2799 goto out_put; 2806 goto out_put;
2800 } 2807 }
@@ -2821,6 +2828,8 @@ again:
2821 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages, 2828 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2822 num_pages, num_pages, 2829 num_pages, num_pages,
2823 &alloc_hint); 2830 &alloc_hint);
2831 if (!ret)
2832 dcs = BTRFS_DC_SETUP;
2824 btrfs_free_reserved_data_space(inode, num_pages); 2833 btrfs_free_reserved_data_space(inode, num_pages);
2825out_put: 2834out_put:
2826 iput(inode); 2835 iput(inode);
@@ -2828,10 +2837,7 @@ out_free:
2828 btrfs_release_path(root, path); 2837 btrfs_release_path(root, path);
2829out: 2838out:
2830 spin_lock(&block_group->lock); 2839 spin_lock(&block_group->lock);
2831 if (ret) 2840 block_group->disk_cache_state = dcs;
2832 block_group->disk_cache_state = BTRFS_DC_ERROR;
2833 else
2834 block_group->disk_cache_state = BTRFS_DC_SETUP;
2835 spin_unlock(&block_group->lock); 2841 spin_unlock(&block_group->lock);
2836 2842
2837 return ret; 2843 return ret;
@@ -3037,7 +3043,13 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3037 3043
3038u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags) 3044u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3039{ 3045{
3040 u64 num_devices = root->fs_info->fs_devices->rw_devices; 3046 /*
3047 * we add in the count of missing devices because we want
3048 * to make sure that any RAID levels on a degraded FS
3049 * continue to be honored.
3050 */
3051 u64 num_devices = root->fs_info->fs_devices->rw_devices +
3052 root->fs_info->fs_devices->missing_devices;
3041 3053
3042 if (num_devices == 1) 3054 if (num_devices == 1)
3043 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0); 3055 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
@@ -3412,7 +3424,7 @@ again:
3412 * our reservation. 3424 * our reservation.
3413 */ 3425 */
3414 if (unused <= space_info->total_bytes) { 3426 if (unused <= space_info->total_bytes) {
3415 unused -= space_info->total_bytes; 3427 unused = space_info->total_bytes - unused;
3416 if (unused >= num_bytes) { 3428 if (unused >= num_bytes) {
3417 if (!reserved) 3429 if (!reserved)
3418 space_info->bytes_reserved += orig_bytes; 3430 space_info->bytes_reserved += orig_bytes;
@@ -4080,7 +4092,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
4080 * space back to the block group, otherwise we will leak space. 4092 * space back to the block group, otherwise we will leak space.
4081 */ 4093 */
4082 if (!alloc && cache->cached == BTRFS_CACHE_NO) 4094 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4083 cache_block_group(cache, trans, 1); 4095 cache_block_group(cache, trans, NULL, 1);
4084 4096
4085 byte_in_group = bytenr - cache->key.objectid; 4097 byte_in_group = bytenr - cache->key.objectid;
4086 WARN_ON(byte_in_group > cache->key.offset); 4098 WARN_ON(byte_in_group > cache->key.offset);
@@ -4930,11 +4942,31 @@ search:
4930 btrfs_get_block_group(block_group); 4942 btrfs_get_block_group(block_group);
4931 search_start = block_group->key.objectid; 4943 search_start = block_group->key.objectid;
4932 4944
4945 /*
4946 * this can happen if we end up cycling through all the
4947 * raid types, but we want to make sure we only allocate
4948 * for the proper type.
4949 */
4950 if (!block_group_bits(block_group, data)) {
4951 u64 extra = BTRFS_BLOCK_GROUP_DUP |
4952 BTRFS_BLOCK_GROUP_RAID1 |
4953 BTRFS_BLOCK_GROUP_RAID10;
4954
4955 /*
4956 * if they asked for extra copies and this block group
4957 * doesn't provide them, bail. This does allow us to
4958 * fill raid0 from raid1.
4959 */
4960 if ((data & extra) && !(block_group->flags & extra))
4961 goto loop;
4962 }
4963
4933have_block_group: 4964have_block_group:
4934 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) { 4965 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
4935 u64 free_percent; 4966 u64 free_percent;
4936 4967
4937 ret = cache_block_group(block_group, trans, 1); 4968 ret = cache_block_group(block_group, trans,
4969 orig_root, 1);
4938 if (block_group->cached == BTRFS_CACHE_FINISHED) 4970 if (block_group->cached == BTRFS_CACHE_FINISHED)
4939 goto have_block_group; 4971 goto have_block_group;
4940 4972
@@ -4958,7 +4990,8 @@ have_block_group:
4958 if (loop > LOOP_CACHING_NOWAIT || 4990 if (loop > LOOP_CACHING_NOWAIT ||
4959 (loop > LOOP_FIND_IDEAL && 4991 (loop > LOOP_FIND_IDEAL &&
4960 atomic_read(&space_info->caching_threads) < 2)) { 4992 atomic_read(&space_info->caching_threads) < 2)) {
4961 ret = cache_block_group(block_group, trans, 0); 4993 ret = cache_block_group(block_group, trans,
4994 orig_root, 0);
4962 BUG_ON(ret); 4995 BUG_ON(ret);
4963 } 4996 }
4964 found_uncached_bg = true; 4997 found_uncached_bg = true;
@@ -5515,7 +5548,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5515 u64 num_bytes = ins->offset; 5548 u64 num_bytes = ins->offset;
5516 5549
5517 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid); 5550 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
5518 cache_block_group(block_group, trans, 0); 5551 cache_block_group(block_group, trans, NULL, 0);
5519 caching_ctl = get_caching_control(block_group); 5552 caching_ctl = get_caching_control(block_group);
5520 5553
5521 if (!caching_ctl) { 5554 if (!caching_ctl) {
@@ -6300,9 +6333,13 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
6300 NULL, NULL); 6333 NULL, NULL);
6301 BUG_ON(ret < 0); 6334 BUG_ON(ret < 0);
6302 if (ret > 0) { 6335 if (ret > 0) {
6303 ret = btrfs_del_orphan_item(trans, tree_root, 6336 /* if we fail to delete the orphan item this time
6304 root->root_key.objectid); 6337 * around, it'll get picked up the next time.
6305 BUG_ON(ret); 6338 *
6339 * The most common failure here is just -ENOENT.
6340 */
6341 btrfs_del_orphan_item(trans, tree_root,
6342 root->root_key.objectid);
6306 } 6343 }
6307 } 6344 }
6308 6345
@@ -7878,7 +7915,14 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7878 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 | 7915 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7879 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10; 7916 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7880 7917
7881 num_devices = root->fs_info->fs_devices->rw_devices; 7918 /*
7919 * we add in the count of missing devices because we want
7920 * to make sure that any RAID levels on a degraded FS
7921 * continue to be honored.
7922 */
7923 num_devices = root->fs_info->fs_devices->rw_devices +
7924 root->fs_info->fs_devices->missing_devices;
7925
7882 if (num_devices == 1) { 7926 if (num_devices == 1) {
7883 stripped |= BTRFS_BLOCK_GROUP_DUP; 7927 stripped |= BTRFS_BLOCK_GROUP_DUP;
7884 stripped = flags & ~stripped; 7928 stripped = flags & ~stripped;
@@ -8247,7 +8291,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
8247 break; 8291 break;
8248 if (ret != 0) 8292 if (ret != 0)
8249 goto error; 8293 goto error;
8250
8251 leaf = path->nodes[0]; 8294 leaf = path->nodes[0];
8252 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); 8295 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8253 cache = kzalloc(sizeof(*cache), GFP_NOFS); 8296 cache = kzalloc(sizeof(*cache), GFP_NOFS);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index eac10e3260a..3e86b9f3650 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1828,9 +1828,9 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
1828 bio_put(bio); 1828 bio_put(bio);
1829} 1829}
1830 1830
1831static struct bio * 1831struct bio *
1832extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, 1832btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1833 gfp_t gfp_flags) 1833 gfp_t gfp_flags)
1834{ 1834{
1835 struct bio *bio; 1835 struct bio *bio;
1836 1836
@@ -1919,7 +1919,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
1919 else 1919 else
1920 nr = bio_get_nr_vecs(bdev); 1920 nr = bio_get_nr_vecs(bdev);
1921 1921
1922 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); 1922 bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1923 1923
1924 bio_add_page(bio, page, page_size, offset); 1924 bio_add_page(bio, page, page_size, offset);
1925 bio->bi_end_io = end_io_func; 1925 bio->bi_end_io = end_io_func;
@@ -2901,21 +2901,53 @@ out:
2901int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 2901int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2902 __u64 start, __u64 len, get_extent_t *get_extent) 2902 __u64 start, __u64 len, get_extent_t *get_extent)
2903{ 2903{
2904 int ret; 2904 int ret = 0;
2905 u64 off = start; 2905 u64 off = start;
2906 u64 max = start + len; 2906 u64 max = start + len;
2907 u32 flags = 0; 2907 u32 flags = 0;
2908 u32 found_type;
2909 u64 last;
2908 u64 disko = 0; 2910 u64 disko = 0;
2911 struct btrfs_key found_key;
2909 struct extent_map *em = NULL; 2912 struct extent_map *em = NULL;
2910 struct extent_state *cached_state = NULL; 2913 struct extent_state *cached_state = NULL;
2914 struct btrfs_path *path;
2915 struct btrfs_file_extent_item *item;
2911 int end = 0; 2916 int end = 0;
2912 u64 em_start = 0, em_len = 0; 2917 u64 em_start = 0, em_len = 0;
2913 unsigned long emflags; 2918 unsigned long emflags;
2914 ret = 0; 2919 int hole = 0;
2915 2920
2916 if (len == 0) 2921 if (len == 0)
2917 return -EINVAL; 2922 return -EINVAL;
2918 2923
2924 path = btrfs_alloc_path();
2925 if (!path)
2926 return -ENOMEM;
2927 path->leave_spinning = 1;
2928
2929 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
2930 path, inode->i_ino, -1, 0);
2931 if (ret < 0) {
2932 btrfs_free_path(path);
2933 return ret;
2934 }
2935 WARN_ON(!ret);
2936 path->slots[0]--;
2937 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2938 struct btrfs_file_extent_item);
2939 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
2940 found_type = btrfs_key_type(&found_key);
2941
2942 /* No extents, just return */
2943 if (found_key.objectid != inode->i_ino ||
2944 found_type != BTRFS_EXTENT_DATA_KEY) {
2945 btrfs_free_path(path);
2946 return 0;
2947 }
2948 last = found_key.offset;
2949 btrfs_free_path(path);
2950
2919 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, 2951 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
2920 &cached_state, GFP_NOFS); 2952 &cached_state, GFP_NOFS);
2921 em = get_extent(inode, NULL, 0, off, max - off, 0); 2953 em = get_extent(inode, NULL, 0, off, max - off, 0);
@@ -2925,11 +2957,18 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2925 ret = PTR_ERR(em); 2957 ret = PTR_ERR(em);
2926 goto out; 2958 goto out;
2927 } 2959 }
2960
2928 while (!end) { 2961 while (!end) {
2962 hole = 0;
2929 off = em->start + em->len; 2963 off = em->start + em->len;
2930 if (off >= max) 2964 if (off >= max)
2931 end = 1; 2965 end = 1;
2932 2966
2967 if (em->block_start == EXTENT_MAP_HOLE) {
2968 hole = 1;
2969 goto next;
2970 }
2971
2933 em_start = em->start; 2972 em_start = em->start;
2934 em_len = em->len; 2973 em_len = em->len;
2935 2974
@@ -2939,8 +2978,6 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2939 if (em->block_start == EXTENT_MAP_LAST_BYTE) { 2978 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
2940 end = 1; 2979 end = 1;
2941 flags |= FIEMAP_EXTENT_LAST; 2980 flags |= FIEMAP_EXTENT_LAST;
2942 } else if (em->block_start == EXTENT_MAP_HOLE) {
2943 flags |= FIEMAP_EXTENT_UNWRITTEN;
2944 } else if (em->block_start == EXTENT_MAP_INLINE) { 2981 } else if (em->block_start == EXTENT_MAP_INLINE) {
2945 flags |= (FIEMAP_EXTENT_DATA_INLINE | 2982 flags |= (FIEMAP_EXTENT_DATA_INLINE |
2946 FIEMAP_EXTENT_NOT_ALIGNED); 2983 FIEMAP_EXTENT_NOT_ALIGNED);
@@ -2953,10 +2990,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2953 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) 2990 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2954 flags |= FIEMAP_EXTENT_ENCODED; 2991 flags |= FIEMAP_EXTENT_ENCODED;
2955 2992
2993next:
2956 emflags = em->flags; 2994 emflags = em->flags;
2957 free_extent_map(em); 2995 free_extent_map(em);
2958 em = NULL; 2996 em = NULL;
2959
2960 if (!end) { 2997 if (!end) {
2961 em = get_extent(inode, NULL, 0, off, max - off, 0); 2998 em = get_extent(inode, NULL, 0, off, max - off, 0);
2962 if (!em) 2999 if (!em)
@@ -2967,15 +3004,23 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2967 } 3004 }
2968 emflags = em->flags; 3005 emflags = em->flags;
2969 } 3006 }
3007
2970 if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) { 3008 if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
2971 flags |= FIEMAP_EXTENT_LAST; 3009 flags |= FIEMAP_EXTENT_LAST;
2972 end = 1; 3010 end = 1;
2973 } 3011 }
2974 3012
2975 ret = fiemap_fill_next_extent(fieinfo, em_start, disko, 3013 if (em_start == last) {
2976 em_len, flags); 3014 flags |= FIEMAP_EXTENT_LAST;
2977 if (ret) 3015 end = 1;
2978 goto out_free; 3016 }
3017
3018 if (!hole) {
3019 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3020 em_len, flags);
3021 if (ret)
3022 goto out_free;
3023 }
2979 } 3024 }
2980out_free: 3025out_free:
2981 free_extent_map(em); 3026 free_extent_map(em);
@@ -3836,8 +3881,10 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
3836 3881
3837 spin_lock(&tree->buffer_lock); 3882 spin_lock(&tree->buffer_lock);
3838 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT); 3883 eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
3839 if (!eb) 3884 if (!eb) {
3840 goto out; 3885 spin_unlock(&tree->buffer_lock);
3886 return ret;
3887 }
3841 3888
3842 if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { 3889 if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
3843 ret = 0; 3890 ret = 0;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 1c6d4f342ef..4183c8178f0 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -310,4 +310,7 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 				struct extent_io_tree *tree,
 				u64 start, u64 end, struct page *locked_page,
 				unsigned long op);
+struct bio *
+btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
+		gfp_t gfp_flags);
 #endif
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e354c33df08..66836d85763 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -48,30 +48,34 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
48 struct page **prepared_pages, 48 struct page **prepared_pages,
49 struct iov_iter *i) 49 struct iov_iter *i)
50{ 50{
51 size_t copied; 51 size_t copied = 0;
52 int pg = 0; 52 int pg = 0;
53 int offset = pos & (PAGE_CACHE_SIZE - 1); 53 int offset = pos & (PAGE_CACHE_SIZE - 1);
54 int total_copied = 0;
54 55
55 while (write_bytes > 0) { 56 while (write_bytes > 0) {
56 size_t count = min_t(size_t, 57 size_t count = min_t(size_t,
57 PAGE_CACHE_SIZE - offset, write_bytes); 58 PAGE_CACHE_SIZE - offset, write_bytes);
58 struct page *page = prepared_pages[pg]; 59 struct page *page = prepared_pages[pg];
59again: 60 /*
60 if (unlikely(iov_iter_fault_in_readable(i, count))) 61 * Copy data from userspace to the current page
61 return -EFAULT; 62 *
62 63 * Disable pagefault to avoid recursive lock since
63 /* Copy data from userspace to the current page */ 64 * the pages are already locked
64 copied = iov_iter_copy_from_user(page, i, offset, count); 65 */
66 pagefault_disable();
67 copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
68 pagefault_enable();
65 69
66 /* Flush processor's dcache for this page */ 70 /* Flush processor's dcache for this page */
67 flush_dcache_page(page); 71 flush_dcache_page(page);
68 iov_iter_advance(i, copied); 72 iov_iter_advance(i, copied);
69 write_bytes -= copied; 73 write_bytes -= copied;
74 total_copied += copied;
70 75
76 /* Return to btrfs_file_aio_write to fault page */
71 if (unlikely(copied == 0)) { 77 if (unlikely(copied == 0)) {
72 count = min_t(size_t, PAGE_CACHE_SIZE - offset, 78 break;
73 iov_iter_single_seg_count(i));
74 goto again;
75 } 79 }
76 80
77 if (unlikely(copied < PAGE_CACHE_SIZE - offset)) { 81 if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
@@ -81,7 +85,7 @@ again:
81 offset = 0; 85 offset = 0;
82 } 86 }
83 } 87 }
84 return 0; 88 return total_copied;
85} 89}
86 90
87/* 91/*
@@ -854,6 +858,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
854 unsigned long last_index; 858 unsigned long last_index;
855 int will_write; 859 int will_write;
856 int buffered = 0; 860 int buffered = 0;
861 int copied = 0;
862 int dirty_pages = 0;
857 863
858 will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || 864 will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
859 (file->f_flags & O_DIRECT)); 865 (file->f_flags & O_DIRECT));
@@ -970,7 +976,17 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
970 WARN_ON(num_pages > nrptrs); 976 WARN_ON(num_pages > nrptrs);
971 memset(pages, 0, sizeof(struct page *) * nrptrs); 977 memset(pages, 0, sizeof(struct page *) * nrptrs);
972 978
973 ret = btrfs_delalloc_reserve_space(inode, write_bytes); 979 /*
980 * Fault pages before locking them in prepare_pages
981 * to avoid recursive lock
982 */
983 if (unlikely(iov_iter_fault_in_readable(&i, write_bytes))) {
984 ret = -EFAULT;
985 goto out;
986 }
987
988 ret = btrfs_delalloc_reserve_space(inode,
989 num_pages << PAGE_CACHE_SHIFT);
974 if (ret) 990 if (ret)
975 goto out; 991 goto out;
976 992
@@ -978,37 +994,49 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
978 pos, first_index, last_index, 994 pos, first_index, last_index,
979 write_bytes); 995 write_bytes);
980 if (ret) { 996 if (ret) {
981 btrfs_delalloc_release_space(inode, write_bytes); 997 btrfs_delalloc_release_space(inode,
998 num_pages << PAGE_CACHE_SHIFT);
982 goto out; 999 goto out;
983 } 1000 }
984 1001
985 ret = btrfs_copy_from_user(pos, num_pages, 1002 copied = btrfs_copy_from_user(pos, num_pages,
986 write_bytes, pages, &i); 1003 write_bytes, pages, &i);
987 if (ret == 0) { 1004 dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
1005 PAGE_CACHE_SHIFT;
1006
1007 if (num_pages > dirty_pages) {
1008 if (copied > 0)
1009 atomic_inc(
1010 &BTRFS_I(inode)->outstanding_extents);
1011 btrfs_delalloc_release_space(inode,
1012 (num_pages - dirty_pages) <<
1013 PAGE_CACHE_SHIFT);
1014 }
1015
1016 if (copied > 0) {
988 dirty_and_release_pages(NULL, root, file, pages, 1017 dirty_and_release_pages(NULL, root, file, pages,
989 num_pages, pos, write_bytes); 1018 dirty_pages, pos, copied);
990 } 1019 }
991 1020
992 btrfs_drop_pages(pages, num_pages); 1021 btrfs_drop_pages(pages, num_pages);
993 if (ret) {
994 btrfs_delalloc_release_space(inode, write_bytes);
995 goto out;
996 }
997 1022
998 if (will_write) { 1023 if (copied > 0) {
999 filemap_fdatawrite_range(inode->i_mapping, pos, 1024 if (will_write) {
1000 pos + write_bytes - 1); 1025 filemap_fdatawrite_range(inode->i_mapping, pos,
1001 } else { 1026 pos + copied - 1);
1002 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1027 } else {
1003 num_pages); 1028 balance_dirty_pages_ratelimited_nr(
1004 if (num_pages < 1029 inode->i_mapping,
1005 (root->leafsize >> PAGE_CACHE_SHIFT) + 1) 1030 dirty_pages);
1006 btrfs_btree_balance_dirty(root, 1); 1031 if (dirty_pages <
1007 btrfs_throttle(root); 1032 (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
1033 btrfs_btree_balance_dirty(root, 1);
1034 btrfs_throttle(root);
1035 }
1008 } 1036 }
1009 1037
1010 pos += write_bytes; 1038 pos += copied;
1011 num_written += write_bytes; 1039 num_written += copied;
1012 1040
1013 cond_resched(); 1041 cond_resched();
1014 } 1042 }
@@ -1047,8 +1075,14 @@ out:
1047 1075
1048 if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { 1076 if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
1049 trans = btrfs_start_transaction(root, 0); 1077 trans = btrfs_start_transaction(root, 0);
1078 if (IS_ERR(trans)) {
1079 num_written = PTR_ERR(trans);
1080 goto done;
1081 }
1082 mutex_lock(&inode->i_mutex);
1050 ret = btrfs_log_dentry_safe(trans, root, 1083 ret = btrfs_log_dentry_safe(trans, root,
1051 file->f_dentry); 1084 file->f_dentry);
1085 mutex_unlock(&inode->i_mutex);
1052 if (ret == 0) { 1086 if (ret == 0) {
1053 ret = btrfs_sync_log(trans, root); 1087 ret = btrfs_sync_log(trans, root);
1054 if (ret == 0) 1088 if (ret == 0)
@@ -1067,6 +1101,7 @@ out:
1067 (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT); 1101 (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
1068 } 1102 }
1069 } 1103 }
1104done:
1070 current->backing_dev_info = NULL; 1105 current->backing_dev_info = NULL;
1071 return num_written ? num_written : err; 1106 return num_written ? num_written : err;
1072} 1107}
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 22ee0dc2e6b..60d68426695 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -290,7 +290,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 		       (unsigned long long)BTRFS_I(inode)->generation,
 		       (unsigned long long)generation,
 		       (unsigned long long)block_group->key.objectid);
-		goto out;
+		goto free_cache;
 	}
 
 	if (!num_entries)
@@ -524,6 +524,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		return 0;
 	}
 
+	node = rb_first(&block_group->free_space_offset);
+	if (!node) {
+		iput(inode);
+		return 0;
+	}
+
 	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
 	filemap_write_and_wait(inode->i_mapping);
 	btrfs_wait_ordered_range(inode, inode->i_size &
@@ -543,10 +549,6 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	 */
 	first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
 
-	node = rb_first(&block_group->free_space_offset);
-	if (!node)
-		goto out_free;
-
 	/*
 	 * Lock all pages first so we can lock the extent safely.
 	 *
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 558cac2dfa5..72f31ecb5c9 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -495,7 +495,7 @@ again:
495 add_async_extent(async_cow, start, num_bytes, 495 add_async_extent(async_cow, start, num_bytes,
496 total_compressed, pages, nr_pages_ret); 496 total_compressed, pages, nr_pages_ret);
497 497
498 if (start + num_bytes < end && start + num_bytes < actual_end) { 498 if (start + num_bytes < end) {
499 start += num_bytes; 499 start += num_bytes;
500 pages = NULL; 500 pages = NULL;
501 cond_resched(); 501 cond_resched();
@@ -4501,6 +4501,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4501 BTRFS_I(inode)->index_cnt = 2; 4501 BTRFS_I(inode)->index_cnt = 2;
4502 BTRFS_I(inode)->root = root; 4502 BTRFS_I(inode)->root = root;
4503 BTRFS_I(inode)->generation = trans->transid; 4503 BTRFS_I(inode)->generation = trans->transid;
4504 inode->i_generation = BTRFS_I(inode)->generation;
4504 btrfs_set_inode_space_info(root, inode); 4505 btrfs_set_inode_space_info(root, inode);
4505 4506
4506 if (mode & S_IFDIR) 4507 if (mode & S_IFDIR)
@@ -4622,12 +4623,12 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
4622} 4623}
4623 4624
4624static int btrfs_add_nondir(struct btrfs_trans_handle *trans, 4625static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4625 struct dentry *dentry, struct inode *inode, 4626 struct inode *dir, struct dentry *dentry,
4626 int backref, u64 index) 4627 struct inode *inode, int backref, u64 index)
4627{ 4628{
4628 int err = btrfs_add_link(trans, dentry->d_parent->d_inode, 4629 int err = btrfs_add_link(trans, dir, inode,
4629 inode, dentry->d_name.name, 4630 dentry->d_name.name, dentry->d_name.len,
4630 dentry->d_name.len, backref, index); 4631 backref, index);
4631 if (!err) { 4632 if (!err) {
4632 d_instantiate(dentry, inode); 4633 d_instantiate(dentry, inode);
4633 return 0; 4634 return 0;
@@ -4668,8 +4669,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4668 btrfs_set_trans_block_group(trans, dir); 4669 btrfs_set_trans_block_group(trans, dir);
4669 4670
4670 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4671 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4671 dentry->d_name.len, 4672 dentry->d_name.len, dir->i_ino, objectid,
4672 dentry->d_parent->d_inode->i_ino, objectid,
4673 BTRFS_I(dir)->block_group, mode, &index); 4673 BTRFS_I(dir)->block_group, mode, &index);
4674 err = PTR_ERR(inode); 4674 err = PTR_ERR(inode);
4675 if (IS_ERR(inode)) 4675 if (IS_ERR(inode))
@@ -4682,7 +4682,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4682 } 4682 }
4683 4683
4684 btrfs_set_trans_block_group(trans, inode); 4684 btrfs_set_trans_block_group(trans, inode);
4685 err = btrfs_add_nondir(trans, dentry, inode, 0, index); 4685 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4686 if (err) 4686 if (err)
4687 drop_inode = 1; 4687 drop_inode = 1;
4688 else { 4688 else {
@@ -4730,10 +4730,8 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4730 btrfs_set_trans_block_group(trans, dir); 4730 btrfs_set_trans_block_group(trans, dir);
4731 4731
4732 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4732 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4733 dentry->d_name.len, 4733 dentry->d_name.len, dir->i_ino, objectid,
4734 dentry->d_parent->d_inode->i_ino, 4734 BTRFS_I(dir)->block_group, mode, &index);
4735 objectid, BTRFS_I(dir)->block_group, mode,
4736 &index);
4737 err = PTR_ERR(inode); 4735 err = PTR_ERR(inode);
4738 if (IS_ERR(inode)) 4736 if (IS_ERR(inode))
4739 goto out_unlock; 4737 goto out_unlock;
@@ -4745,7 +4743,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4745 } 4743 }
4746 4744
4747 btrfs_set_trans_block_group(trans, inode); 4745 btrfs_set_trans_block_group(trans, inode);
4748 err = btrfs_add_nondir(trans, dentry, inode, 0, index); 4746 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4749 if (err) 4747 if (err)
4750 drop_inode = 1; 4748 drop_inode = 1;
4751 else { 4749 else {
@@ -4787,6 +4785,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4787 return -EPERM; 4785 return -EPERM;
4788 4786
4789 btrfs_inc_nlink(inode); 4787 btrfs_inc_nlink(inode);
4788 inode->i_ctime = CURRENT_TIME;
4790 4789
4791 err = btrfs_set_inode_index(dir, &index); 4790 err = btrfs_set_inode_index(dir, &index);
4792 if (err) 4791 if (err)
@@ -4805,15 +4804,17 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4805 btrfs_set_trans_block_group(trans, dir); 4804 btrfs_set_trans_block_group(trans, dir);
4806 ihold(inode); 4805 ihold(inode);
4807 4806
4808 err = btrfs_add_nondir(trans, dentry, inode, 1, index); 4807 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
4809 4808
4810 if (err) { 4809 if (err) {
4811 drop_inode = 1; 4810 drop_inode = 1;
4812 } else { 4811 } else {
4812 struct dentry *parent = dget_parent(dentry);
4813 btrfs_update_inode_block_group(trans, dir); 4813 btrfs_update_inode_block_group(trans, dir);
4814 err = btrfs_update_inode(trans, root, inode); 4814 err = btrfs_update_inode(trans, root, inode);
4815 BUG_ON(err); 4815 BUG_ON(err);
4816 btrfs_log_new_name(trans, inode, NULL, dentry->d_parent); 4816 btrfs_log_new_name(trans, inode, NULL, parent);
4817 dput(parent);
4817 } 4818 }
4818 4819
4819 nr = trans->blocks_used; 4820 nr = trans->blocks_used;
@@ -4853,8 +4854,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4853 btrfs_set_trans_block_group(trans, dir); 4854 btrfs_set_trans_block_group(trans, dir);
4854 4855
4855 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4856 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4856 dentry->d_name.len, 4857 dentry->d_name.len, dir->i_ino, objectid,
4857 dentry->d_parent->d_inode->i_ino, objectid,
4858 BTRFS_I(dir)->block_group, S_IFDIR | mode, 4858 BTRFS_I(dir)->block_group, S_IFDIR | mode,
4859 &index); 4859 &index);
4860 if (IS_ERR(inode)) { 4860 if (IS_ERR(inode)) {
@@ -4877,9 +4877,8 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4877 if (err) 4877 if (err)
4878 goto out_fail; 4878 goto out_fail;
4879 4879
4880 err = btrfs_add_link(trans, dentry->d_parent->d_inode, 4880 err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
4881 inode, dentry->d_name.name, 4881 dentry->d_name.len, 0, index);
4882 dentry->d_name.len, 0, index);
4883 if (err) 4882 if (err)
4884 goto out_fail; 4883 goto out_fail;
4885 4884
@@ -5535,13 +5534,21 @@ struct btrfs_dio_private {
5535 u64 bytes; 5534 u64 bytes;
5536 u32 *csums; 5535 u32 *csums;
5537 void *private; 5536 void *private;
5537
5538 /* number of bios pending for this dio */
5539 atomic_t pending_bios;
5540
5541 /* IO errors */
5542 int errors;
5543
5544 struct bio *orig_bio;
5538}; 5545};
5539 5546
5540static void btrfs_endio_direct_read(struct bio *bio, int err) 5547static void btrfs_endio_direct_read(struct bio *bio, int err)
5541{ 5548{
5549 struct btrfs_dio_private *dip = bio->bi_private;
5542 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; 5550 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
5543 struct bio_vec *bvec = bio->bi_io_vec; 5551 struct bio_vec *bvec = bio->bi_io_vec;
5544 struct btrfs_dio_private *dip = bio->bi_private;
5545 struct inode *inode = dip->inode; 5552 struct inode *inode = dip->inode;
5546 struct btrfs_root *root = BTRFS_I(inode)->root; 5553 struct btrfs_root *root = BTRFS_I(inode)->root;
5547 u64 start; 5554 u64 start;
@@ -5595,15 +5602,18 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
5595 struct btrfs_trans_handle *trans; 5602 struct btrfs_trans_handle *trans;
5596 struct btrfs_ordered_extent *ordered = NULL; 5603 struct btrfs_ordered_extent *ordered = NULL;
5597 struct extent_state *cached_state = NULL; 5604 struct extent_state *cached_state = NULL;
5605 u64 ordered_offset = dip->logical_offset;
5606 u64 ordered_bytes = dip->bytes;
5598 int ret; 5607 int ret;
5599 5608
5600 if (err) 5609 if (err)
5601 goto out_done; 5610 goto out_done;
5602 5611again:
5603 ret = btrfs_dec_test_ordered_pending(inode, &ordered, 5612 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
5604 dip->logical_offset, dip->bytes); 5613 &ordered_offset,
5614 ordered_bytes);
5605 if (!ret) 5615 if (!ret)
5606 goto out_done; 5616 goto out_test;
5607 5617
5608 BUG_ON(!ordered); 5618 BUG_ON(!ordered);
5609 5619
@@ -5663,8 +5673,20 @@ out_unlock:
5663out: 5673out:
5664 btrfs_delalloc_release_metadata(inode, ordered->len); 5674 btrfs_delalloc_release_metadata(inode, ordered->len);
5665 btrfs_end_transaction(trans, root); 5675 btrfs_end_transaction(trans, root);
5676 ordered_offset = ordered->file_offset + ordered->len;
5666 btrfs_put_ordered_extent(ordered); 5677 btrfs_put_ordered_extent(ordered);
5667 btrfs_put_ordered_extent(ordered); 5678 btrfs_put_ordered_extent(ordered);
5679
5680out_test:
5681 /*
5682 * our bio might span multiple ordered extents. If we haven't
5683 * completed the accounting for the whole dio, go back and try again
5684 */
5685 if (ordered_offset < dip->logical_offset + dip->bytes) {
5686 ordered_bytes = dip->logical_offset + dip->bytes -
5687 ordered_offset;
5688 goto again;
5689 }
5668out_done: 5690out_done:
5669 bio->bi_private = dip->private; 5691 bio->bi_private = dip->private;
5670 5692
@@ -5684,6 +5706,176 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
5684 return 0; 5706 return 0;
5685} 5707}
5686 5708
5709static void btrfs_end_dio_bio(struct bio *bio, int err)
5710{
5711 struct btrfs_dio_private *dip = bio->bi_private;
5712
5713 if (err) {
5714 printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu "
5715 "sector %#Lx len %u err no %d\n",
5716 dip->inode->i_ino, bio->bi_rw,
5717 (unsigned long long)bio->bi_sector, bio->bi_size, err);
5718 dip->errors = 1;
5719
5720 /*
5721 * before atomic variable goto zero, we must make sure
5722 * dip->errors is perceived to be set.
5723 */
5724 smp_mb__before_atomic_dec();
5725 }
5726
5727 /* if there are more bios still pending for this dio, just exit */
5728 if (!atomic_dec_and_test(&dip->pending_bios))
5729 goto out;
5730
5731 if (dip->errors)
5732 bio_io_error(dip->orig_bio);
5733 else {
5734 set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
5735 bio_endio(dip->orig_bio, 0);
5736 }
5737out:
5738 bio_put(bio);
5739}
5740
5741static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
5742 u64 first_sector, gfp_t gfp_flags)
5743{
5744 int nr_vecs = bio_get_nr_vecs(bdev);
5745 return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
5746}
5747
5748static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
5749 int rw, u64 file_offset, int skip_sum,
5750 u32 *csums)
5751{
5752 int write = rw & REQ_WRITE;
5753 struct btrfs_root *root = BTRFS_I(inode)->root;
5754 int ret;
5755
5756 bio_get(bio);
5757 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
5758 if (ret)
5759 goto err;
5760
5761 if (write && !skip_sum) {
5762 ret = btrfs_wq_submit_bio(root->fs_info,
5763 inode, rw, bio, 0, 0,
5764 file_offset,
5765 __btrfs_submit_bio_start_direct_io,
5766 __btrfs_submit_bio_done);
5767 goto err;
5768 } else if (!skip_sum)
5769 btrfs_lookup_bio_sums_dio(root, inode, bio,
5770 file_offset, csums);
5771
5772 ret = btrfs_map_bio(root, rw, bio, 0, 1);
5773err:
5774 bio_put(bio);
5775 return ret;
5776}
5777
5778static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
5779 int skip_sum)
5780{
5781 struct inode *inode = dip->inode;
5782 struct btrfs_root *root = BTRFS_I(inode)->root;
5783 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
5784 struct bio *bio;
5785 struct bio *orig_bio = dip->orig_bio;
5786 struct bio_vec *bvec = orig_bio->bi_io_vec;
5787 u64 start_sector = orig_bio->bi_sector;
5788 u64 file_offset = dip->logical_offset;
5789 u64 submit_len = 0;
5790 u64 map_length;
5791 int nr_pages = 0;
5792 u32 *csums = dip->csums;
5793 int ret = 0;
5794
5795 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
5796 if (!bio)
5797 return -ENOMEM;
5798 bio->bi_private = dip;
5799 bio->bi_end_io = btrfs_end_dio_bio;
5800 atomic_inc(&dip->pending_bios);
5801
5802 map_length = orig_bio->bi_size;
5803 ret = btrfs_map_block(map_tree, READ, start_sector << 9,
5804 &map_length, NULL, 0);
5805 if (ret) {
5806 bio_put(bio);
5807 return -EIO;
5808 }
5809
5810 while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
5811 if (unlikely(map_length < submit_len + bvec->bv_len ||
5812 bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5813 bvec->bv_offset) < bvec->bv_len)) {
5814 /*
5815 * inc the count before we submit the bio so
5816 * we know the end IO handler won't happen before
5817 * we inc the count. Otherwise, the dip might get freed
5818 * before we're done setting it up
5819 */
5820 atomic_inc(&dip->pending_bios);
5821 ret = __btrfs_submit_dio_bio(bio, inode, rw,
5822 file_offset, skip_sum,
5823 csums);
5824 if (ret) {
5825 bio_put(bio);
5826 atomic_dec(&dip->pending_bios);
5827 goto out_err;
5828 }
5829
5830 if (!skip_sum)
5831 csums = csums + nr_pages;
5832 start_sector += submit_len >> 9;
5833 file_offset += submit_len;
5834
5835 submit_len = 0;
5836 nr_pages = 0;
5837
5838 bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
5839 start_sector, GFP_NOFS);
5840 if (!bio)
5841 goto out_err;
5842 bio->bi_private = dip;
5843 bio->bi_end_io = btrfs_end_dio_bio;
5844
5845 map_length = orig_bio->bi_size;
5846 ret = btrfs_map_block(map_tree, READ, start_sector << 9,
5847 &map_length, NULL, 0);
5848 if (ret) {
5849 bio_put(bio);
5850 goto out_err;
5851 }
5852 } else {
5853 submit_len += bvec->bv_len;
5854 nr_pages ++;
5855 bvec++;
5856 }
5857 }
5858
5859 ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
5860 csums);
5861 if (!ret)
5862 return 0;
5863
5864 bio_put(bio);
5865out_err:
5866 dip->errors = 1;
5867 /*
5868 * before atomic variable goto zero, we must
5869 * make sure dip->errors is perceived to be set.
5870 */
5871 smp_mb__before_atomic_dec();
5872 if (atomic_dec_and_test(&dip->pending_bios))
5873 bio_io_error(dip->orig_bio);
5874
5875 /* bio_end_io() will handle error, so we needn't return it */
5876 return 0;
5877}
5878
5687static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, 5879static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
5688 loff_t file_offset) 5880 loff_t file_offset)
5689{ 5881{
@@ -5723,36 +5915,18 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
5723 5915
5724 dip->disk_bytenr = (u64)bio->bi_sector << 9; 5916 dip->disk_bytenr = (u64)bio->bi_sector << 9;
5725 bio->bi_private = dip; 5917 bio->bi_private = dip;
5918 dip->errors = 0;
5919 dip->orig_bio = bio;
5920 atomic_set(&dip->pending_bios, 0);
5726 5921
5727 if (write) 5922 if (write)
5728 bio->bi_end_io = btrfs_endio_direct_write; 5923 bio->bi_end_io = btrfs_endio_direct_write;
5729 else 5924 else
5730 bio->bi_end_io = btrfs_endio_direct_read; 5925 bio->bi_end_io = btrfs_endio_direct_read;
5731 5926
5732 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); 5927 ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
5733 if (ret) 5928 if (!ret)
5734 goto out_err;
5735
5736 if (write && !skip_sum) {
5737 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
5738 inode, rw, bio, 0, 0,
5739 dip->logical_offset,
5740 __btrfs_submit_bio_start_direct_io,
5741 __btrfs_submit_bio_done);
5742 if (ret)
5743 goto out_err;
5744 return; 5929 return;
5745 } else if (!skip_sum)
5746 btrfs_lookup_bio_sums_dio(root, inode, bio,
5747 dip->logical_offset, dip->csums);
5748
5749 ret = btrfs_map_bio(root, rw, bio, 0, 1);
5750 if (ret)
5751 goto out_err;
5752 return;
5753out_err:
5754 kfree(dip->csums);
5755 kfree(dip);
5756free_ordered: 5930free_ordered:
5757 /* 5931 /*
5758 * If this is a write, we need to clean up the reserved space and kill 5932 * If this is a write, we need to clean up the reserved space and kill
@@ -5760,8 +5934,7 @@ free_ordered:
5760 */ 5934 */
5761 if (write) { 5935 if (write) {
5762 struct btrfs_ordered_extent *ordered; 5936 struct btrfs_ordered_extent *ordered;
5763 ordered = btrfs_lookup_ordered_extent(inode, 5937 ordered = btrfs_lookup_ordered_extent(inode, file_offset);
5764 dip->logical_offset);
5765 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) && 5938 if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
5766 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) 5939 !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
5767 btrfs_free_reserved_extent(root, ordered->start, 5940 btrfs_free_reserved_extent(root, ordered->start,
@@ -6607,8 +6780,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
6607 BUG_ON(ret); 6780 BUG_ON(ret);
6608 6781
6609 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { 6782 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
6610 btrfs_log_new_name(trans, old_inode, old_dir, 6783 struct dentry *parent = dget_parent(new_dentry);
6611 new_dentry->d_parent); 6784 btrfs_log_new_name(trans, old_inode, old_dir, parent);
6785 dput(parent);
6612 btrfs_end_log_trans(root); 6786 btrfs_end_log_trans(root);
6613 } 6787 }
6614out_fail: 6788out_fail:
@@ -6758,8 +6932,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
6758 btrfs_set_trans_block_group(trans, dir); 6932 btrfs_set_trans_block_group(trans, dir);
6759 6933
6760 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 6934 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6761 dentry->d_name.len, 6935 dentry->d_name.len, dir->i_ino, objectid,
6762 dentry->d_parent->d_inode->i_ino, objectid,
6763 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, 6936 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
6764 &index); 6937 &index);
6765 err = PTR_ERR(inode); 6938 err = PTR_ERR(inode);
@@ -6773,7 +6946,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
6773 } 6946 }
6774 6947
6775 btrfs_set_trans_block_group(trans, inode); 6948 btrfs_set_trans_block_group(trans, inode);
6776 err = btrfs_add_nondir(trans, dentry, inode, 0, index); 6949 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6777 if (err) 6950 if (err)
6778 drop_inode = 1; 6951 drop_inode = 1;
6779 else { 6952 else {
@@ -6844,6 +7017,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
6844 struct btrfs_root *root = BTRFS_I(inode)->root; 7017 struct btrfs_root *root = BTRFS_I(inode)->root;
6845 struct btrfs_key ins; 7018 struct btrfs_key ins;
6846 u64 cur_offset = start; 7019 u64 cur_offset = start;
7020 u64 i_size;
6847 int ret = 0; 7021 int ret = 0;
6848 bool own_trans = true; 7022 bool own_trans = true;
6849 7023
@@ -6885,11 +7059,11 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
6885 (actual_len > inode->i_size) && 7059 (actual_len > inode->i_size) &&
6886 (cur_offset > inode->i_size)) { 7060 (cur_offset > inode->i_size)) {
6887 if (cur_offset > actual_len) 7061 if (cur_offset > actual_len)
6888 i_size_write(inode, actual_len); 7062 i_size = actual_len;
6889 else 7063 else
6890 i_size_write(inode, cur_offset); 7064 i_size = cur_offset;
6891 i_size_write(inode, cur_offset); 7065 i_size_write(inode, i_size);
6892 btrfs_ordered_update_i_size(inode, cur_offset, NULL); 7066 btrfs_ordered_update_i_size(inode, i_size, NULL);
6893 } 7067 }
6894 7068
6895 ret = btrfs_update_inode(trans, root, inode); 7069 ret = btrfs_update_inode(trans, root, inode);
@@ -6943,6 +7117,10 @@ static long btrfs_fallocate(struct inode *inode, int mode,
6943 btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start); 7117 btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
6944 7118
6945 mutex_lock(&inode->i_mutex); 7119 mutex_lock(&inode->i_mutex);
7120 ret = inode_newsize_ok(inode, alloc_end);
7121 if (ret)
7122 goto out;
7123
6946 if (alloc_start > inode->i_size) { 7124 if (alloc_start > inode->i_size) {
6947 ret = btrfs_cont_expand(inode, alloc_start); 7125 ret = btrfs_cont_expand(inode, alloc_start);
6948 if (ret) 7126 if (ret)
@@ -7139,6 +7317,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
7139 .readlink = generic_readlink, 7317 .readlink = generic_readlink,
7140 .follow_link = page_follow_link_light, 7318 .follow_link = page_follow_link_light,
7141 .put_link = page_put_link, 7319 .put_link = page_put_link,
7320 .getattr = btrfs_getattr,
7142 .permission = btrfs_permission, 7321 .permission = btrfs_permission,
7143 .setxattr = btrfs_setxattr, 7322 .setxattr = btrfs_setxattr,
7144 .getxattr = btrfs_getxattr, 7323 .getxattr = btrfs_getxattr,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 463d91b4dd3..f87552a1d7e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -233,7 +233,8 @@ static noinline int create_subvol(struct btrfs_root *root,
233 struct btrfs_inode_item *inode_item; 233 struct btrfs_inode_item *inode_item;
234 struct extent_buffer *leaf; 234 struct extent_buffer *leaf;
235 struct btrfs_root *new_root; 235 struct btrfs_root *new_root;
236 struct inode *dir = dentry->d_parent->d_inode; 236 struct dentry *parent = dget_parent(dentry);
237 struct inode *dir;
237 int ret; 238 int ret;
238 int err; 239 int err;
239 u64 objectid; 240 u64 objectid;
@@ -242,8 +243,13 @@ static noinline int create_subvol(struct btrfs_root *root,
242 243
243 ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root, 244 ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root,
244 0, &objectid); 245 0, &objectid);
245 if (ret) 246 if (ret) {
247 dput(parent);
246 return ret; 248 return ret;
249 }
250
251 dir = parent->d_inode;
252
247 /* 253 /*
248 * 1 - inode item 254 * 1 - inode item
249 * 2 - refs 255 * 2 - refs
@@ -251,8 +257,10 @@ static noinline int create_subvol(struct btrfs_root *root,
251 * 2 - dir items 257 * 2 - dir items
252 */ 258 */
253 trans = btrfs_start_transaction(root, 6); 259 trans = btrfs_start_transaction(root, 6);
254 if (IS_ERR(trans)) 260 if (IS_ERR(trans)) {
261 dput(parent);
255 return PTR_ERR(trans); 262 return PTR_ERR(trans);
263 }
256 264
257 leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 265 leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
258 0, objectid, NULL, 0, 0, 0); 266 0, objectid, NULL, 0, 0, 0);
@@ -339,6 +347,7 @@ static noinline int create_subvol(struct btrfs_root *root,
339 347
340 d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry)); 348 d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
341fail: 349fail:
350 dput(parent);
342 if (async_transid) { 351 if (async_transid) {
343 *async_transid = trans->transid; 352 *async_transid = trans->transid;
344 err = btrfs_commit_transaction_async(trans, root, 1); 353 err = btrfs_commit_transaction_async(trans, root, 1);
@@ -354,6 +363,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
354 char *name, int namelen, u64 *async_transid) 363 char *name, int namelen, u64 *async_transid)
355{ 364{
356 struct inode *inode; 365 struct inode *inode;
366 struct dentry *parent;
357 struct btrfs_pending_snapshot *pending_snapshot; 367 struct btrfs_pending_snapshot *pending_snapshot;
358 struct btrfs_trans_handle *trans; 368 struct btrfs_trans_handle *trans;
359 int ret; 369 int ret;
@@ -396,7 +406,9 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
396 406
397 btrfs_orphan_cleanup(pending_snapshot->snap); 407 btrfs_orphan_cleanup(pending_snapshot->snap);
398 408
399 inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); 409 parent = dget_parent(dentry);
410 inode = btrfs_lookup_dentry(parent->d_inode, dentry);
411 dput(parent);
400 if (IS_ERR(inode)) { 412 if (IS_ERR(inode)) {
401 ret = PTR_ERR(inode); 413 ret = PTR_ERR(inode);
402 goto fail; 414 goto fail;
@@ -935,23 +947,42 @@ out:
935 947
936static noinline int btrfs_ioctl_snap_create(struct file *file, 948static noinline int btrfs_ioctl_snap_create(struct file *file,
937 void __user *arg, int subvol, 949 void __user *arg, int subvol,
938 int async) 950 int v2)
939{ 951{
940 struct btrfs_ioctl_vol_args *vol_args = NULL; 952 struct btrfs_ioctl_vol_args *vol_args = NULL;
941 struct btrfs_ioctl_async_vol_args *async_vol_args = NULL; 953 struct btrfs_ioctl_vol_args_v2 *vol_args_v2 = NULL;
942 char *name; 954 char *name;
943 u64 fd; 955 u64 fd;
944 u64 transid = 0;
945 int ret; 956 int ret;
946 957
947 if (async) { 958 if (v2) {
948 async_vol_args = memdup_user(arg, sizeof(*async_vol_args)); 959 u64 transid = 0;
949 if (IS_ERR(async_vol_args)) 960 u64 *ptr = NULL;
950 return PTR_ERR(async_vol_args); 961
962 vol_args_v2 = memdup_user(arg, sizeof(*vol_args_v2));
963 if (IS_ERR(vol_args_v2))
964 return PTR_ERR(vol_args_v2);
965
966 if (vol_args_v2->flags & ~BTRFS_SUBVOL_CREATE_ASYNC) {
967 ret = -EINVAL;
968 goto out;
969 }
970
971 name = vol_args_v2->name;
972 fd = vol_args_v2->fd;
973 vol_args_v2->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
974
975 if (vol_args_v2->flags & BTRFS_SUBVOL_CREATE_ASYNC)
976 ptr = &transid;
977
978 ret = btrfs_ioctl_snap_create_transid(file, name, fd,
979 subvol, ptr);
951 980
952 name = async_vol_args->name; 981 if (ret == 0 && ptr &&
953 fd = async_vol_args->fd; 982 copy_to_user(arg +
954 async_vol_args->name[BTRFS_SNAPSHOT_NAME_MAX] = '\0'; 983 offsetof(struct btrfs_ioctl_vol_args_v2,
984 transid), ptr, sizeof(*ptr)))
985 ret = -EFAULT;
955 } else { 986 } else {
956 vol_args = memdup_user(arg, sizeof(*vol_args)); 987 vol_args = memdup_user(arg, sizeof(*vol_args));
957 if (IS_ERR(vol_args)) 988 if (IS_ERR(vol_args))
@@ -959,20 +990,13 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
959 name = vol_args->name; 990 name = vol_args->name;
960 fd = vol_args->fd; 991 fd = vol_args->fd;
961 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; 992 vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
962 }
963
964 ret = btrfs_ioctl_snap_create_transid(file, name, fd,
965 subvol, &transid);
966 993
967 if (!ret && async) { 994 ret = btrfs_ioctl_snap_create_transid(file, name, fd,
968 if (copy_to_user(arg + 995 subvol, NULL);
969 offsetof(struct btrfs_ioctl_async_vol_args,
970 transid), &transid, sizeof(transid)))
971 return -EFAULT;
972 } 996 }
973 997out:
974 kfree(vol_args); 998 kfree(vol_args);
975 kfree(async_vol_args); 999 kfree(vol_args_v2);
976 1000
977 return ret; 1001 return ret;
978} 1002}
@@ -1669,12 +1693,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1669 olen = len = src->i_size - off; 1693 olen = len = src->i_size - off;
1670 /* if we extend to eof, continue to block boundary */ 1694 /* if we extend to eof, continue to block boundary */
1671 if (off + len == src->i_size) 1695 if (off + len == src->i_size)
1672 len = ((src->i_size + bs-1) & ~(bs-1)) 1696 len = ALIGN(src->i_size, bs) - off;
1673 - off;
1674 1697
1675 /* verify the end result is block aligned */ 1698 /* verify the end result is block aligned */
1676 if ((off & (bs-1)) || 1699 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
1677 ((off + len) & (bs-1))) 1700 !IS_ALIGNED(destoff, bs))
1678 goto out_unlock; 1701 goto out_unlock;
1679 1702
1680 /* do any pending delalloc/csum calc on src, one way or 1703 /* do any pending delalloc/csum calc on src, one way or
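For reference, a minimal user-space sketch of the ALIGN()/IS_ALIGNED() helpers the new clone-range checks rely on (simplified macros, power-of-two block size assumed; values are only illustrative):

#include <stdio.h>

#define ALIGN(x, a)      (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned long bs = 4096, i_size = 10000, off = 0;

	/* round the clone length up to the next block boundary at EOF */
	unsigned long len = ALIGN(i_size, bs) - off;	/* 12288 */

	printf("len=%lu aligned_off=%d aligned_end=%d\n",
	       len, IS_ALIGNED(off, bs), IS_ALIGNED(off + len, bs));
	return 0;
}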
@@ -1874,8 +1897,8 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
1874 * but shouldn't round up the file size 1897 * but shouldn't round up the file size
1875 */ 1898 */
1876 endoff = new_key.offset + datal; 1899 endoff = new_key.offset + datal;
1877 if (endoff > off+olen) 1900 if (endoff > destoff+olen)
1878 endoff = off+olen; 1901 endoff = destoff+olen;
1879 if (endoff > inode->i_size) 1902 if (endoff > inode->i_size)
1880 btrfs_i_size_write(inode, endoff); 1903 btrfs_i_size_write(inode, endoff);
1881 1904
@@ -2235,7 +2258,7 @@ long btrfs_ioctl(struct file *file, unsigned int
2235 return btrfs_ioctl_getversion(file, argp); 2258 return btrfs_ioctl_getversion(file, argp);
2236 case BTRFS_IOC_SNAP_CREATE: 2259 case BTRFS_IOC_SNAP_CREATE:
2237 return btrfs_ioctl_snap_create(file, argp, 0, 0); 2260 return btrfs_ioctl_snap_create(file, argp, 0, 0);
2238 case BTRFS_IOC_SNAP_CREATE_ASYNC: 2261 case BTRFS_IOC_SNAP_CREATE_V2:
2239 return btrfs_ioctl_snap_create(file, argp, 0, 1); 2262 return btrfs_ioctl_snap_create(file, argp, 0, 1);
2240 case BTRFS_IOC_SUBVOL_CREATE: 2263 case BTRFS_IOC_SUBVOL_CREATE:
2241 return btrfs_ioctl_snap_create(file, argp, 1, 0); 2264 return btrfs_ioctl_snap_create(file, argp, 1, 0);
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 17c99ebdf96..c344d12c646 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -30,11 +30,15 @@ struct btrfs_ioctl_vol_args {
30 char name[BTRFS_PATH_NAME_MAX + 1]; 30 char name[BTRFS_PATH_NAME_MAX + 1];
31}; 31};
32 32
33#define BTRFS_SNAPSHOT_NAME_MAX 4079 33#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
34struct btrfs_ioctl_async_vol_args { 34
35#define BTRFS_SUBVOL_NAME_MAX 4039
36struct btrfs_ioctl_vol_args_v2 {
35 __s64 fd; 37 __s64 fd;
36 __u64 transid; 38 __u64 transid;
37 char name[BTRFS_SNAPSHOT_NAME_MAX + 1]; 39 __u64 flags;
40 __u64 unused[4];
41 char name[BTRFS_SUBVOL_NAME_MAX + 1];
38}; 42};
39 43
40#define BTRFS_INO_LOOKUP_PATH_MAX 4080 44#define BTRFS_INO_LOOKUP_PATH_MAX 4080
@@ -187,6 +191,6 @@ struct btrfs_ioctl_space_args {
187 struct btrfs_ioctl_space_args) 191 struct btrfs_ioctl_space_args)
188#define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64) 192#define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64)
189#define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64) 193#define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
190#define BTRFS_IOC_SNAP_CREATE_ASYNC _IOW(BTRFS_IOCTL_MAGIC, 23, \ 194#define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
191 struct btrfs_ioctl_async_vol_args) 195 struct btrfs_ioctl_vol_args_v2)
192#endif 196#endif
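A user-space sketch of driving the new v2 ioctl with the async flag; it assumes the definitions above are available to the program (for example via a copied ioctl.h) and trims error handling. The destination directory fd is the ioctl target, the source subvolume fd goes in args.fd, and the kernel writes the transaction id back into args.transid:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

int snap_create_async(int dest_dir_fd, int src_subvol_fd, const char *name)
{
	struct btrfs_ioctl_vol_args_v2 args;

	memset(&args, 0, sizeof(args));
	args.fd = src_subvol_fd;                /* fd of the source subvolume */
	args.flags = BTRFS_SUBVOL_CREATE_ASYNC; /* ask for an async commit */
	strncpy(args.name, name, BTRFS_SUBVOL_NAME_MAX);

	if (ioctl(dest_dir_fd, BTRFS_IOC_SNAP_CREATE_V2, &args) < 0) {
		perror("BTRFS_IOC_SNAP_CREATE_V2");
		return -1;
	}

	/* the kernel copied the transaction id back into args.transid */
	printf("snapshot queued in transaction %llu\n",
	       (unsigned long long)args.transid);
	return 0;
}

The returned transid can then be handed to BTRFS_IOC_WAIT_SYNC to wait for the snapshot's transaction to commit.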
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index f4621f6deca..ae7737e352c 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -250,6 +250,73 @@ int btrfs_add_ordered_sum(struct inode *inode,
250 250
251/* 251/*
252 * this is used to account for finished IO across a given range 252 * this is used to account for finished IO across a given range
253 * of the file. The IO may span ordered extents. If
254 * a given ordered_extent is completely done, 1 is returned, otherwise
255 * 0.
256 *
257 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
258 * to make sure this function only returns 1 once for a given ordered extent.
259 *
260 * file_offset is updated to one byte past the range that is recorded as
261 * complete. This allows you to walk forward in the file.
262 */
263int btrfs_dec_test_first_ordered_pending(struct inode *inode,
264 struct btrfs_ordered_extent **cached,
265 u64 *file_offset, u64 io_size)
266{
267 struct btrfs_ordered_inode_tree *tree;
268 struct rb_node *node;
269 struct btrfs_ordered_extent *entry = NULL;
270 int ret;
271 u64 dec_end;
272 u64 dec_start;
273 u64 to_dec;
274
275 tree = &BTRFS_I(inode)->ordered_tree;
276 spin_lock(&tree->lock);
277 node = tree_search(tree, *file_offset);
278 if (!node) {
279 ret = 1;
280 goto out;
281 }
282
283 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
284 if (!offset_in_entry(entry, *file_offset)) {
285 ret = 1;
286 goto out;
287 }
288
289 dec_start = max(*file_offset, entry->file_offset);
290 dec_end = min(*file_offset + io_size, entry->file_offset +
291 entry->len);
292 *file_offset = dec_end;
293 if (dec_start > dec_end) {
294 printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
295 (unsigned long long)dec_start,
296 (unsigned long long)dec_end);
297 }
298 to_dec = dec_end - dec_start;
299 if (to_dec > entry->bytes_left) {
300 printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
301 (unsigned long long)entry->bytes_left,
302 (unsigned long long)to_dec);
303 }
304 entry->bytes_left -= to_dec;
305 if (entry->bytes_left == 0)
306 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
307 else
308 ret = 1;
309out:
310 if (!ret && cached && entry) {
311 *cached = entry;
312 atomic_inc(&entry->refs);
313 }
314 spin_unlock(&tree->lock);
315 return ret == 0;
316}
317
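A non-compilable pattern sketch of how a completion handler might consume the new helper, mirroring the direct-IO write endio path: the updated *file_offset lets the caller step through every ordered extent touched by the range. Everything other than btrfs_dec_test_first_ordered_pending() is a placeholder:

/* hypothetical caller; only the walking pattern is the point */
static void dio_range_done(struct inode *inode, u64 start, u64 len)
{
	u64 offset = start;
	u64 end = start + len;

	while (offset < end) {
		struct btrfs_ordered_extent *ordered = NULL;

		/* advances 'offset' past whatever range it accounted and
		 * returns 1 if that finished off an ordered extent */
		if (btrfs_dec_test_first_ordered_pending(inode, &ordered,
							 &offset,
							 end - offset)) {
			/* ... finish and release 'ordered' here ... */
		}
		/* assumes ordered extents cover the whole dio range,
		 * so 'offset' always moves forward */
	}
}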
318/*
319 * this is used to account for finished IO across a given range
253 * of the file. The IO should not span ordered extents. If 320 * of the file. The IO should not span ordered extents. If
254 * a given ordered_extent is completely done, 1 is returned, otherwise 321 * a given ordered_extent is completely done, 1 is returned, otherwise
255 * 0. 322 * 0.
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 8ac365492a3..61dca83119d 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -141,6 +141,9 @@ int btrfs_remove_ordered_extent(struct inode *inode,
141int btrfs_dec_test_ordered_pending(struct inode *inode, 141int btrfs_dec_test_ordered_pending(struct inode *inode,
142 struct btrfs_ordered_extent **cached, 142 struct btrfs_ordered_extent **cached,
143 u64 file_offset, u64 io_size); 143 u64 file_offset, u64 io_size);
144int btrfs_dec_test_first_ordered_pending(struct inode *inode,
145 struct btrfs_ordered_extent **cached,
146 u64 *file_offset, u64 io_size);
144int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, 147int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
145 u64 start, u64 len, u64 disk_len, int type); 148 u64 start, u64 len, u64 disk_len, int type);
146int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, 149int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
diff --git a/fs/btrfs/orphan.c b/fs/btrfs/orphan.c
index 79cba5fbc28..f8be250963a 100644
--- a/fs/btrfs/orphan.c
+++ b/fs/btrfs/orphan.c
@@ -56,8 +56,12 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
56 return -ENOMEM; 56 return -ENOMEM;
57 57
58 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 58 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
59 if (ret) 59 if (ret < 0)
60 goto out; 60 goto out;
61 if (ret) {
62 ret = -ENOENT;
63 goto out;
64 }
61 65
62 ret = btrfs_del_item(trans, root, path); 66 ret = btrfs_del_item(trans, root, path);
63 67
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8299a25ffc8..883c6fa1367 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -244,6 +244,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
244 case Opt_space_cache: 244 case Opt_space_cache:
245 printk(KERN_INFO "btrfs: enabling disk space caching\n"); 245 printk(KERN_INFO "btrfs: enabling disk space caching\n");
246 btrfs_set_opt(info->mount_opt, SPACE_CACHE); 246 btrfs_set_opt(info->mount_opt, SPACE_CACHE);
247 break;
247 case Opt_clear_cache: 248 case Opt_clear_cache:
248 printk(KERN_INFO "btrfs: force clearing of disk cache\n"); 249 printk(KERN_INFO "btrfs: force clearing of disk cache\n");
249 btrfs_set_opt(info->mount_opt, CLEAR_CACHE); 250 btrfs_set_opt(info->mount_opt, CLEAR_CACHE);
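The added break matters because the Opt_space_cache case previously fell straight into Opt_clear_cache. A stand-alone illustration of that fall-through (not the btrfs code):

#include <stdio.h>

enum { Opt_space_cache, Opt_clear_cache };

int main(void)
{
	unsigned int mount_opt = 0;
	int token = Opt_space_cache;

	switch (token) {
	case Opt_space_cache:
		mount_opt |= 1 << 0;	/* SPACE_CACHE */
		break;			/* the fix: without it we fall through */
	case Opt_clear_cache:
		mount_opt |= 1 << 1;	/* CLEAR_CACHE */
		break;
	}

	printf("mount_opt = %#x\n", mount_opt);	/* 0x1; 0x3 without the break */
	return 0;
}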
@@ -562,12 +563,26 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
562 563
563static int btrfs_test_super(struct super_block *s, void *data) 564static int btrfs_test_super(struct super_block *s, void *data)
564{ 565{
565 struct btrfs_fs_devices *test_fs_devices = data; 566 struct btrfs_root *test_root = data;
566 struct btrfs_root *root = btrfs_sb(s); 567 struct btrfs_root *root = btrfs_sb(s);
567 568
568 return root->fs_info->fs_devices == test_fs_devices; 569 /*
570 * If this super block is going away, return false as it
571 * can't match as an existing super block.
572 */
573 if (!atomic_read(&s->s_active))
574 return 0;
575 return root->fs_info->fs_devices == test_root->fs_info->fs_devices;
576}
577
578static int btrfs_set_super(struct super_block *s, void *data)
579{
580 s->s_fs_info = data;
581
582 return set_anon_super(s, data);
569} 583}
570 584
585
571/* 586/*
572 * Find a superblock for the given device / mount point. 587 * Find a superblock for the given device / mount point.
573 * 588 *
@@ -581,6 +596,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
581 struct super_block *s; 596 struct super_block *s;
582 struct dentry *root; 597 struct dentry *root;
583 struct btrfs_fs_devices *fs_devices = NULL; 598 struct btrfs_fs_devices *fs_devices = NULL;
599 struct btrfs_root *tree_root = NULL;
600 struct btrfs_fs_info *fs_info = NULL;
584 fmode_t mode = FMODE_READ; 601 fmode_t mode = FMODE_READ;
585 char *subvol_name = NULL; 602 char *subvol_name = NULL;
586 u64 subvol_objectid = 0; 603 u64 subvol_objectid = 0;
@@ -608,8 +625,24 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
608 goto error_close_devices; 625 goto error_close_devices;
609 } 626 }
610 627
628 /*
629 * Setup a dummy root and fs_info for test/set super. This is because
630 * we don't actually fill this stuff out until open_ctree, but we need
631 * it for searching for existing supers, so this lets us do that and
632 * then open_ctree will properly initialize everything later.
633 */
634 fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS);
635 tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
636 if (!fs_info || !tree_root) {
637 error = -ENOMEM;
638 goto error_close_devices;
639 }
640 fs_info->tree_root = tree_root;
641 fs_info->fs_devices = fs_devices;
642 tree_root->fs_info = fs_info;
643
611 bdev = fs_devices->latest_bdev; 644 bdev = fs_devices->latest_bdev;
612 s = sget(fs_type, btrfs_test_super, set_anon_super, fs_devices); 645 s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root);
613 if (IS_ERR(s)) 646 if (IS_ERR(s))
614 goto error_s; 647 goto error_s;
615 648
@@ -652,9 +685,9 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
652 mutex_unlock(&root->d_inode->i_mutex); 685 mutex_unlock(&root->d_inode->i_mutex);
653 686
654 if (IS_ERR(new_root)) { 687 if (IS_ERR(new_root)) {
688 dput(root);
655 deactivate_locked_super(s); 689 deactivate_locked_super(s);
656 error = PTR_ERR(new_root); 690 error = PTR_ERR(new_root);
657 dput(root);
658 goto error_free_subvol_name; 691 goto error_free_subvol_name;
659 } 692 }
660 if (!new_root->d_inode) { 693 if (!new_root->d_inode) {
@@ -675,6 +708,8 @@ error_s:
675 error = PTR_ERR(s); 708 error = PTR_ERR(s);
676error_close_devices: 709error_close_devices:
677 btrfs_close_devices(fs_devices); 710 btrfs_close_devices(fs_devices);
711 kfree(fs_info);
712 kfree(tree_root);
678error_free_subvol_name: 713error_free_subvol_name:
679 kfree(subvol_name); 714 kfree(subvol_name);
680 return ERR_PTR(error); 715 return ERR_PTR(error);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 1fffbc017bd..f50e931fc21 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -902,6 +902,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
902 struct btrfs_root *root = pending->root; 902 struct btrfs_root *root = pending->root;
903 struct btrfs_root *parent_root; 903 struct btrfs_root *parent_root;
904 struct inode *parent_inode; 904 struct inode *parent_inode;
905 struct dentry *parent;
905 struct dentry *dentry; 906 struct dentry *dentry;
906 struct extent_buffer *tmp; 907 struct extent_buffer *tmp;
907 struct extent_buffer *old; 908 struct extent_buffer *old;
@@ -941,7 +942,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
941 trans->block_rsv = &pending->block_rsv; 942 trans->block_rsv = &pending->block_rsv;
942 943
943 dentry = pending->dentry; 944 dentry = pending->dentry;
944 parent_inode = dentry->d_parent->d_inode; 945 parent = dget_parent(dentry);
946 parent_inode = parent->d_inode;
945 parent_root = BTRFS_I(parent_inode)->root; 947 parent_root = BTRFS_I(parent_inode)->root;
946 record_root_in_trans(trans, parent_root); 948 record_root_in_trans(trans, parent_root);
947 949
@@ -989,6 +991,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
989 parent_inode->i_ino, index, 991 parent_inode->i_ino, index,
990 dentry->d_name.name, dentry->d_name.len); 992 dentry->d_name.name, dentry->d_name.len);
991 BUG_ON(ret); 993 BUG_ON(ret);
994 dput(parent);
992 995
993 key.offset = (u64)-1; 996 key.offset = (u64)-1;
994 pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key); 997 pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index a29f19384a2..054744ac571 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2869,6 +2869,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
2869{ 2869{
2870 int ret = 0; 2870 int ret = 0;
2871 struct btrfs_root *root; 2871 struct btrfs_root *root;
2872 struct dentry *old_parent = NULL;
2872 2873
2873 /* 2874 /*
2874 * for regular files, if its inode is already on disk, we don't 2875 * for regular files, if its inode is already on disk, we don't
@@ -2910,10 +2911,13 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
2910 if (IS_ROOT(parent)) 2911 if (IS_ROOT(parent))
2911 break; 2912 break;
2912 2913
2913 parent = parent->d_parent; 2914 parent = dget_parent(parent);
2915 dput(old_parent);
2916 old_parent = parent;
2914 inode = parent->d_inode; 2917 inode = parent->d_inode;
2915 2918
2916 } 2919 }
2920 dput(old_parent);
2917out: 2921out:
2918 return ret; 2922 return ret;
2919} 2923}
@@ -2945,6 +2949,7 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
2945{ 2949{
2946 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL; 2950 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
2947 struct super_block *sb; 2951 struct super_block *sb;
2952 struct dentry *old_parent = NULL;
2948 int ret = 0; 2953 int ret = 0;
2949 u64 last_committed = root->fs_info->last_trans_committed; 2954 u64 last_committed = root->fs_info->last_trans_committed;
2950 2955
@@ -3016,10 +3021,13 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
3016 if (IS_ROOT(parent)) 3021 if (IS_ROOT(parent))
3017 break; 3022 break;
3018 3023
3019 parent = parent->d_parent; 3024 parent = dget_parent(parent);
3025 dput(old_parent);
3026 old_parent = parent;
3020 } 3027 }
3021 ret = 0; 3028 ret = 0;
3022end_trans: 3029end_trans:
3030 dput(old_parent);
3023 if (ret < 0) { 3031 if (ret < 0) {
3024 BUG_ON(ret != -ENOSPC); 3032 BUG_ON(ret != -ENOSPC);
3025 root->fs_info->last_trans_log_full_commit = trans->transid; 3033 root->fs_info->last_trans_log_full_commit = trans->transid;
@@ -3039,8 +3047,13 @@ end_no_trans:
3039int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, 3047int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
3040 struct btrfs_root *root, struct dentry *dentry) 3048 struct btrfs_root *root, struct dentry *dentry)
3041{ 3049{
3042 return btrfs_log_inode_parent(trans, root, dentry->d_inode, 3050 struct dentry *parent = dget_parent(dentry);
3043 dentry->d_parent, 0); 3051 int ret;
3052
3053 ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0);
3054 dput(parent);
3055
3056 return ret;
3044} 3057}
3045 3058
3046/* 3059/*
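Several hunks in this series replace direct d_parent chasing with dget_parent()/dput(). A condensed, non-compilable sketch of that pattern (not taken from the patch): the parent is pinned before use so a concurrent rename cannot free it, and the previous pin is dropped on each step:

static void walk_to_root(struct dentry *dentry)
{
	struct dentry *parent;
	struct dentry *old_parent = NULL;

	while (!IS_ROOT(dentry)) {
		parent = dget_parent(dentry);	/* takes a reference */
		dput(old_parent);		/* drop the previous one */
		old_parent = parent;
		dentry = parent;
	}
	dput(old_parent);			/* dput(NULL) is a no-op */
}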
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index cc04dc1445d..6b988450783 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -412,12 +412,16 @@ static noinline int device_list_add(const char *path,
412 412
413 device->fs_devices = fs_devices; 413 device->fs_devices = fs_devices;
414 fs_devices->num_devices++; 414 fs_devices->num_devices++;
415 } else if (strcmp(device->name, path)) { 415 } else if (!device->name || strcmp(device->name, path)) {
416 name = kstrdup(path, GFP_NOFS); 416 name = kstrdup(path, GFP_NOFS);
417 if (!name) 417 if (!name)
418 return -ENOMEM; 418 return -ENOMEM;
419 kfree(device->name); 419 kfree(device->name);
420 device->name = name; 420 device->name = name;
421 if (device->missing) {
422 fs_devices->missing_devices--;
423 device->missing = 0;
424 }
421 } 425 }
422 426
423 if (found_transid > fs_devices->latest_trans) { 427 if (found_transid > fs_devices->latest_trans) {
@@ -1236,6 +1240,9 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1236 1240
1237 device->fs_devices->num_devices--; 1241 device->fs_devices->num_devices--;
1238 1242
1243 if (device->missing)
1244 root->fs_info->fs_devices->missing_devices--;
1245
1239 next_device = list_entry(root->fs_info->fs_devices->devices.next, 1246 next_device = list_entry(root->fs_info->fs_devices->devices.next,
1240 struct btrfs_device, dev_list); 1247 struct btrfs_device, dev_list);
1241 if (device->bdev == root->fs_info->sb->s_bdev) 1248 if (device->bdev == root->fs_info->sb->s_bdev)
@@ -3080,7 +3087,9 @@ static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
3080 device->devid = devid; 3087 device->devid = devid;
3081 device->work.func = pending_bios_fn; 3088 device->work.func = pending_bios_fn;
3082 device->fs_devices = fs_devices; 3089 device->fs_devices = fs_devices;
3090 device->missing = 1;
3083 fs_devices->num_devices++; 3091 fs_devices->num_devices++;
3092 fs_devices->missing_devices++;
3084 spin_lock_init(&device->io_lock); 3093 spin_lock_init(&device->io_lock);
3085 INIT_LIST_HEAD(&device->dev_alloc_list); 3094 INIT_LIST_HEAD(&device->dev_alloc_list);
3086 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE); 3095 memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
@@ -3278,6 +3287,15 @@ static int read_one_dev(struct btrfs_root *root,
3278 device = add_missing_dev(root, devid, dev_uuid); 3287 device = add_missing_dev(root, devid, dev_uuid);
3279 if (!device) 3288 if (!device)
3280 return -ENOMEM; 3289 return -ENOMEM;
3290 } else if (!device->missing) {
3291 /*
3292 * this happens when a device that was properly setup
3293 * in the device info lists suddenly goes bad.
3294 * device->bdev is NULL, and so we have to set
3295 * device->missing to one here
3296 */
3297 root->fs_info->fs_devices->missing_devices++;
3298 device->missing = 1;
3281 } 3299 }
3282 } 3300 }
3283 3301
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 2b638b6e4ee..2740db49eb0 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -44,6 +44,7 @@ struct btrfs_device {
44 44
45 int writeable; 45 int writeable;
46 int in_fs_metadata; 46 int in_fs_metadata;
47 int missing;
47 48
48 spinlock_t io_lock; 49 spinlock_t io_lock;
49 50
@@ -93,6 +94,7 @@ struct btrfs_fs_devices {
93 u64 num_devices; 94 u64 num_devices;
94 u64 open_devices; 95 u64 open_devices;
95 u64 rw_devices; 96 u64 rw_devices;
97 u64 missing_devices;
96 u64 total_rw_bytes; 98 u64 total_rw_bytes;
97 struct block_device *latest_bdev; 99 struct block_device *latest_bdev;
98 100
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index e9c874abc9e..561438b6a50 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -204,7 +204,7 @@ static int readpage_nounlock(struct file *filp, struct page *page)
204 err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, 204 err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
205 page->index << PAGE_CACHE_SHIFT, &len, 205 page->index << PAGE_CACHE_SHIFT, &len,
206 ci->i_truncate_seq, ci->i_truncate_size, 206 ci->i_truncate_seq, ci->i_truncate_size,
207 &page, 1); 207 &page, 1, 0);
208 if (err == -ENOENT) 208 if (err == -ENOENT)
209 err = 0; 209 err = 0;
210 if (err < 0) { 210 if (err < 0) {
@@ -287,7 +287,7 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
287 rc = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout, 287 rc = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
288 offset, &len, 288 offset, &len,
289 ci->i_truncate_seq, ci->i_truncate_size, 289 ci->i_truncate_seq, ci->i_truncate_size,
290 pages, nr_pages); 290 pages, nr_pages, 0);
291 if (rc == -ENOENT) 291 if (rc == -ENOENT)
292 rc = 0; 292 rc = 0;
293 if (rc < 0) 293 if (rc < 0)
@@ -774,7 +774,7 @@ get_more_pages:
774 snapc, do_sync, 774 snapc, do_sync,
775 ci->i_truncate_seq, 775 ci->i_truncate_seq,
776 ci->i_truncate_size, 776 ci->i_truncate_size,
777 &inode->i_mtime, true, 1); 777 &inode->i_mtime, true, 1, 0);
778 max_pages = req->r_num_pages; 778 max_pages = req->r_num_pages;
779 779
780 alloc_page_vec(fsc, req); 780 alloc_page_vec(fsc, req);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 98ab13e2b71..60d27bc9eb8 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1430,8 +1430,8 @@ static int try_nonblocking_invalidate(struct inode *inode)
1430 invalidating_gen == ci->i_rdcache_gen) { 1430 invalidating_gen == ci->i_rdcache_gen) {
1431 /* success. */ 1431 /* success. */
1432 dout("try_nonblocking_invalidate %p success\n", inode); 1432 dout("try_nonblocking_invalidate %p success\n", inode);
1433 ci->i_rdcache_gen = 0; 1433 /* save any racing async invalidate some trouble */
1434 ci->i_rdcache_revoking = 0; 1434 ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
1435 return 0; 1435 return 0;
1436 } 1436 }
1437 dout("try_nonblocking_invalidate %p failed\n", inode); 1437 dout("try_nonblocking_invalidate %p failed\n", inode);
@@ -2273,8 +2273,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2273{ 2273{
2274 struct ceph_inode_info *ci = ceph_inode(inode); 2274 struct ceph_inode_info *ci = ceph_inode(inode);
2275 int mds = session->s_mds; 2275 int mds = session->s_mds;
2276 unsigned seq = le32_to_cpu(grant->seq); 2276 int seq = le32_to_cpu(grant->seq);
2277 unsigned issue_seq = le32_to_cpu(grant->issue_seq);
2278 int newcaps = le32_to_cpu(grant->caps); 2277 int newcaps = le32_to_cpu(grant->caps);
2279 int issued, implemented, used, wanted, dirty; 2278 int issued, implemented, used, wanted, dirty;
2280 u64 size = le64_to_cpu(grant->size); 2279 u64 size = le64_to_cpu(grant->size);
@@ -2286,8 +2285,8 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2286 int revoked_rdcache = 0; 2285 int revoked_rdcache = 0;
2287 int queue_invalidate = 0; 2286 int queue_invalidate = 0;
2288 2287
2289 dout("handle_cap_grant inode %p cap %p mds%d seq %u/%u %s\n", 2288 dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2290 inode, cap, mds, seq, issue_seq, ceph_cap_string(newcaps)); 2289 inode, cap, mds, seq, ceph_cap_string(newcaps));
2291 dout(" size %llu max_size %llu, i_size %llu\n", size, max_size, 2290 dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2292 inode->i_size); 2291 inode->i_size);
2293 2292
@@ -2383,7 +2382,6 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
2383 } 2382 }
2384 2383
2385 cap->seq = seq; 2384 cap->seq = seq;
2386 cap->issue_seq = issue_seq;
2387 2385
2388 /* file layout may have changed */ 2386 /* file layout may have changed */
2389 ci->i_layout = grant->layout; 2387 ci->i_layout = grant->layout;
@@ -2691,6 +2689,11 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
2691 NULL /* no caps context */); 2689 NULL /* no caps context */);
2692 try_flush_caps(inode, session, NULL); 2690 try_flush_caps(inode, session, NULL);
2693 up_read(&mdsc->snap_rwsem); 2691 up_read(&mdsc->snap_rwsem);
2692
2693 /* make sure we re-request max_size, if necessary */
2694 spin_lock(&inode->i_lock);
2695 ci->i_requested_max_size = 0;
2696 spin_unlock(&inode->i_lock);
2694} 2697}
2695 2698
2696/* 2699/*
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index e0a2dc6fcaf..d902948a90d 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -40,7 +40,8 @@ int ceph_init_dentry(struct dentry *dentry)
40 if (dentry->d_fsdata) 40 if (dentry->d_fsdata)
41 return 0; 41 return 0;
42 42
43 if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) 43 if (dentry->d_parent == NULL || /* nfs fh_to_dentry */
44 ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
44 dentry->d_op = &ceph_dentry_ops; 45 dentry->d_op = &ceph_dentry_ops;
45 else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR) 46 else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
46 dentry->d_op = &ceph_snapdir_dentry_ops; 47 dentry->d_op = &ceph_snapdir_dentry_ops;
@@ -114,8 +115,8 @@ static int __dcache_readdir(struct file *filp,
114 spin_lock(&dcache_lock); 115 spin_lock(&dcache_lock);
115 116
116 /* start at beginning? */ 117 /* start at beginning? */
117 if (filp->f_pos == 2 || (last && 118 if (filp->f_pos == 2 || last == NULL ||
118 filp->f_pos < ceph_dentry(last)->offset)) { 119 filp->f_pos < ceph_dentry(last)->offset) {
119 if (list_empty(&parent->d_subdirs)) 120 if (list_empty(&parent->d_subdirs))
120 goto out_unlock; 121 goto out_unlock;
121 p = parent->d_subdirs.prev; 122 p = parent->d_subdirs.prev;
@@ -336,7 +337,10 @@ more:
336 if (req->r_reply_info.dir_end) { 337 if (req->r_reply_info.dir_end) {
337 kfree(fi->last_name); 338 kfree(fi->last_name);
338 fi->last_name = NULL; 339 fi->last_name = NULL;
339 fi->next_offset = 2; 340 if (ceph_frag_is_rightmost(frag))
341 fi->next_offset = 2;
342 else
343 fi->next_offset = 0;
340 } else { 344 } else {
341 rinfo = &req->r_reply_info; 345 rinfo = &req->r_reply_info;
342 err = note_last_dentry(fi, 346 err = note_last_dentry(fi,
@@ -355,18 +359,22 @@ more:
355 u64 pos = ceph_make_fpos(frag, off); 359 u64 pos = ceph_make_fpos(frag, off);
356 struct ceph_mds_reply_inode *in = 360 struct ceph_mds_reply_inode *in =
357 rinfo->dir_in[off - fi->offset].in; 361 rinfo->dir_in[off - fi->offset].in;
362 struct ceph_vino vino;
363 ino_t ino;
364
358 dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n", 365 dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
359 off, off - fi->offset, rinfo->dir_nr, pos, 366 off, off - fi->offset, rinfo->dir_nr, pos,
360 rinfo->dir_dname_len[off - fi->offset], 367 rinfo->dir_dname_len[off - fi->offset],
361 rinfo->dir_dname[off - fi->offset], in); 368 rinfo->dir_dname[off - fi->offset], in);
362 BUG_ON(!in); 369 BUG_ON(!in);
363 ftype = le32_to_cpu(in->mode) >> 12; 370 ftype = le32_to_cpu(in->mode) >> 12;
371 vino.ino = le64_to_cpu(in->ino);
372 vino.snap = le64_to_cpu(in->snapid);
373 ino = ceph_vino_to_ino(vino);
364 if (filldir(dirent, 374 if (filldir(dirent,
365 rinfo->dir_dname[off - fi->offset], 375 rinfo->dir_dname[off - fi->offset],
366 rinfo->dir_dname_len[off - fi->offset], 376 rinfo->dir_dname_len[off - fi->offset],
367 pos, 377 pos, ino, ftype) < 0) {
368 le64_to_cpu(in->ino),
369 ftype) < 0) {
370 dout("filldir stopping us...\n"); 378 dout("filldir stopping us...\n");
371 return 0; 379 return 0;
372 } 380 }
@@ -414,6 +422,7 @@ static void reset_readdir(struct ceph_file_info *fi)
414 fi->last_readdir = NULL; 422 fi->last_readdir = NULL;
415 } 423 }
416 kfree(fi->last_name); 424 kfree(fi->last_name);
425 fi->last_name = NULL;
417 fi->next_offset = 2; /* compensate for . and .. */ 426 fi->next_offset = 2; /* compensate for . and .. */
418 if (fi->dentry) { 427 if (fi->dentry) {
419 dput(fi->dentry); 428 dput(fi->dentry);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index e77c28cf369..7d0e4a82d89 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -154,11 +154,13 @@ int ceph_open(struct inode *inode, struct file *file)
154 } 154 }
155 155
156 /* 156 /*
157 * No need to block if we have any caps. Update wanted set 157 * No need to block if we have caps on the auth MDS (for
158 * write) or any MDS (for read). Update wanted set
158 * asynchronously. 159 * asynchronously.
159 */ 160 */
160 spin_lock(&inode->i_lock); 161 spin_lock(&inode->i_lock);
161 if (__ceph_is_any_real_caps(ci)) { 162 if (__ceph_is_any_real_caps(ci) &&
163 (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
162 int mds_wanted = __ceph_caps_mds_wanted(ci); 164 int mds_wanted = __ceph_caps_mds_wanted(ci);
163 int issued = __ceph_caps_issued(ci, NULL); 165 int issued = __ceph_caps_issued(ci, NULL);
164 166
@@ -280,11 +282,13 @@ int ceph_release(struct inode *inode, struct file *file)
280static int striped_read(struct inode *inode, 282static int striped_read(struct inode *inode,
281 u64 off, u64 len, 283 u64 off, u64 len,
282 struct page **pages, int num_pages, 284 struct page **pages, int num_pages,
283 int *checkeof) 285 int *checkeof, bool align_to_pages,
286 unsigned long buf_align)
284{ 287{
285 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 288 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
286 struct ceph_inode_info *ci = ceph_inode(inode); 289 struct ceph_inode_info *ci = ceph_inode(inode);
287 u64 pos, this_len; 290 u64 pos, this_len;
291 int io_align, page_align;
288 int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */ 292 int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
289 int left, pages_left; 293 int left, pages_left;
290 int read; 294 int read;
@@ -300,14 +304,19 @@ static int striped_read(struct inode *inode,
300 page_pos = pages; 304 page_pos = pages;
301 pages_left = num_pages; 305 pages_left = num_pages;
302 read = 0; 306 read = 0;
307 io_align = off & ~PAGE_MASK;
303 308
304more: 309more:
310 if (align_to_pages)
311 page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
312 else
313 page_align = pos & ~PAGE_MASK;
305 this_len = left; 314 this_len = left;
306 ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode), 315 ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
307 &ci->i_layout, pos, &this_len, 316 &ci->i_layout, pos, &this_len,
308 ci->i_truncate_seq, 317 ci->i_truncate_seq,
309 ci->i_truncate_size, 318 ci->i_truncate_size,
310 page_pos, pages_left); 319 page_pos, pages_left, page_align);
311 hit_stripe = this_len < left; 320 hit_stripe = this_len < left;
312 was_short = ret >= 0 && ret < this_len; 321 was_short = ret >= 0 && ret < this_len;
313 if (ret == -ENOENT) 322 if (ret == -ENOENT)
@@ -368,32 +377,34 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data,
368 struct inode *inode = file->f_dentry->d_inode; 377 struct inode *inode = file->f_dentry->d_inode;
369 struct page **pages; 378 struct page **pages;
370 u64 off = *poff; 379 u64 off = *poff;
371 int num_pages = calc_pages_for(off, len); 380 int num_pages, ret;
372 int ret;
373 381
374 dout("sync_read on file %p %llu~%u %s\n", file, off, len, 382 dout("sync_read on file %p %llu~%u %s\n", file, off, len,
375 (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 383 (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
376 384
377 if (file->f_flags & O_DIRECT) { 385 if (file->f_flags & O_DIRECT) {
378 pages = ceph_get_direct_page_vector(data, num_pages, off, len); 386 num_pages = calc_pages_for((unsigned long)data, len);
379 387 pages = ceph_get_direct_page_vector(data, num_pages, true);
380 /*
381 * flush any page cache pages in this range. this
382 * will make concurrent normal and O_DIRECT io slow,
383 * but it will at least behave sensibly when they are
384 * in sequence.
385 */
386 } else { 388 } else {
389 num_pages = calc_pages_for(off, len);
387 pages = ceph_alloc_page_vector(num_pages, GFP_NOFS); 390 pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
388 } 391 }
389 if (IS_ERR(pages)) 392 if (IS_ERR(pages))
390 return PTR_ERR(pages); 393 return PTR_ERR(pages);
391 394
395 /*
396 * flush any page cache pages in this range. this
397 * will make concurrent normal and sync io slow,
398 * but it will at least behave sensibly when they are
399 * in sequence.
400 */
392 ret = filemap_write_and_wait(inode->i_mapping); 401 ret = filemap_write_and_wait(inode->i_mapping);
393 if (ret < 0) 402 if (ret < 0)
394 goto done; 403 goto done;
395 404
396 ret = striped_read(inode, off, len, pages, num_pages, checkeof); 405 ret = striped_read(inode, off, len, pages, num_pages, checkeof,
406 file->f_flags & O_DIRECT,
407 (unsigned long)data & ~PAGE_MASK);
397 408
398 if (ret >= 0 && (file->f_flags & O_DIRECT) == 0) 409 if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
399 ret = ceph_copy_page_vector_to_user(pages, data, off, ret); 410 ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
@@ -402,7 +413,7 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data,
402 413
403done: 414done:
404 if (file->f_flags & O_DIRECT) 415 if (file->f_flags & O_DIRECT)
405 ceph_put_page_vector(pages, num_pages); 416 ceph_put_page_vector(pages, num_pages, true);
406 else 417 else
407 ceph_release_page_vector(pages, num_pages); 418 ceph_release_page_vector(pages, num_pages);
408 dout("sync_read result %d\n", ret); 419 dout("sync_read result %d\n", ret);
@@ -448,6 +459,8 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
448 int flags; 459 int flags;
449 int do_sync = 0; 460 int do_sync = 0;
450 int check_caps = 0; 461 int check_caps = 0;
462 int page_align, io_align;
463 unsigned long buf_align;
451 int ret; 464 int ret;
452 struct timespec mtime = CURRENT_TIME; 465 struct timespec mtime = CURRENT_TIME;
453 466
@@ -462,6 +475,9 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
462 else 475 else
463 pos = *offset; 476 pos = *offset;
464 477
478 io_align = pos & ~PAGE_MASK;
479 buf_align = (unsigned long)data & ~PAGE_MASK;
480
465 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left); 481 ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
466 if (ret < 0) 482 if (ret < 0)
467 return ret; 483 return ret;
@@ -486,20 +502,27 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
486 */ 502 */
487more: 503more:
488 len = left; 504 len = left;
505 if (file->f_flags & O_DIRECT) {
506 /* write from beginning of first page, regardless of
507 io alignment */
508 page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
509 num_pages = calc_pages_for((unsigned long)data, len);
510 } else {
511 page_align = pos & ~PAGE_MASK;
512 num_pages = calc_pages_for(pos, len);
513 }
489 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 514 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
490 ceph_vino(inode), pos, &len, 515 ceph_vino(inode), pos, &len,
491 CEPH_OSD_OP_WRITE, flags, 516 CEPH_OSD_OP_WRITE, flags,
492 ci->i_snap_realm->cached_context, 517 ci->i_snap_realm->cached_context,
493 do_sync, 518 do_sync,
494 ci->i_truncate_seq, ci->i_truncate_size, 519 ci->i_truncate_seq, ci->i_truncate_size,
495 &mtime, false, 2); 520 &mtime, false, 2, page_align);
496 if (!req) 521 if (!req)
497 return -ENOMEM; 522 return -ENOMEM;
498 523
499 num_pages = calc_pages_for(pos, len);
500
501 if (file->f_flags & O_DIRECT) { 524 if (file->f_flags & O_DIRECT) {
502 pages = ceph_get_direct_page_vector(data, num_pages, pos, len); 525 pages = ceph_get_direct_page_vector(data, num_pages, false);
503 if (IS_ERR(pages)) { 526 if (IS_ERR(pages)) {
504 ret = PTR_ERR(pages); 527 ret = PTR_ERR(pages);
505 goto out; 528 goto out;
@@ -549,7 +572,7 @@ more:
549 } 572 }
550 573
551 if (file->f_flags & O_DIRECT) 574 if (file->f_flags & O_DIRECT)
552 ceph_put_page_vector(pages, num_pages); 575 ceph_put_page_vector(pages, num_pages, false);
553 else if (file->f_flags & O_SYNC) 576 else if (file->f_flags & O_SYNC)
554 ceph_release_page_vector(pages, num_pages); 577 ceph_release_page_vector(pages, num_pages);
555 578
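The new page_align computation keeps O_DIRECT data at the in-page offset dictated by the user buffer rather than by the file position. A small worked example with illustrative numbers (4 KiB pages assumed):

#include <stdio.h>

int main(void)
{
	unsigned long page_mask = ~0xFFFUL;	/* 4 KiB PAGE_MASK */
	unsigned long off = 6144;		/* original read offset       */
	unsigned long buf = 0x200;		/* user buffer, low bits      */
	unsigned long pos = 71680;		/* chunk offset after a hop   */

	unsigned long io_align  = off & ~page_mask;	/* 2048 */
	unsigned long buf_align = buf & ~page_mask;	/*  512 */

	unsigned long direct   = (pos - io_align + buf_align) & ~page_mask;
	unsigned long buffered = pos & ~page_mask;

	printf("O_DIRECT page_align=%lu, buffered page_align=%lu\n",
	       direct, buffered);		/* 512 vs 2048 */
	return 0;
}

In the O_DIRECT case the pages are the user's own pages, so the read data must land at the buffer's page offset (512 here), not at pos modulo the page size.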
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 1d6a45b5a04..bf1286588f2 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2,7 +2,6 @@
2 2
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/fs.h> 4#include <linux/fs.h>
5#include <linux/smp_lock.h>
6#include <linux/slab.h> 5#include <linux/slab.h>
7#include <linux/string.h> 6#include <linux/string.h>
8#include <linux/uaccess.h> 7#include <linux/uaccess.h>
@@ -471,7 +470,9 @@ void ceph_fill_file_time(struct inode *inode, int issued,
471 470
472 if (issued & (CEPH_CAP_FILE_EXCL| 471 if (issued & (CEPH_CAP_FILE_EXCL|
473 CEPH_CAP_FILE_WR| 472 CEPH_CAP_FILE_WR|
474 CEPH_CAP_FILE_BUFFER)) { 473 CEPH_CAP_FILE_BUFFER|
474 CEPH_CAP_AUTH_EXCL|
475 CEPH_CAP_XATTR_EXCL)) {
475 if (timespec_compare(ctime, &inode->i_ctime) > 0) { 476 if (timespec_compare(ctime, &inode->i_ctime) > 0) {
476 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", 477 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
477 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 478 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
@@ -511,7 +512,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
511 warn = 1; 512 warn = 1;
512 } 513 }
513 } else { 514 } else {
514 /* we have no write caps; whatever the MDS says is true */ 515 /* we have no write|excl caps; whatever the MDS says is true */
515 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) { 516 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
516 inode->i_ctime = *ctime; 517 inode->i_ctime = *ctime;
517 inode->i_mtime = *mtime; 518 inode->i_mtime = *mtime;
@@ -567,12 +568,17 @@ static int fill_inode(struct inode *inode,
567 568
568 /* 569 /*
569 * provided version will be odd if inode value is projected, 570 * provided version will be odd if inode value is projected,
570 * even if stable. skip the update if we have a newer info 571 * even if stable. skip the update if we have newer stable
571 * (e.g., due to inode info racing form multiple MDSs), or if 572 * info (ours>=theirs, e.g. due to racing mds replies), unless
572 * we are getting projected (unstable) inode info. 573 * we are getting projected (unstable) info (in which case the
574 * version is odd, and we want ours>theirs).
575 * us them
576 * 2 2 skip
577 * 3 2 skip
578 * 3 3 update
573 */ 579 */
574 if (le64_to_cpu(info->version) > 0 && 580 if (le64_to_cpu(info->version) > 0 &&
575 (ci->i_version & ~1) > le64_to_cpu(info->version)) 581 (ci->i_version & ~1) >= le64_to_cpu(info->version))
576 goto no_change; 582 goto no_change;
577 583
578 issued = __ceph_caps_issued(ci, &implemented); 584 issued = __ceph_caps_issued(ci, &implemented);
@@ -606,7 +612,14 @@ static int fill_inode(struct inode *inode,
606 le32_to_cpu(info->time_warp_seq), 612 le32_to_cpu(info->time_warp_seq),
607 &ctime, &mtime, &atime); 613 &ctime, &mtime, &atime);
608 614
609 ci->i_max_size = le64_to_cpu(info->max_size); 615 /* only update max_size on auth cap */
616 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
617 ci->i_max_size != le64_to_cpu(info->max_size)) {
618 dout("max_size %lld -> %llu\n", ci->i_max_size,
619 le64_to_cpu(info->max_size));
620 ci->i_max_size = le64_to_cpu(info->max_size);
621 }
622
610 ci->i_layout = info->layout; 623 ci->i_layout = info->layout;
611 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; 624 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
612 625
@@ -1055,7 +1068,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1055 ininfo = rinfo->targeti.in; 1068 ininfo = rinfo->targeti.in;
1056 vino.ino = le64_to_cpu(ininfo->ino); 1069 vino.ino = le64_to_cpu(ininfo->ino);
1057 vino.snap = le64_to_cpu(ininfo->snapid); 1070 vino.snap = le64_to_cpu(ininfo->snapid);
1058 if (!dn->d_inode) { 1071 in = dn->d_inode;
1072 if (!in) {
1059 in = ceph_get_inode(sb, vino); 1073 in = ceph_get_inode(sb, vino);
1060 if (IS_ERR(in)) { 1074 if (IS_ERR(in)) {
1061 pr_err("fill_trace bad get_inode " 1075 pr_err("fill_trace bad get_inode "
@@ -1386,11 +1400,8 @@ static void ceph_invalidate_work(struct work_struct *work)
1386 spin_lock(&inode->i_lock); 1400 spin_lock(&inode->i_lock);
1387 dout("invalidate_pages %p gen %d revoking %d\n", inode, 1401 dout("invalidate_pages %p gen %d revoking %d\n", inode,
1388 ci->i_rdcache_gen, ci->i_rdcache_revoking); 1402 ci->i_rdcache_gen, ci->i_rdcache_revoking);
1389 if (ci->i_rdcache_gen == 0 || 1403 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1390 ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1391 BUG_ON(ci->i_rdcache_revoking > ci->i_rdcache_gen);
1392 /* nevermind! */ 1404 /* nevermind! */
1393 ci->i_rdcache_revoking = 0;
1394 spin_unlock(&inode->i_lock); 1405 spin_unlock(&inode->i_lock);
1395 goto out; 1406 goto out;
1396 } 1407 }
@@ -1400,15 +1411,16 @@ static void ceph_invalidate_work(struct work_struct *work)
1400 ceph_invalidate_nondirty_pages(inode->i_mapping); 1411 ceph_invalidate_nondirty_pages(inode->i_mapping);
1401 1412
1402 spin_lock(&inode->i_lock); 1413 spin_lock(&inode->i_lock);
1403 if (orig_gen == ci->i_rdcache_gen) { 1414 if (orig_gen == ci->i_rdcache_gen &&
1415 orig_gen == ci->i_rdcache_revoking) {
1404 dout("invalidate_pages %p gen %d successful\n", inode, 1416 dout("invalidate_pages %p gen %d successful\n", inode,
1405 ci->i_rdcache_gen); 1417 ci->i_rdcache_gen);
1406 ci->i_rdcache_gen = 0; 1418 ci->i_rdcache_revoking--;
1407 ci->i_rdcache_revoking = 0;
1408 check = 1; 1419 check = 1;
1409 } else { 1420 } else {
1410 dout("invalidate_pages %p gen %d raced, gen now %d\n", 1421 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1411 inode, orig_gen, ci->i_rdcache_gen); 1422 inode, orig_gen, ci->i_rdcache_gen,
1423 ci->i_rdcache_revoking);
1412 } 1424 }
1413 spin_unlock(&inode->i_lock); 1425 spin_unlock(&inode->i_lock);
1414 1426
@@ -1739,7 +1751,7 @@ int ceph_do_getattr(struct inode *inode, int mask)
1739 return 0; 1751 return 0;
1740 } 1752 }
1741 1753
1742 dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask)); 1754 dout("do_getattr inode %p mask %s mode 0%o\n", inode, ceph_cap_string(mask), inode->i_mode);
1743 if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) 1755 if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
1744 return 0; 1756 return 0;
1745 1757
diff --git a/fs/ceph/ioctl.h b/fs/ceph/ioctl.h
index a6ce54e94eb..52e8fd74d45 100644
--- a/fs/ceph/ioctl.h
+++ b/fs/ceph/ioctl.h
@@ -4,7 +4,7 @@
4#include <linux/ioctl.h> 4#include <linux/ioctl.h>
5#include <linux/types.h> 5#include <linux/types.h>
6 6
7#define CEPH_IOCTL_MAGIC 0x98 7#define CEPH_IOCTL_MAGIC 0x97
8 8
9/* just use u64 to align sanely on all archs */ 9/* just use u64 to align sanely on all archs */
10struct ceph_ioctl_layout { 10struct ceph_ioctl_layout {
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 40abde93c34..476b329867d 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -11,40 +11,68 @@
11 * Implement fcntl and flock locking functions. 11 * Implement fcntl and flock locking functions.
12 */ 12 */
13static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file, 13static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
14 u64 pid, u64 pid_ns, 14 int cmd, u8 wait, struct file_lock *fl)
15 int cmd, u64 start, u64 length, u8 wait)
16{ 15{
17 struct inode *inode = file->f_dentry->d_inode; 16 struct inode *inode = file->f_dentry->d_inode;
18 struct ceph_mds_client *mdsc = 17 struct ceph_mds_client *mdsc =
19 ceph_sb_to_client(inode->i_sb)->mdsc; 18 ceph_sb_to_client(inode->i_sb)->mdsc;
20 struct ceph_mds_request *req; 19 struct ceph_mds_request *req;
21 int err; 20 int err;
21 u64 length = 0;
22 22
23 req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS); 23 req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
24 if (IS_ERR(req)) 24 if (IS_ERR(req))
25 return PTR_ERR(req); 25 return PTR_ERR(req);
26 req->r_inode = igrab(inode); 26 req->r_inode = igrab(inode);
27 27
28 /* mds requires start and length rather than start and end */
29 if (LLONG_MAX == fl->fl_end)
30 length = 0;
31 else
32 length = fl->fl_end - fl->fl_start + 1;
33
28 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " 34 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
29 "length: %llu, wait: %d, type`: %d", (int)lock_type, 35 "length: %llu, wait: %d, type`: %d", (int)lock_type,
30 (int)operation, pid, start, length, wait, cmd); 36 (int)operation, (u64)fl->fl_pid, fl->fl_start,
37 length, wait, fl->fl_type);
38
31 39
32 req->r_args.filelock_change.rule = lock_type; 40 req->r_args.filelock_change.rule = lock_type;
33 req->r_args.filelock_change.type = cmd; 41 req->r_args.filelock_change.type = cmd;
34 req->r_args.filelock_change.pid = cpu_to_le64(pid); 42 req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
35 /* This should be adjusted, but I'm not sure if 43 /* This should be adjusted, but I'm not sure if
36 namespaces actually get id numbers*/ 44 namespaces actually get id numbers*/
37 req->r_args.filelock_change.pid_namespace = 45 req->r_args.filelock_change.pid_namespace =
38 cpu_to_le64((u64)pid_ns); 46 cpu_to_le64((u64)(unsigned long)fl->fl_nspid);
39 req->r_args.filelock_change.start = cpu_to_le64(start); 47 req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start);
40 req->r_args.filelock_change.length = cpu_to_le64(length); 48 req->r_args.filelock_change.length = cpu_to_le64(length);
41 req->r_args.filelock_change.wait = wait; 49 req->r_args.filelock_change.wait = wait;
42 50
43 err = ceph_mdsc_do_request(mdsc, inode, req); 51 err = ceph_mdsc_do_request(mdsc, inode, req);
52
 53 	if (operation == CEPH_MDS_OP_GETFILELOCK) {
54 fl->fl_pid = le64_to_cpu(req->r_reply_info.filelock_reply->pid);
55 if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
56 fl->fl_type = F_RDLCK;
57 else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type)
58 fl->fl_type = F_WRLCK;
59 else
60 fl->fl_type = F_UNLCK;
61
62 fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start);
63 length = le64_to_cpu(req->r_reply_info.filelock_reply->start) +
64 le64_to_cpu(req->r_reply_info.filelock_reply->length);
65 if (length >= 1)
 66 			fl->fl_end = length - 1;
67 else
68 fl->fl_end = 0;
69
70 }
44 ceph_mdsc_put_request(req); 71 ceph_mdsc_put_request(req);
45 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " 72 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
 46 	    "length: %llu, wait: %d, type: %d err code %d", (int)lock_type,  73 	    "length: %llu, wait: %d, type: %d, err code %d", (int)lock_type,
47 (int)operation, pid, start, length, wait, cmd, err); 74 (int)operation, (u64)fl->fl_pid, fl->fl_start,
75 length, wait, fl->fl_type, err);
48 return err; 76 return err;
49} 77}
50 78
@@ -54,7 +82,6 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
54 */ 82 */
55int ceph_lock(struct file *file, int cmd, struct file_lock *fl) 83int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
56{ 84{
57 u64 length;
58 u8 lock_cmd; 85 u8 lock_cmd;
59 int err; 86 int err;
60 u8 wait = 0; 87 u8 wait = 0;
@@ -76,29 +103,20 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
76 else 103 else
77 lock_cmd = CEPH_LOCK_UNLOCK; 104 lock_cmd = CEPH_LOCK_UNLOCK;
78 105
79 if (LLONG_MAX == fl->fl_end) 106 err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, lock_cmd, wait, fl);
80 length = 0;
81 else
82 length = fl->fl_end - fl->fl_start + 1;
83
84 err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
85 (u64)fl->fl_pid,
86 (u64)(unsigned long)fl->fl_nspid,
87 lock_cmd, fl->fl_start,
88 length, wait);
89 if (!err) { 107 if (!err) {
 90 		dout("mds locked, locking locally"); 108 		if (op != CEPH_MDS_OP_GETFILELOCK) {
91 err = posix_lock_file(file, fl, NULL); 109 dout("mds locked, locking locally");
92 if (err && (CEPH_MDS_OP_SETFILELOCK == op)) { 110 err = posix_lock_file(file, fl, NULL);
93 /* undo! This should only happen if the kernel detects 111 if (err && (CEPH_MDS_OP_SETFILELOCK == op)) {
94 * local deadlock. */ 112 /* undo! This should only happen if the kernel detects
95 ceph_lock_message(CEPH_LOCK_FCNTL, op, file, 113 * local deadlock. */
96 (u64)fl->fl_pid, 114 ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
97 (u64)(unsigned long)fl->fl_nspid, 115 CEPH_LOCK_UNLOCK, 0, fl);
98 CEPH_LOCK_UNLOCK, fl->fl_start, 116 dout("got %d on posix_lock_file, undid lock", err);
99 length, 0); 117 }
100 dout("got %d on posix_lock_file, undid lock", err);
101 } 118 }
119
102 } else { 120 } else {
103 dout("mds returned error code %d", err); 121 dout("mds returned error code %d", err);
104 } 122 }
@@ -107,7 +125,6 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
107 125
108int ceph_flock(struct file *file, int cmd, struct file_lock *fl) 126int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
109{ 127{
110 u64 length;
111 u8 lock_cmd; 128 u8 lock_cmd;
112 int err; 129 int err;
113 u8 wait = 1; 130 u8 wait = 1;
@@ -127,26 +144,15 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
127 lock_cmd = CEPH_LOCK_EXCL; 144 lock_cmd = CEPH_LOCK_EXCL;
128 else 145 else
129 lock_cmd = CEPH_LOCK_UNLOCK; 146 lock_cmd = CEPH_LOCK_UNLOCK;
130 /* mds requires start and length rather than start and end */
131 if (LLONG_MAX == fl->fl_end)
132 length = 0;
133 else
134 length = fl->fl_end - fl->fl_start + 1;
135 147
136 err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, 148 err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
137 file, (u64)fl->fl_pid, 149 file, lock_cmd, wait, fl);
138 (u64)(unsigned long)fl->fl_nspid,
139 lock_cmd, fl->fl_start,
140 length, wait);
141 if (!err) { 150 if (!err) {
142 err = flock_lock_file_wait(file, fl); 151 err = flock_lock_file_wait(file, fl);
143 if (err) { 152 if (err) {
144 ceph_lock_message(CEPH_LOCK_FLOCK, 153 ceph_lock_message(CEPH_LOCK_FLOCK,
145 CEPH_MDS_OP_SETFILELOCK, 154 CEPH_MDS_OP_SETFILELOCK,
146 file, (u64)fl->fl_pid, 155 file, CEPH_LOCK_UNLOCK, 0, fl);
147 (u64)(unsigned long)fl->fl_nspid,
148 CEPH_LOCK_UNLOCK, fl->fl_start,
149 length, 0);
150 dout("got %d on flock_lock_file_wait, undid lock", err); 156 dout("got %d on flock_lock_file_wait, undid lock", err);
151 } 157 }
152 } else { 158 } else {
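The locks.c refactor above moves the VFS-to-MDS range conversion (and the pid/pid_namespace plumbing) into ceph_lock_message() itself, so callers now hand over the whole struct file_lock. A minimal sketch of that conversion, assuming only the standard VFS file_lock fields; the helper name is illustrative and not part of the patch:

#include <linux/fs.h>		/* struct file_lock */
#include <linux/kernel.h>	/* LLONG_MAX */

/*
 * The VFS describes a lock as [fl_start, fl_end], with fl_end == LLONG_MAX
 * meaning "to end of file"; the MDS wire format wants start + length,
 * with length 0 meaning "to end of file".
 */
static inline u64 ceph_lock_length_sketch(const struct file_lock *fl)
{
	if (fl->fl_end == LLONG_MAX)
		return 0;
	return fl->fl_end - fl->fl_start + 1;
}

Going the other way, the new F_GETLK reply handling above rebuilds fl_end as start + length - 1 whenever the reply carries a non-zero length.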
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 3142b15940c..38800eaa81d 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -6,7 +6,6 @@
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/debugfs.h> 7#include <linux/debugfs.h>
8#include <linux/seq_file.h> 8#include <linux/seq_file.h>
9#include <linux/smp_lock.h>
10 9
11#include "super.h" 10#include "super.h"
12#include "mds_client.h" 11#include "mds_client.h"
@@ -203,6 +202,38 @@ out_bad:
203} 202}
204 203
205/* 204/*
205 * parse fcntl F_GETLK results
206 */
207static int parse_reply_info_filelock(void **p, void *end,
208 struct ceph_mds_reply_info_parsed *info)
209{
210 if (*p + sizeof(*info->filelock_reply) > end)
211 goto bad;
212
213 info->filelock_reply = *p;
214 *p += sizeof(*info->filelock_reply);
215
216 if (unlikely(*p != end))
217 goto bad;
218 return 0;
219
220bad:
221 return -EIO;
222}
223
224/*
225 * parse extra results
226 */
227static int parse_reply_info_extra(void **p, void *end,
228 struct ceph_mds_reply_info_parsed *info)
229{
230 if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
231 return parse_reply_info_filelock(p, end, info);
232 else
233 return parse_reply_info_dir(p, end, info);
234}
235
236/*
206 * parse entire mds reply 237 * parse entire mds reply
207 */ 238 */
208static int parse_reply_info(struct ceph_msg *msg, 239static int parse_reply_info(struct ceph_msg *msg,
@@ -224,10 +255,10 @@ static int parse_reply_info(struct ceph_msg *msg,
224 goto out_bad; 255 goto out_bad;
225 } 256 }
226 257
227 /* dir content */ 258 /* extra */
228 ceph_decode_32_safe(&p, end, len, bad); 259 ceph_decode_32_safe(&p, end, len, bad);
229 if (len > 0) { 260 if (len > 0) {
230 err = parse_reply_info_dir(&p, p+len, info); 261 err = parse_reply_info_extra(&p, p+len, info);
231 if (err < 0) 262 if (err < 0)
232 goto out_bad; 263 goto out_bad;
233 } 264 }
@@ -529,6 +560,9 @@ static void __register_request(struct ceph_mds_client *mdsc,
529 ceph_mdsc_get_request(req); 560 ceph_mdsc_get_request(req);
530 __insert_request(mdsc, req); 561 __insert_request(mdsc, req);
531 562
563 req->r_uid = current_fsuid();
564 req->r_gid = current_fsgid();
565
532 if (dir) { 566 if (dir) {
533 struct ceph_inode_info *ci = ceph_inode(dir); 567 struct ceph_inode_info *ci = ceph_inode(dir);
534 568
@@ -1588,8 +1622,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1588 1622
1589 head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); 1623 head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
1590 head->op = cpu_to_le32(req->r_op); 1624 head->op = cpu_to_le32(req->r_op);
1591 head->caller_uid = cpu_to_le32(current_fsuid()); 1625 head->caller_uid = cpu_to_le32(req->r_uid);
1592 head->caller_gid = cpu_to_le32(current_fsgid()); 1626 head->caller_gid = cpu_to_le32(req->r_gid);
1593 head->args = req->r_args; 1627 head->args = req->r_args;
1594 1628
1595 ceph_encode_filepath(&p, end, ino1, path1); 1629 ceph_encode_filepath(&p, end, ino1, path1);
@@ -2072,7 +2106,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2072 2106
2073 mutex_lock(&session->s_mutex); 2107 mutex_lock(&session->s_mutex);
2074 if (err < 0) { 2108 if (err < 0) {
2075 pr_err("mdsc_handle_reply got corrupt reply mds%d\n", mds); 2109 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
2076 ceph_msg_dump(msg); 2110 ceph_msg_dump(msg);
2077 goto out_err; 2111 goto out_err;
2078 } 2112 }
@@ -2092,7 +2126,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2092 mutex_lock(&req->r_fill_mutex); 2126 mutex_lock(&req->r_fill_mutex);
2093 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); 2127 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
2094 if (err == 0) { 2128 if (err == 0) {
2095 if (result == 0 && rinfo->dir_nr) 2129 if (result == 0 && req->r_op != CEPH_MDS_OP_GETFILELOCK &&
2130 rinfo->dir_nr)
2096 ceph_readdir_prepopulate(req, req->r_session); 2131 ceph_readdir_prepopulate(req, req->r_session);
2097 ceph_unreserve_caps(mdsc, &req->r_caps_reservation); 2132 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
2098 } 2133 }
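The new parse_reply_info_filelock() above follows the usual pattern for pulling a fixed-size record out of an MDS message: verify that the remaining buffer can hold the structure, point at it in place, advance the cursor, and insist that the "extra" section is fully consumed. A generic, hedged sketch of that pattern (function and variable names are illustrative only):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

static int decode_fixed_record(void **p, void *end, void *out, size_t out_len)
{
	if (*p + out_len > end)		/* would run past the message */
		return -EIO;
	memcpy(out, *p, out_len);	/* the patch aliases in place instead of copying */
	*p += out_len;
	if (*p != end)			/* trailing bytes nobody expects */
		return -EIO;
	return 0;
}

parse_reply_info_extra() then dispatches on head->op so that a GETFILELOCK reply is decoded this way while every other reply still goes through the readdir parser.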
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index d66d63c7235..aabe563b54d 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -42,26 +42,37 @@ struct ceph_mds_reply_info_in {
42}; 42};
43 43
44/* 44/*
45 * parsed info about an mds reply, including information about the 45 * parsed info about an mds reply, including information about
46 * target inode and/or its parent directory and dentry, and directory 46 * either: 1) the target inode and/or its parent directory and dentry,
47 * contents (for readdir results). 47 * and directory contents (for readdir results), or
48 * 2) the file range lock info (for fcntl F_GETLK results).
48 */ 49 */
49struct ceph_mds_reply_info_parsed { 50struct ceph_mds_reply_info_parsed {
50 struct ceph_mds_reply_head *head; 51 struct ceph_mds_reply_head *head;
51 52
53 /* trace */
52 struct ceph_mds_reply_info_in diri, targeti; 54 struct ceph_mds_reply_info_in diri, targeti;
53 struct ceph_mds_reply_dirfrag *dirfrag; 55 struct ceph_mds_reply_dirfrag *dirfrag;
54 char *dname; 56 char *dname;
55 u32 dname_len; 57 u32 dname_len;
56 struct ceph_mds_reply_lease *dlease; 58 struct ceph_mds_reply_lease *dlease;
57 59
58 struct ceph_mds_reply_dirfrag *dir_dir; 60 /* extra */
59 int dir_nr; 61 union {
60 char **dir_dname; 62 /* for fcntl F_GETLK results */
61 u32 *dir_dname_len; 63 struct ceph_filelock *filelock_reply;
62 struct ceph_mds_reply_lease **dir_dlease; 64
63 struct ceph_mds_reply_info_in *dir_in; 65 /* for readdir results */
64 u8 dir_complete, dir_end; 66 struct {
67 struct ceph_mds_reply_dirfrag *dir_dir;
68 int dir_nr;
69 char **dir_dname;
70 u32 *dir_dname_len;
71 struct ceph_mds_reply_lease **dir_dlease;
72 struct ceph_mds_reply_info_in *dir_in;
73 u8 dir_complete, dir_end;
74 };
75 };
65 76
66 /* encoded blob describing snapshot contexts for certain 77 /* encoded blob describing snapshot contexts for certain
67 operations (e.g., open) */ 78 operations (e.g., open) */
@@ -170,6 +181,8 @@ struct ceph_mds_request {
170 181
171 union ceph_mds_request_args r_args; 182 union ceph_mds_request_args r_args;
172 int r_fmode; /* file mode, if expecting cap */ 183 int r_fmode; /* file mode, if expecting cap */
184 uid_t r_uid;
185 gid_t r_gid;
173 186
174 /* for choosing which mds to send this request to */ 187 /* for choosing which mds to send this request to */
175 int r_direct_mode; 188 int r_direct_mode;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 1886294e12f..7f01728a465 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -293,9 +293,7 @@ struct ceph_inode_info {
293 int i_rd_ref, i_rdcache_ref, i_wr_ref; 293 int i_rd_ref, i_rdcache_ref, i_wr_ref;
294 int i_wrbuffer_ref, i_wrbuffer_ref_head; 294 int i_wrbuffer_ref, i_wrbuffer_ref_head;
295 u32 i_shared_gen; /* increment each time we get FILE_SHARED */ 295 u32 i_shared_gen; /* increment each time we get FILE_SHARED */
296 u32 i_rdcache_gen; /* we increment this each time we get 296 u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. */
297 FILE_CACHE. If it's non-zero, we
298 _may_ have cached pages. */
299 u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */ 297 u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */
300 298
301 struct list_head i_unsafe_writes; /* uncommitted sync writes */ 299 struct list_head i_unsafe_writes; /* uncommitted sync writes */
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 0ed213970ce..ee45648b0d1 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -4,6 +4,7 @@ config CIFS
4 select NLS 4 select NLS
5 select CRYPTO 5 select CRYPTO
6 select CRYPTO_MD5 6 select CRYPTO_MD5
7 select CRYPTO_HMAC
7 select CRYPTO_ARC4 8 select CRYPTO_ARC4
8 help 9 help
9 This is the client VFS module for the Common Internet File System 10 This is the client VFS module for the Common Internet File System
@@ -143,6 +144,13 @@ config CIFS_FSCACHE
143 to be cached locally on disk through the general filesystem cache 144 to be cached locally on disk through the general filesystem cache
144 manager. If unsure, say N. 145 manager. If unsure, say N.
145 146
147config CIFS_ACL
148 bool "Provide CIFS ACL support (EXPERIMENTAL)"
149 depends on EXPERIMENTAL && CIFS_XATTR
150 help
 151	  Allows fetching a CIFS/NTFS ACL from the server. The DACL blob
152 is handed over to the application/caller.
153
146config CIFS_EXPERIMENTAL 154config CIFS_EXPERIMENTAL
147 bool "CIFS Experimental Features (EXPERIMENTAL)" 155 bool "CIFS Experimental Features (EXPERIMENTAL)"
148 depends on CIFS && EXPERIMENTAL 156 depends on CIFS && EXPERIMENTAL
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index adefa60a9bd..43b19dd3919 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -6,7 +6,9 @@ obj-$(CONFIG_CIFS) += cifs.o
6cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \ 6cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \
7 link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o \ 7 link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o \
8 md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o \ 8 md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o \
9 readdir.o ioctl.o sess.o export.o cifsacl.o 9 readdir.o ioctl.o sess.o export.o
10
11cifs-$(CONFIG_CIFS_ACL) += cifsacl.o
10 12
11cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o 13cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
12 14
diff --git a/fs/cifs/README b/fs/cifs/README
index ee68d103654..46af99ab361 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -337,6 +337,15 @@ A partial list of the supported mount options follows:
337 wsize default write size (default 57344) 337 wsize default write size (default 57344)
338 maximum wsize currently allowed by CIFS is 57344 (fourteen 338 maximum wsize currently allowed by CIFS is 57344 (fourteen
339 4096 byte pages) 339 4096 byte pages)
340 actimeo=n attribute cache timeout in seconds (default 1 second).
341 After this timeout, the cifs client requests fresh attribute
 342		information from the server. This option allows tuning the
 343		attribute cache timeout to suit workload needs. Shorter
 344		timeouts mean better cache coherency but an increased number
 345		of calls to the server. Longer timeouts mean a reduced number
 346		of calls to the server at the expense of less strict cache
 347		coherency checks (i.e. an incorrect attribute cache for a short
 348		period of time).
340 rw mount the network share read-write (note that the 349 rw mount the network share read-write (note that the
341 server may still consider the share read-only) 350 server may still consider the share read-only)
342 ro mount network share read-only 351 ro mount network share read-only
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index e9a393c9c2c..7852cd67705 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -48,6 +48,7 @@ struct cifs_sb_info {
48 struct nls_table *local_nls; 48 struct nls_table *local_nls;
49 unsigned int rsize; 49 unsigned int rsize;
50 unsigned int wsize; 50 unsigned int wsize;
51 unsigned long actimeo; /* attribute cache timeout (jiffies) */
51 atomic_t active; 52 atomic_t active;
52 uid_t mnt_uid; 53 uid_t mnt_uid;
53 gid_t mnt_gid; 54 gid_t mnt_gid;
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index c9b4792ae82..a437ec391a0 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -30,8 +30,6 @@
30#include "cifs_debug.h" 30#include "cifs_debug.h"
31 31
32 32
33#ifdef CONFIG_CIFS_EXPERIMENTAL
34
35static struct cifs_wksid wksidarr[NUM_WK_SIDS] = { 33static struct cifs_wksid wksidarr[NUM_WK_SIDS] = {
36 {{1, 0, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0} }, "null user"}, 34 {{1, 0, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0} }, "null user"},
37 {{1, 1, {0, 0, 0, 0, 0, 1}, {0, 0, 0, 0, 0} }, "nobody"}, 35 {{1, 1, {0, 0, 0, 0, 0, 1}, {0, 0, 0, 0, 0} }, "nobody"},
@@ -560,7 +558,7 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
560 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 558 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
561 559
562 if (IS_ERR(tlink)) 560 if (IS_ERR(tlink))
563 return NULL; 561 return ERR_CAST(tlink);
564 562
565 xid = GetXid(); 563 xid = GetXid();
566 rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen); 564 rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
@@ -568,7 +566,9 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
568 566
569 cifs_put_tlink(tlink); 567 cifs_put_tlink(tlink);
570 568
571 cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen); 569 cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
570 if (rc)
571 return ERR_PTR(rc);
572 return pntsd; 572 return pntsd;
573} 573}
574 574
@@ -583,7 +583,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
583 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); 583 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
584 584
585 if (IS_ERR(tlink)) 585 if (IS_ERR(tlink))
586 return NULL; 586 return ERR_CAST(tlink);
587 587
588 tcon = tlink_tcon(tlink); 588 tcon = tlink_tcon(tlink);
589 xid = GetXid(); 589 xid = GetXid();
@@ -591,23 +591,22 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
591 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL, 0, 591 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL, 0,
592 &fid, &oplock, NULL, cifs_sb->local_nls, 592 &fid, &oplock, NULL, cifs_sb->local_nls,
593 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); 593 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
594 if (rc) { 594 if (!rc) {
595 cERROR(1, "Unable to open file to get ACL"); 595 rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
596 goto out; 596 CIFSSMBClose(xid, tcon, fid);
597 } 597 }
598 598
599 rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
600 cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen);
601
602 CIFSSMBClose(xid, tcon, fid);
603 out:
604 cifs_put_tlink(tlink); 599 cifs_put_tlink(tlink);
605 FreeXid(xid); 600 FreeXid(xid);
601
602 cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
603 if (rc)
604 return ERR_PTR(rc);
606 return pntsd; 605 return pntsd;
607} 606}
608 607
609/* Retrieve an ACL from the server */ 608/* Retrieve an ACL from the server */
610static struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb, 609struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
611 struct inode *inode, const char *path, 610 struct inode *inode, const char *path,
612 u32 *pacllen) 611 u32 *pacllen)
613{ 612{
@@ -695,7 +694,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
695} 694}
696 695
 697/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */ 696/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
698void 697int
699cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, 698cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
700 struct inode *inode, const char *path, const __u16 *pfid) 699 struct inode *inode, const char *path, const __u16 *pfid)
701{ 700{
@@ -711,17 +710,21 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
711 pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen); 710 pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
712 711
713 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ 712 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
714 if (pntsd) 713 if (IS_ERR(pntsd)) {
714 rc = PTR_ERR(pntsd);
715 cERROR(1, "%s: error %d getting sec desc", __func__, rc);
716 } else {
715 rc = parse_sec_desc(pntsd, acllen, fattr); 717 rc = parse_sec_desc(pntsd, acllen, fattr);
716 if (rc) 718 kfree(pntsd);
717 cFYI(1, "parse sec desc failed rc = %d", rc); 719 if (rc)
720 cERROR(1, "parse sec desc failed rc = %d", rc);
721 }
718 722
719 kfree(pntsd); 723 return rc;
720 return;
721} 724}
722 725
723/* Convert mode bits to an ACL so we can update the ACL on the server */ 726/* Convert mode bits to an ACL so we can update the ACL on the server */
724int mode_to_acl(struct inode *inode, const char *path, __u64 nmode) 727int mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode)
725{ 728{
726 int rc = 0; 729 int rc = 0;
727 __u32 secdesclen = 0; 730 __u32 secdesclen = 0;
@@ -736,7 +739,10 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
736 /* Add three ACEs for owner, group, everyone getting rid of 739 /* Add three ACEs for owner, group, everyone getting rid of
737 other ACEs as chmod disables ACEs and set the security descriptor */ 740 other ACEs as chmod disables ACEs and set the security descriptor */
738 741
739 if (pntsd) { 742 if (IS_ERR(pntsd)) {
743 rc = PTR_ERR(pntsd);
744 cERROR(1, "%s: error %d getting sec desc", __func__, rc);
745 } else {
740 /* allocate memory for the smb header, 746 /* allocate memory for the smb header,
741 set security descriptor request security descriptor 747 set security descriptor request security descriptor
 742	   parameters, and security descriptor itself */ 748	   parameters, and security descriptor itself */
@@ -766,4 +772,3 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
766 772
767 return rc; 773 return rc;
768} 774}
769#endif /* CONFIG_CIFS_EXPERIMENTAL */
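With this change the ACL helpers in cifsacl.c stop returning NULL on failure and instead encode the errno in the pointer, so callers must switch from NULL checks to IS_ERR()/PTR_ERR(). A minimal sketch of the consumer side, assuming the cifs_sb_info/cifs_ntsd types and the get_cifs_acl() prototype now exported via cifsproto.h; the surrounding function is illustrative, not from the patch:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
/* struct cifs_sb_info, struct cifs_ntsd and get_cifs_acl() come from the
 * private fs/cifs headers (cifs_fs_sb.h, cifsacl.h, cifsproto.h). */

static int read_acl_sketch(struct cifs_sb_info *cifs_sb, struct inode *inode,
			   const char *path)
{
	u32 acllen;
	struct cifs_ntsd *pntsd;

	pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
	if (IS_ERR(pntsd))
		return PTR_ERR(pntsd);	/* no more NULL-on-failure */

	/* ... parse or copy the security descriptor here ... */

	kfree(pntsd);
	return 0;
}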
diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
index 6c8096cf515..c4ae7d03656 100644
--- a/fs/cifs/cifsacl.h
+++ b/fs/cifs/cifsacl.h
@@ -74,11 +74,7 @@ struct cifs_wksid {
74 char sidname[SIDNAMELENGTH]; 74 char sidname[SIDNAMELENGTH];
75} __attribute__((packed)); 75} __attribute__((packed));
76 76
77#ifdef CONFIG_CIFS_EXPERIMENTAL
78
79extern int match_sid(struct cifs_sid *); 77extern int match_sid(struct cifs_sid *);
80extern int compare_sids(const struct cifs_sid *, const struct cifs_sid *); 78extern int compare_sids(const struct cifs_sid *, const struct cifs_sid *);
81 79
82#endif /* CONFIG_CIFS_EXPERIMENTAL */
83
84#endif /* _CIFSACL_H */ 80#endif /* _CIFSACL_H */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 9c3789762ab..3936aa7f2c2 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -458,9 +458,13 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
458 seq_printf(s, ",acl"); 458 seq_printf(s, ",acl");
459 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) 459 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
460 seq_printf(s, ",mfsymlinks"); 460 seq_printf(s, ",mfsymlinks");
461 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
462 seq_printf(s, ",fsc");
461 463
462 seq_printf(s, ",rsize=%d", cifs_sb->rsize); 464 seq_printf(s, ",rsize=%d", cifs_sb->rsize);
463 seq_printf(s, ",wsize=%d", cifs_sb->wsize); 465 seq_printf(s, ",wsize=%d", cifs_sb->wsize);
466 /* convert actimeo and display it in seconds */
467 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
464 468
465 return 0; 469 return 0;
466} 470}
@@ -933,7 +937,6 @@ init_cifs(void)
933 GlobalCurrentXid = 0; 937 GlobalCurrentXid = 0;
934 GlobalTotalActiveXid = 0; 938 GlobalTotalActiveXid = 0;
935 GlobalMaxActiveXid = 0; 939 GlobalMaxActiveXid = 0;
936 memset(Local_System_Name, 0, 15);
937 spin_lock_init(&cifs_tcp_ses_lock); 940 spin_lock_init(&cifs_tcp_ses_lock);
938 spin_lock_init(&cifs_file_list_lock); 941 spin_lock_init(&cifs_file_list_lock);
939 spin_lock_init(&GlobalMid_Lock); 942 spin_lock_init(&GlobalMid_Lock);
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index b577bf0a1bb..7136c0c3e2f 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -45,6 +45,16 @@
45#define CIFS_MIN_RCV_POOL 4 45#define CIFS_MIN_RCV_POOL 4
46 46
47/* 47/*
48 * default attribute cache timeout (jiffies)
49 */
50#define CIFS_DEF_ACTIMEO (1 * HZ)
51
52/*
53 * max attribute cache timeout (jiffies) - 2^30
54 */
55#define CIFS_MAX_ACTIMEO (1 << 30)
56
57/*
48 * MAX_REQ is the maximum number of requests that WE will send 58 * MAX_REQ is the maximum number of requests that WE will send
49 * on one socket concurrently. It also matches the most common 59 * on one socket concurrently. It also matches the most common
50 * value of max multiplex returned by servers. We may 60 * value of max multiplex returned by servers. We may
@@ -746,8 +756,6 @@ GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
746GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ 756GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
747GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above & list operations */ 757GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above & list operations */
748 /* on midQ entries */ 758 /* on midQ entries */
749GLOBAL_EXTERN char Local_System_Name[15];
750
751/* 759/*
752 * Global counters, updated atomically 760 * Global counters, updated atomically
753 */ 761 */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 7ed69b6b5fe..e6d1481b16c 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -54,7 +54,8 @@ do { \
54 __func__, curr_xid, (int)rc); \ 54 __func__, curr_xid, (int)rc); \
55} while (0) 55} while (0)
56extern char *build_path_from_dentry(struct dentry *); 56extern char *build_path_from_dentry(struct dentry *);
57extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb); 57extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb,
58 struct cifsTconInfo *tcon);
58extern char *build_wildcard_path_from_dentry(struct dentry *direntry); 59extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
59extern char *cifs_compose_mount_options(const char *sb_mountdata, 60extern char *cifs_compose_mount_options(const char *sb_mountdata,
60 const char *fullpath, const struct dfs_info3_param *ref, 61 const char *fullpath, const struct dfs_info3_param *ref,
@@ -79,9 +80,7 @@ extern bool is_valid_oplock_break(struct smb_hdr *smb,
79 struct TCP_Server_Info *); 80 struct TCP_Server_Info *);
80extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); 81extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
81extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool); 82extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool);
82#ifdef CONFIG_CIFS_EXPERIMENTAL
83extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool); 83extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
84#endif
85extern unsigned int smbCalcSize(struct smb_hdr *ptr); 84extern unsigned int smbCalcSize(struct smb_hdr *ptr);
86extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr); 85extern unsigned int smbCalcSize_LE(struct smb_hdr *ptr);
87extern int decode_negTokenInit(unsigned char *security_blob, int length, 86extern int decode_negTokenInit(unsigned char *security_blob, int length,
@@ -130,10 +129,12 @@ extern int cifs_get_file_info_unix(struct file *filp);
130extern int cifs_get_inode_info_unix(struct inode **pinode, 129extern int cifs_get_inode_info_unix(struct inode **pinode,
131 const unsigned char *search_path, 130 const unsigned char *search_path,
132 struct super_block *sb, int xid); 131 struct super_block *sb, int xid);
133extern void cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, 132extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
134 struct cifs_fattr *fattr, struct inode *inode, 133 struct cifs_fattr *fattr, struct inode *inode,
135 const char *path, const __u16 *pfid); 134 const char *path, const __u16 *pfid);
136extern int mode_to_acl(struct inode *inode, const char *path, __u64); 135extern int mode_to_cifs_acl(struct inode *inode, const char *path, __u64);
136extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
137 const char *, u32 *);
137 138
138extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *, 139extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
139 const char *); 140 const char *);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 2f2632b6df5..67acfb3acad 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -2478,95 +2478,6 @@ querySymLinkRetry:
2478} 2478}
2479 2479
2480#ifdef CONFIG_CIFS_EXPERIMENTAL 2480#ifdef CONFIG_CIFS_EXPERIMENTAL
2481/* Initialize NT TRANSACT SMB into small smb request buffer.
2482 This assumes that all NT TRANSACTS that we init here have
2483 total parm and data under about 400 bytes (to fit in small cifs
2484 buffer size), which is the case so far, it easily fits. NB:
2485 Setup words themselves and ByteCount
2486 MaxSetupCount (size of returned setup area) and
2487 MaxParameterCount (returned parms size) must be set by caller */
2488static int
2489smb_init_nttransact(const __u16 sub_command, const int setup_count,
2490 const int parm_len, struct cifsTconInfo *tcon,
2491 void **ret_buf)
2492{
2493 int rc;
2494 __u32 temp_offset;
2495 struct smb_com_ntransact_req *pSMB;
2496
2497 rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon,
2498 (void **)&pSMB);
2499 if (rc)
2500 return rc;
2501 *ret_buf = (void *)pSMB;
2502 pSMB->Reserved = 0;
2503 pSMB->TotalParameterCount = cpu_to_le32(parm_len);
2504 pSMB->TotalDataCount = 0;
2505 pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
2506 MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
2507 pSMB->ParameterCount = pSMB->TotalParameterCount;
2508 pSMB->DataCount = pSMB->TotalDataCount;
2509 temp_offset = offsetof(struct smb_com_ntransact_req, Parms) +
2510 (setup_count * 2) - 4 /* for rfc1001 length itself */;
2511 pSMB->ParameterOffset = cpu_to_le32(temp_offset);
2512 pSMB->DataOffset = cpu_to_le32(temp_offset + parm_len);
2513 pSMB->SetupCount = setup_count; /* no need to le convert byte fields */
2514 pSMB->SubCommand = cpu_to_le16(sub_command);
2515 return 0;
2516}
2517
2518static int
2519validate_ntransact(char *buf, char **ppparm, char **ppdata,
2520 __u32 *pparmlen, __u32 *pdatalen)
2521{
2522 char *end_of_smb;
2523 __u32 data_count, data_offset, parm_count, parm_offset;
2524 struct smb_com_ntransact_rsp *pSMBr;
2525
2526 *pdatalen = 0;
2527 *pparmlen = 0;
2528
2529 if (buf == NULL)
2530 return -EINVAL;
2531
2532 pSMBr = (struct smb_com_ntransact_rsp *)buf;
2533
2534 /* ByteCount was converted from little endian in SendReceive */
2535 end_of_smb = 2 /* sizeof byte count */ + pSMBr->ByteCount +
2536 (char *)&pSMBr->ByteCount;
2537
2538 data_offset = le32_to_cpu(pSMBr->DataOffset);
2539 data_count = le32_to_cpu(pSMBr->DataCount);
2540 parm_offset = le32_to_cpu(pSMBr->ParameterOffset);
2541 parm_count = le32_to_cpu(pSMBr->ParameterCount);
2542
2543 *ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset;
2544 *ppdata = (char *)&pSMBr->hdr.Protocol + data_offset;
2545
2546 /* should we also check that parm and data areas do not overlap? */
2547 if (*ppparm > end_of_smb) {
2548 cFYI(1, "parms start after end of smb");
2549 return -EINVAL;
2550 } else if (parm_count + *ppparm > end_of_smb) {
2551 cFYI(1, "parm end after end of smb");
2552 return -EINVAL;
2553 } else if (*ppdata > end_of_smb) {
2554 cFYI(1, "data starts after end of smb");
2555 return -EINVAL;
2556 } else if (data_count + *ppdata > end_of_smb) {
2557 cFYI(1, "data %p + count %d (%p) past smb end %p start %p",
2558 *ppdata, data_count, (data_count + *ppdata),
2559 end_of_smb, pSMBr);
2560 return -EINVAL;
2561 } else if (parm_count + data_count > pSMBr->ByteCount) {
2562 cFYI(1, "parm count and data count larger than SMB");
2563 return -EINVAL;
2564 }
2565 *pdatalen = data_count;
2566 *pparmlen = parm_count;
2567 return 0;
2568}
2569
2570int 2481int
2571CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon, 2482CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
2572 const unsigned char *searchName, 2483 const unsigned char *searchName,
@@ -3056,7 +2967,97 @@ GetExtAttrOut:
3056 2967
3057#endif /* CONFIG_POSIX */ 2968#endif /* CONFIG_POSIX */
3058 2969
3059#ifdef CONFIG_CIFS_EXPERIMENTAL 2970#ifdef CONFIG_CIFS_ACL
2971/*
2972 * Initialize NT TRANSACT SMB into small smb request buffer. This assumes that
2973 * all NT TRANSACTS that we init here have total parm and data under about 400
2974 * bytes (to fit in small cifs buffer size), which is the case so far, it
2975 * easily fits. NB: Setup words themselves and ByteCount MaxSetupCount (size of
2976 * returned setup area) and MaxParameterCount (returned parms size) must be set
2977 * by caller
2978 */
2979static int
2980smb_init_nttransact(const __u16 sub_command, const int setup_count,
2981 const int parm_len, struct cifsTconInfo *tcon,
2982 void **ret_buf)
2983{
2984 int rc;
2985 __u32 temp_offset;
2986 struct smb_com_ntransact_req *pSMB;
2987
2988 rc = small_smb_init(SMB_COM_NT_TRANSACT, 19 + setup_count, tcon,
2989 (void **)&pSMB);
2990 if (rc)
2991 return rc;
2992 *ret_buf = (void *)pSMB;
2993 pSMB->Reserved = 0;
2994 pSMB->TotalParameterCount = cpu_to_le32(parm_len);
2995 pSMB->TotalDataCount = 0;
2996 pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
2997 MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
2998 pSMB->ParameterCount = pSMB->TotalParameterCount;
2999 pSMB->DataCount = pSMB->TotalDataCount;
3000 temp_offset = offsetof(struct smb_com_ntransact_req, Parms) +
3001 (setup_count * 2) - 4 /* for rfc1001 length itself */;
3002 pSMB->ParameterOffset = cpu_to_le32(temp_offset);
3003 pSMB->DataOffset = cpu_to_le32(temp_offset + parm_len);
3004 pSMB->SetupCount = setup_count; /* no need to le convert byte fields */
3005 pSMB->SubCommand = cpu_to_le16(sub_command);
3006 return 0;
3007}
3008
3009static int
3010validate_ntransact(char *buf, char **ppparm, char **ppdata,
3011 __u32 *pparmlen, __u32 *pdatalen)
3012{
3013 char *end_of_smb;
3014 __u32 data_count, data_offset, parm_count, parm_offset;
3015 struct smb_com_ntransact_rsp *pSMBr;
3016
3017 *pdatalen = 0;
3018 *pparmlen = 0;
3019
3020 if (buf == NULL)
3021 return -EINVAL;
3022
3023 pSMBr = (struct smb_com_ntransact_rsp *)buf;
3024
3025 /* ByteCount was converted from little endian in SendReceive */
3026 end_of_smb = 2 /* sizeof byte count */ + pSMBr->ByteCount +
3027 (char *)&pSMBr->ByteCount;
3028
3029 data_offset = le32_to_cpu(pSMBr->DataOffset);
3030 data_count = le32_to_cpu(pSMBr->DataCount);
3031 parm_offset = le32_to_cpu(pSMBr->ParameterOffset);
3032 parm_count = le32_to_cpu(pSMBr->ParameterCount);
3033
3034 *ppparm = (char *)&pSMBr->hdr.Protocol + parm_offset;
3035 *ppdata = (char *)&pSMBr->hdr.Protocol + data_offset;
3036
3037 /* should we also check that parm and data areas do not overlap? */
3038 if (*ppparm > end_of_smb) {
3039 cFYI(1, "parms start after end of smb");
3040 return -EINVAL;
3041 } else if (parm_count + *ppparm > end_of_smb) {
3042 cFYI(1, "parm end after end of smb");
3043 return -EINVAL;
3044 } else if (*ppdata > end_of_smb) {
3045 cFYI(1, "data starts after end of smb");
3046 return -EINVAL;
3047 } else if (data_count + *ppdata > end_of_smb) {
3048 cFYI(1, "data %p + count %d (%p) past smb end %p start %p",
3049 *ppdata, data_count, (data_count + *ppdata),
3050 end_of_smb, pSMBr);
3051 return -EINVAL;
3052 } else if (parm_count + data_count > pSMBr->ByteCount) {
3053 cFYI(1, "parm count and data count larger than SMB");
3054 return -EINVAL;
3055 }
3056 *pdatalen = data_count;
3057 *pparmlen = parm_count;
3058 return 0;
3059}
3060
3060/* Get Security Descriptor (by handle) from remote server for a file or dir */ 3061/* Get Security Descriptor (by handle) from remote server for a file or dir */
3061int 3062int
3062CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, 3063CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
@@ -3214,7 +3215,7 @@ setCifsAclRetry:
3214 return (rc); 3215 return (rc);
3215} 3216}
3216 3217
3217#endif /* CONFIG_CIFS_EXPERIMENTAL */ 3218#endif /* CONFIG_CIFS_ACL */
3218 3219
3219/* Legacy Query Path Information call for lookup to old servers such 3220/* Legacy Query Path Information call for lookup to old servers such
3220 as Win9x/WinME */ 3221 as Win9x/WinME */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 251a17c0354..cc1a8604a79 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -105,6 +105,7 @@ struct smb_vol {
105 unsigned int wsize; 105 unsigned int wsize;
106 bool sockopt_tcp_nodelay:1; 106 bool sockopt_tcp_nodelay:1;
107 unsigned short int port; 107 unsigned short int port;
108 unsigned long actimeo; /* attribute cache timeout (jiffies) */
108 char *prepath; 109 char *prepath;
109 struct sockaddr_storage srcaddr; /* allow binding to a local IP */ 110 struct sockaddr_storage srcaddr; /* allow binding to a local IP */
110 struct nls_table *local_nls; 111 struct nls_table *local_nls;
@@ -806,23 +807,20 @@ cifs_parse_mount_options(char *options, const char *devname,
806 short int override_gid = -1; 807 short int override_gid = -1;
807 bool uid_specified = false; 808 bool uid_specified = false;
808 bool gid_specified = false; 809 bool gid_specified = false;
810 char *nodename = utsname()->nodename;
809 811
810 separator[0] = ','; 812 separator[0] = ',';
811 separator[1] = 0; 813 separator[1] = 0;
812 814
813 if (Local_System_Name[0] != 0) 815 /*
814 memcpy(vol->source_rfc1001_name, Local_System_Name, 15); 816 * does not have to be perfect mapping since field is
815 else { 817 * informational, only used for servers that do not support
816 char *nodename = utsname()->nodename; 818 * port 445 and it can be overridden at mount time
817 int n = strnlen(nodename, 15); 819 */
818 memset(vol->source_rfc1001_name, 0x20, 15); 820 memset(vol->source_rfc1001_name, 0x20, 15);
819 for (i = 0; i < n; i++) { 821 for (i = 0; i < strnlen(nodename, 15); i++)
820 /* does not have to be perfect mapping since field is 822 vol->source_rfc1001_name[i] = toupper(nodename[i]);
821 informational, only used for servers that do not support 823
822 port 445 and it can be overridden at mount time */
823 vol->source_rfc1001_name[i] = toupper(nodename[i]);
824 }
825 }
826 vol->source_rfc1001_name[15] = 0; 824 vol->source_rfc1001_name[15] = 0;
827 /* null target name indicates to use *SMBSERVR default called name 825 /* null target name indicates to use *SMBSERVR default called name
828 if we end up sending RFC1001 session initialize */ 826 if we end up sending RFC1001 session initialize */
@@ -840,6 +838,8 @@ cifs_parse_mount_options(char *options, const char *devname,
840 /* default to using server inode numbers where available */ 838 /* default to using server inode numbers where available */
841 vol->server_ino = 1; 839 vol->server_ino = 1;
842 840
841 vol->actimeo = CIFS_DEF_ACTIMEO;
842
843 if (!options) 843 if (!options)
844 return 1; 844 return 1;
845 845
@@ -1214,6 +1214,16 @@ cifs_parse_mount_options(char *options, const char *devname,
1214 printk(KERN_WARNING "CIFS: server net" 1214 printk(KERN_WARNING "CIFS: server net"
1215 "biosname longer than 15 truncated.\n"); 1215 "biosname longer than 15 truncated.\n");
1216 } 1216 }
1217 } else if (strnicmp(data, "actimeo", 7) == 0) {
1218 if (value && *value) {
1219 vol->actimeo = HZ * simple_strtoul(value,
1220 &value, 0);
1221 if (vol->actimeo > CIFS_MAX_ACTIMEO) {
1222 				cERROR(1, "CIFS: attribute cache "
1223 "timeout too large");
1224 return 1;
1225 }
1226 }
1217 } else if (strnicmp(data, "credentials", 4) == 0) { 1227 } else if (strnicmp(data, "credentials", 4) == 0) {
1218 /* ignore */ 1228 /* ignore */
1219 } else if (strnicmp(data, "version", 3) == 0) { 1229 } else if (strnicmp(data, "version", 3) == 0) {
@@ -1352,6 +1362,11 @@ cifs_parse_mount_options(char *options, const char *devname,
1352 "supported. Instead set " 1362 "supported. Instead set "
1353 "/proc/fs/cifs/LookupCacheEnabled to 0\n"); 1363 "/proc/fs/cifs/LookupCacheEnabled to 0\n");
1354 } else if (strnicmp(data, "fsc", 3) == 0) { 1364 } else if (strnicmp(data, "fsc", 3) == 0) {
1365#ifndef CONFIG_CIFS_FSCACHE
1366 			cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE "
1367 "kernel config option set");
1368 return 1;
1369#endif
1355 vol->fsc = true; 1370 vol->fsc = true;
1356 } else if (strnicmp(data, "mfsymlinks", 10) == 0) { 1371 } else if (strnicmp(data, "mfsymlinks", 10) == 0) {
1357 vol->mfsymlinks = true; 1372 vol->mfsymlinks = true;
@@ -2566,6 +2581,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
2566 cFYI(1, "file mode: 0x%x dir mode: 0x%x", 2581 cFYI(1, "file mode: 0x%x dir mode: 0x%x",
2567 cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode); 2582 cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
2568 2583
2584 cifs_sb->actimeo = pvolume_info->actimeo;
2585
2569 if (pvolume_info->noperm) 2586 if (pvolume_info->noperm)
2570 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; 2587 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
2571 if (pvolume_info->setuids) 2588 if (pvolume_info->setuids)
@@ -2816,13 +2833,13 @@ remote_path_check:
2816 /* check if a whole path (including prepath) is not remote */ 2833 /* check if a whole path (including prepath) is not remote */
2817 if (!rc && cifs_sb->prepathlen && tcon) { 2834 if (!rc && cifs_sb->prepathlen && tcon) {
2818 /* build_path_to_root works only when we have a valid tcon */ 2835 /* build_path_to_root works only when we have a valid tcon */
2819 full_path = cifs_build_path_to_root(cifs_sb); 2836 full_path = cifs_build_path_to_root(cifs_sb, tcon);
2820 if (full_path == NULL) { 2837 if (full_path == NULL) {
2821 rc = -ENOMEM; 2838 rc = -ENOMEM;
2822 goto mount_fail_check; 2839 goto mount_fail_check;
2823 } 2840 }
2824 rc = is_path_accessible(xid, tcon, cifs_sb, full_path); 2841 rc = is_path_accessible(xid, tcon, cifs_sb, full_path);
2825 if (rc != -EREMOTE) { 2842 if (rc != 0 && rc != -EREMOTE) {
2826 kfree(full_path); 2843 kfree(full_path);
2827 goto mount_fail_check; 2844 goto mount_fail_check;
2828 } 2845 }
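Two threads run through the connect.c changes above: the RFC1001 source name is now always derived from the nodename (Local_System_Name is gone), and the new actimeo= mount option is parsed in seconds, stored in jiffies and capped. A hedged sketch of the actimeo handling only; the helper name is invented, and in the patch an over-large value fails the mount-option parse rather than returning an errno:

#include <linux/jiffies.h>	/* HZ */
#include <linux/kernel.h>	/* simple_strtoul() */

/* mirrors the constants the patch adds to cifsglob.h */
#define CIFS_DEF_ACTIMEO	(1 * HZ)
#define CIFS_MAX_ACTIMEO	(1 << 30)

static int parse_actimeo_sketch(const char *value, unsigned long *actimeo)
{
	char *end;

	if (!value || !*value) {
		*actimeo = CIFS_DEF_ACTIMEO;	/* keep the 1 second default */
		return 0;
	}
	*actimeo = HZ * simple_strtoul(value, &end, 0);
	if (*actimeo > CIFS_MAX_ACTIMEO)
		return -EINVAL;	/* "attribute cache timeout too large" */
	return 0;
}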
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 0eb87026cad..548f06230a6 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -66,7 +66,7 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
66 /* Search for server name delimiter */ 66 /* Search for server name delimiter */
67 sep = memchr(hostname, '\\', len); 67 sep = memchr(hostname, '\\', len);
68 if (sep) 68 if (sep)
69 len = sep - unc; 69 len = sep - hostname;
70 else 70 else
71 cFYI(1, "%s: probably server name is whole unc: %s", 71 cFYI(1, "%s: probably server name is whole unc: %s",
72 __func__, unc); 72 __func__, unc);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 06c3e83fa38..5a28660ca2b 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1108,7 +1108,6 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
1108 return total_written; 1108 return total_written;
1109} 1109}
1110 1110
1111#ifdef CONFIG_CIFS_EXPERIMENTAL
1112struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, 1111struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1113 bool fsuid_only) 1112 bool fsuid_only)
1114{ 1113{
@@ -1142,7 +1141,6 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1142 spin_unlock(&cifs_file_list_lock); 1141 spin_unlock(&cifs_file_list_lock);
1143 return NULL; 1142 return NULL;
1144} 1143}
1145#endif
1146 1144
1147struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode, 1145struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1148 bool fsuid_only) 1146 bool fsuid_only)
@@ -2271,8 +2269,10 @@ void cifs_oplock_break_get(struct cifsFileInfo *cfile)
2271 2269
2272void cifs_oplock_break_put(struct cifsFileInfo *cfile) 2270void cifs_oplock_break_put(struct cifsFileInfo *cfile)
2273{ 2271{
2272 struct super_block *sb = cfile->dentry->d_sb;
2273
2274 cifsFileInfo_put(cfile); 2274 cifsFileInfo_put(cfile);
2275 cifs_sb_deactive(cfile->dentry->d_sb); 2275 cifs_sb_deactive(sb);
2276} 2276}
2277 2277
2278const struct address_space_operations cifs_addr_ops = { 2278const struct address_space_operations cifs_addr_ops = {
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index a2ad94efcfe..297a43d0ff7 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -2,7 +2,7 @@
2 * fs/cifs/fscache.c - CIFS filesystem cache interface 2 * fs/cifs/fscache.c - CIFS filesystem cache interface
3 * 3 *
4 * Copyright (c) 2010 Novell, Inc. 4 * Copyright (c) 2010 Novell, Inc.
5 * Author(s): Suresh Jayaraman (sjayaraman@suse.de> 5 * Author(s): Suresh Jayaraman <sjayaraman@suse.de>
6 * 6 *
7 * This library is free software; you can redistribute it and/or modify 7 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published 8 * it under the terms of the GNU Lesser General Public License as published
@@ -67,10 +67,12 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
67 if (cifsi->fscache) 67 if (cifsi->fscache)
68 return; 68 return;
69 69
70 cifsi->fscache = fscache_acquire_cookie(tcon->fscache, 70 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) {
71 cifsi->fscache = fscache_acquire_cookie(tcon->fscache,
71 &cifs_fscache_inode_object_def, cifsi); 72 &cifs_fscache_inode_object_def, cifsi);
72 cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache, 73 cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache,
73 cifsi->fscache); 74 cifsi->fscache);
75 }
74} 76}
75 77
76void cifs_fscache_release_inode_cookie(struct inode *inode) 78void cifs_fscache_release_inode_cookie(struct inode *inode)
@@ -101,10 +103,8 @@ void cifs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
101{ 103{
102 if ((filp->f_flags & O_ACCMODE) != O_RDONLY) 104 if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
103 cifs_fscache_disable_inode_cookie(inode); 105 cifs_fscache_disable_inode_cookie(inode);
104 else { 106 else
105 cifs_fscache_enable_inode_cookie(inode); 107 cifs_fscache_enable_inode_cookie(inode);
106 cFYI(1, "CIFS: fscache inode cookie set");
107 }
108} 108}
109 109
110void cifs_fscache_reset_inode_cookie(struct inode *inode) 110void cifs_fscache_reset_inode_cookie(struct inode *inode)
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index ef3a55bf86b..589f3e3f6e0 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -686,13 +686,18 @@ int cifs_get_inode_info(struct inode **pinode,
686 cFYI(1, "cifs_sfu_type failed: %d", tmprc); 686 cFYI(1, "cifs_sfu_type failed: %d", tmprc);
687 } 687 }
688 688
689#ifdef CONFIG_CIFS_EXPERIMENTAL 689#ifdef CONFIG_CIFS_ACL
690 /* fill in 0777 bits from ACL */ 690 /* fill in 0777 bits from ACL */
691 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { 691 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
692 cFYI(1, "Getting mode bits from ACL"); 692 rc = cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path,
693 cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path, pfid); 693 pfid);
694 if (rc) {
695 cFYI(1, "%s: Getting ACL failed with error: %d",
696 __func__, rc);
697 goto cgii_exit;
698 }
694 } 699 }
695#endif 700#endif /* CONFIG_CIFS_ACL */
696 701
697 /* fill in remaining high mode bits e.g. SUID, VTX */ 702 /* fill in remaining high mode bits e.g. SUID, VTX */
698 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) 703 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
@@ -723,12 +728,12 @@ static const struct inode_operations cifs_ipc_inode_ops = {
723 .lookup = cifs_lookup, 728 .lookup = cifs_lookup,
724}; 729};
725 730
726char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb) 731char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb,
732 struct cifsTconInfo *tcon)
727{ 733{
728 int pplen = cifs_sb->prepathlen; 734 int pplen = cifs_sb->prepathlen;
729 int dfsplen; 735 int dfsplen;
730 char *full_path = NULL; 736 char *full_path = NULL;
731 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
732 737
733 /* if no prefix path, simply set path to the root of share to "" */ 738 /* if no prefix path, simply set path to the root of share to "" */
734 if (pplen == 0) { 739 if (pplen == 0) {
@@ -870,7 +875,7 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
870 char *full_path; 875 char *full_path;
871 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); 876 struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
872 877
873 full_path = cifs_build_path_to_root(cifs_sb); 878 full_path = cifs_build_path_to_root(cifs_sb, tcon);
874 if (full_path == NULL) 879 if (full_path == NULL)
875 return ERR_PTR(-ENOMEM); 880 return ERR_PTR(-ENOMEM);
876 881
@@ -881,8 +886,10 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
881 rc = cifs_get_inode_info(&inode, full_path, NULL, sb, 886 rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
882 xid, NULL); 887 xid, NULL);
883 888
884 if (!inode) 889 if (!inode) {
885 return ERR_PTR(rc); 890 inode = ERR_PTR(rc);
891 goto out;
892 }
886 893
887#ifdef CONFIG_CIFS_FSCACHE 894#ifdef CONFIG_CIFS_FSCACHE
888 /* populate tcon->resource_id */ 895 /* populate tcon->resource_id */
@@ -898,13 +905,11 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
898 inode->i_uid = cifs_sb->mnt_uid; 905 inode->i_uid = cifs_sb->mnt_uid;
899 inode->i_gid = cifs_sb->mnt_gid; 906 inode->i_gid = cifs_sb->mnt_gid;
900 } else if (rc) { 907 } else if (rc) {
901 kfree(full_path);
902 _FreeXid(xid);
903 iget_failed(inode); 908 iget_failed(inode);
904 return ERR_PTR(rc); 909 inode = ERR_PTR(rc);
905 } 910 }
906 911
907 912out:
908 kfree(full_path); 913 kfree(full_path);
909 /* can not call macro FreeXid here since in a void func 914 /* can not call macro FreeXid here since in a void func
910 * TODO: This is no longer true 915 * TODO: This is no longer true
@@ -1648,6 +1653,7 @@ static bool
1648cifs_inode_needs_reval(struct inode *inode) 1653cifs_inode_needs_reval(struct inode *inode)
1649{ 1654{
1650 struct cifsInodeInfo *cifs_i = CIFS_I(inode); 1655 struct cifsInodeInfo *cifs_i = CIFS_I(inode);
1656 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1651 1657
1652 if (cifs_i->clientCanCacheRead) 1658 if (cifs_i->clientCanCacheRead)
1653 return false; 1659 return false;
@@ -1658,19 +1664,21 @@ cifs_inode_needs_reval(struct inode *inode)
1658 if (cifs_i->time == 0) 1664 if (cifs_i->time == 0)
1659 return true; 1665 return true;
1660 1666
1661 /* FIXME: the actimeo should be tunable */ 1667 if (!time_in_range(jiffies, cifs_i->time,
1662 if (time_after_eq(jiffies, cifs_i->time + HZ)) 1668 cifs_i->time + cifs_sb->actimeo))
1663 return true; 1669 return true;
1664 1670
1665 /* hardlinked files w/ noserverino get "special" treatment */ 1671 /* hardlinked files w/ noserverino get "special" treatment */
1666 if (!(CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) && 1672 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) &&
1667 S_ISREG(inode->i_mode) && inode->i_nlink != 1) 1673 S_ISREG(inode->i_mode) && inode->i_nlink != 1)
1668 return true; 1674 return true;
1669 1675
1670 return false; 1676 return false;
1671} 1677}
1672 1678
1673/* check invalid_mapping flag and zap the cache if it's set */ 1679/*
1680 * Zap the cache. Called when invalid_mapping flag is set.
1681 */
1674static void 1682static void
1675cifs_invalidate_mapping(struct inode *inode) 1683cifs_invalidate_mapping(struct inode *inode)
1676{ 1684{
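The revalidation hunk above replaces the hard-coded one-second attribute timeout with the per-mount actimeo value and switches the comparison to time_in_range(): cached attributes are trusted while jiffies still lies inside [cifs_i->time, cifs_i->time + actimeo]. A standalone sketch of the check (names are illustrative):

#include <linux/jiffies.h>
#include <linux/types.h>

static bool attr_cache_valid(unsigned long now, unsigned long cached_at,
			     unsigned long actimeo)
{
	/* time_in_range(a, b, c) is wrap-safe shorthand for
	 * time_after_eq(a, b) && time_before_eq(a, c) */
	return time_in_range(now, cached_at, cached_at + actimeo);
}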
@@ -2114,11 +2122,16 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
2114 2122
2115 if (attrs->ia_valid & ATTR_MODE) { 2123 if (attrs->ia_valid & ATTR_MODE) {
2116 rc = 0; 2124 rc = 0;
2117#ifdef CONFIG_CIFS_EXPERIMENTAL 2125#ifdef CONFIG_CIFS_ACL
2118 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) 2126 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
2119 rc = mode_to_acl(inode, full_path, mode); 2127 rc = mode_to_cifs_acl(inode, full_path, mode);
2120 else 2128 if (rc) {
2121#endif 2129 cFYI(1, "%s: Setting ACL failed with error: %d",
2130 __func__, rc);
2131 goto cifs_setattr_exit;
2132 }
2133 } else
2134#endif /* CONFIG_CIFS_ACL */
2122 if (((mode & S_IWUGO) == 0) && 2135 if (((mode & S_IWUGO) == 0) &&
2123 (cifsInode->cifsAttrs & ATTR_READONLY) == 0) { 2136 (cifsInode->cifsAttrs & ATTR_READONLY) == 0) {
2124 2137
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index ef7bb7b50f5..a73eb9f4bda 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -226,26 +226,29 @@ static int initiate_cifs_search(const int xid, struct file *file)
226 char *full_path = NULL; 226 char *full_path = NULL;
227 struct cifsFileInfo *cifsFile; 227 struct cifsFileInfo *cifsFile;
228 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 228 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
229 struct tcon_link *tlink; 229 struct tcon_link *tlink = NULL;
230 struct cifsTconInfo *pTcon; 230 struct cifsTconInfo *pTcon;
231 231
232 tlink = cifs_sb_tlink(cifs_sb);
233 if (IS_ERR(tlink))
234 return PTR_ERR(tlink);
235 pTcon = tlink_tcon(tlink);
236
237 if (file->private_data == NULL)
238 file->private_data =
239 kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
240 if (file->private_data == NULL) { 232 if (file->private_data == NULL) {
241 rc = -ENOMEM; 233 tlink = cifs_sb_tlink(cifs_sb);
242 goto error_exit; 234 if (IS_ERR(tlink))
235 return PTR_ERR(tlink);
236
237 cifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
238 if (cifsFile == NULL) {
239 rc = -ENOMEM;
240 goto error_exit;
241 }
242 file->private_data = cifsFile;
243 cifsFile->tlink = cifs_get_tlink(tlink);
244 pTcon = tlink_tcon(tlink);
245 } else {
246 cifsFile = file->private_data;
247 pTcon = tlink_tcon(cifsFile->tlink);
243 } 248 }
244 249
245 cifsFile = file->private_data;
246 cifsFile->invalidHandle = true; 250 cifsFile->invalidHandle = true;
247 cifsFile->srch_inf.endOfSearch = false; 251 cifsFile->srch_inf.endOfSearch = false;
248 cifsFile->tlink = cifs_get_tlink(tlink);
249 252
250 full_path = build_path_from_dentry(file->f_path.dentry); 253 full_path = build_path_from_dentry(file->f_path.dentry);
251 if (full_path == NULL) { 254 if (full_path == NULL) {
@@ -756,18 +759,6 @@ static int cifs_filldir(char *pfindEntry, struct file *file, filldir_t filldir,
756 rc = filldir(direntry, qstring.name, qstring.len, file->f_pos, 759 rc = filldir(direntry, qstring.name, qstring.len, file->f_pos,
757 ino, fattr.cf_dtype); 760 ino, fattr.cf_dtype);
758 761
759 /*
760 * we can not return filldir errors to the caller since they are
761 * "normal" when the stat blocksize is too small - we return remapped
762 * error instead
763 *
764 * FIXME: This looks bogus. filldir returns -EOVERFLOW in the above
765 * case already. Why should we be clobbering other errors from it?
766 */
767 if (rc) {
768 cFYI(1, "filldir rc = %d", rc);
769 rc = -EOVERFLOW;
770 }
771 dput(tmp_dentry); 762 dput(tmp_dentry);
772 return rc; 763 return rc;
773} 764}
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index a264b744bb4..eae2a149160 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -30,10 +30,11 @@
30 30
31#define MAX_EA_VALUE_SIZE 65535 31#define MAX_EA_VALUE_SIZE 65535
32#define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib" 32#define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib"
33#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
33#define CIFS_XATTR_USER_PREFIX "user." 34#define CIFS_XATTR_USER_PREFIX "user."
34#define CIFS_XATTR_SYSTEM_PREFIX "system." 35#define CIFS_XATTR_SYSTEM_PREFIX "system."
35#define CIFS_XATTR_OS2_PREFIX "os2." 36#define CIFS_XATTR_OS2_PREFIX "os2."
36#define CIFS_XATTR_SECURITY_PREFIX ".security" 37#define CIFS_XATTR_SECURITY_PREFIX "security."
37#define CIFS_XATTR_TRUSTED_PREFIX "trusted." 38#define CIFS_XATTR_TRUSTED_PREFIX "trusted."
38#define XATTR_TRUSTED_PREFIX_LEN 8 39#define XATTR_TRUSTED_PREFIX_LEN 8
39#define XATTR_SECURITY_PREFIX_LEN 9 40#define XATTR_SECURITY_PREFIX_LEN 9
@@ -277,29 +278,8 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
277 cifs_sb->local_nls, 278 cifs_sb->local_nls,
278 cifs_sb->mnt_cifs_flags & 279 cifs_sb->mnt_cifs_flags &
279 CIFS_MOUNT_MAP_SPECIAL_CHR); 280 CIFS_MOUNT_MAP_SPECIAL_CHR);
280#ifdef CONFIG_CIFS_EXPERIMENTAL
281 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
282 __u16 fid;
283 int oplock = 0;
284 struct cifs_ntsd *pacl = NULL;
285 __u32 buflen = 0;
286 if (experimEnabled)
287 rc = CIFSSMBOpen(xid, pTcon, full_path,
288 FILE_OPEN, GENERIC_READ, 0, &fid,
289 &oplock, NULL, cifs_sb->local_nls,
290 cifs_sb->mnt_cifs_flags &
291 CIFS_MOUNT_MAP_SPECIAL_CHR);
292 /* else rc is EOPNOTSUPP from above */
293
294 if (rc == 0) {
295 rc = CIFSSMBGetCIFSACL(xid, pTcon, fid, &pacl,
296 &buflen);
297 CIFSSMBClose(xid, pTcon, fid);
298 }
299 }
300#endif /* EXPERIMENTAL */
301#else 281#else
302 cFYI(1, "query POSIX ACL not supported yet"); 282 cFYI(1, "Query POSIX ACL not supported yet");
303#endif /* CONFIG_CIFS_POSIX */ 283#endif /* CONFIG_CIFS_POSIX */
304 } else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT, 284 } else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
305 strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) { 285 strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
@@ -311,8 +291,33 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
311 cifs_sb->mnt_cifs_flags & 291 cifs_sb->mnt_cifs_flags &
312 CIFS_MOUNT_MAP_SPECIAL_CHR); 292 CIFS_MOUNT_MAP_SPECIAL_CHR);
313#else 293#else
314 cFYI(1, "query POSIX default ACL not supported yet"); 294 cFYI(1, "Query POSIX default ACL not supported yet");
315#endif 295#endif /* CONFIG_CIFS_POSIX */
296 } else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
297 strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
298#ifdef CONFIG_CIFS_ACL
299 u32 acllen;
300 struct cifs_ntsd *pacl;
301
302 pacl = get_cifs_acl(cifs_sb, direntry->d_inode,
303 full_path, &acllen);
304 if (IS_ERR(pacl)) {
305 rc = PTR_ERR(pacl);
306 cERROR(1, "%s: error %zd getting sec desc",
307 __func__, rc);
308 } else {
309 if (ea_value) {
310 if (acllen > buf_size)
311 acllen = -ERANGE;
312 else
313 memcpy(ea_value, pacl, acllen);
314 }
315 rc = acllen;
316 kfree(pacl);
317 }
318#else
319 cFYI(1, "Query CIFS ACL not supported yet");
320#endif /* CONFIG_CIFS_ACL */
316 } else if (strncmp(ea_name, 321 } else if (strncmp(ea_name,
317 CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) { 322 CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
318 cFYI(1, "Trusted xattr namespace not supported yet"); 323 cFYI(1, "Trusted xattr namespace not supported yet");
diff --git a/fs/compat.c b/fs/compat.c
index c580c322fa6..eb1740ac8c0 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1350,6 +1350,10 @@ static int compat_count(compat_uptr_t __user *argv, int max)
1350 argv++; 1350 argv++;
1351 if (i++ >= max) 1351 if (i++ >= max)
1352 return -E2BIG; 1352 return -E2BIG;
1353
1354 if (fatal_signal_pending(current))
1355 return -ERESTARTNOHAND;
1356 cond_resched();
1353 } 1357 }
1354 } 1358 }
1355 return i; 1359 return i;
@@ -1391,6 +1395,12 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
1391 while (len > 0) { 1395 while (len > 0) {
1392 int offset, bytes_to_copy; 1396 int offset, bytes_to_copy;
1393 1397
1398 if (fatal_signal_pending(current)) {
1399 ret = -ERESTARTNOHAND;
1400 goto out;
1401 }
1402 cond_resched();
1403
1394 offset = pos % PAGE_SIZE; 1404 offset = pos % PAGE_SIZE;
1395 if (offset == 0) 1405 if (offset == 0)
1396 offset = PAGE_SIZE; 1406 offset = PAGE_SIZE;
@@ -1407,18 +1417,8 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
1407 if (!kmapped_page || kpos != (pos & PAGE_MASK)) { 1417 if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
1408 struct page *page; 1418 struct page *page;
1409 1419
1410#ifdef CONFIG_STACK_GROWSUP 1420 page = get_arg_page(bprm, pos, 1);
1411 ret = expand_stack_downwards(bprm->vma, pos); 1421 if (!page) {
1412 if (ret < 0) {
1413 /* We've exceed the stack rlimit. */
1414 ret = -E2BIG;
1415 goto out;
1416 }
1417#endif
1418 ret = get_user_pages(current, bprm->mm, pos,
1419 1, 1, 1, &page, NULL);
1420 if (ret <= 0) {
1421 /* We've exceed the stack rlimit. */
1422 ret = -E2BIG; 1422 ret = -E2BIG;
1423 goto out; 1423 goto out;
1424 } 1424 }
@@ -1539,8 +1539,10 @@ int compat_do_execve(char * filename,
1539 return retval; 1539 return retval;
1540 1540
1541out: 1541out:
1542 if (bprm->mm) 1542 if (bprm->mm) {
1543 acct_arg_size(bprm, 0);
1543 mmput(bprm->mm); 1544 mmput(bprm->mm);
1545 }
1544 1546
1545out_file: 1547out_file:
1546 if (bprm->file) { 1548 if (bprm->file) {
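Editorial note on the compat_count()/compat_copy_strings() hunks above: they add the standard long-loop courtesy checks, since argv/envp sizes are user-controlled — bail out promptly on a fatal signal and offer to reschedule between iterations. A rough kernel-style sketch of the pattern (the helper below is hypothetical, not the patch's code):

    #include <linux/sched.h>
    #include <linux/uaccess.h>
    #include <linux/errno.h>

    /* Hypothetical: count NULL-terminated user pointers without letting a
     * huge array monopolise the CPU or outlive a SIGKILL. */
    static int count_user_ptrs(void __user * __user *array, int max)
    {
            int i = 0;

            for (;;) {
                    void __user *p;

                    if (get_user(p, array + i))
                            return -EFAULT;
                    if (!p)
                            return i;
                    if (++i >= max)
                            return -E2BIG;

                    if (fatal_signal_pending(current))
                            return -ERESTARTNOHAND; /* let SIGKILL win now */
                    cond_resched();                 /* yield if needed */
            }
    }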
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 410ed188faa..a60579b007b 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -19,7 +19,6 @@
19#include <linux/compiler.h> 19#include <linux/compiler.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/smp.h> 21#include <linux/smp.h>
22#include <linux/smp_lock.h>
23#include <linux/ioctl.h> 22#include <linux/ioctl.h>
24#include <linux/if.h> 23#include <linux/if.h>
25#include <linux/if_bridge.h> 24#include <linux/if_bridge.h>
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 253732382d3..2720178b771 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -28,7 +28,6 @@
28#include <linux/key.h> 28#include <linux/key.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/smp_lock.h>
32#include <linux/file.h> 31#include <linux/file.h>
33#include <linux/crypto.h> 32#include <linux/crypto.h>
34#include "ecryptfs_kernel.h" 33#include "ecryptfs_kernel.h"
diff --git a/fs/exec.c b/fs/exec.c
index 99d33a1371e..c62efcb959c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -164,7 +164,26 @@ out:
164 164
165#ifdef CONFIG_MMU 165#ifdef CONFIG_MMU
166 166
167static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, 167void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
168{
169 struct mm_struct *mm = current->mm;
170 long diff = (long)(pages - bprm->vma_pages);
171
172 if (!mm || !diff)
173 return;
174
175 bprm->vma_pages = pages;
176
177#ifdef SPLIT_RSS_COUNTING
178 add_mm_counter(mm, MM_ANONPAGES, diff);
179#else
180 spin_lock(&mm->page_table_lock);
181 add_mm_counter(mm, MM_ANONPAGES, diff);
182 spin_unlock(&mm->page_table_lock);
183#endif
184}
185
186struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
168 int write) 187 int write)
169{ 188{
170 struct page *page; 189 struct page *page;
@@ -186,6 +205,8 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
186 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; 205 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
187 struct rlimit *rlim; 206 struct rlimit *rlim;
188 207
208 acct_arg_size(bprm, size / PAGE_SIZE);
209
189 /* 210 /*
190 * We've historically supported up to 32 pages (ARG_MAX) 211 * We've historically supported up to 32 pages (ARG_MAX)
191 * of argument strings even with small stacks 212 * of argument strings even with small stacks
@@ -254,6 +275,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
254 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; 275 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
255 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 276 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
256 INIT_LIST_HEAD(&vma->anon_vma_chain); 277 INIT_LIST_HEAD(&vma->anon_vma_chain);
278
279 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
280 if (err)
281 goto err;
282
257 err = insert_vm_struct(mm, vma); 283 err = insert_vm_struct(mm, vma);
258 if (err) 284 if (err)
259 goto err; 285 goto err;
@@ -276,7 +302,11 @@ static bool valid_arg_len(struct linux_binprm *bprm, long len)
276 302
277#else 303#else
278 304
279static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, 305void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
306{
307}
308
309struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
280 int write) 310 int write)
281{ 311{
282 struct page *page; 312 struct page *page;
@@ -1003,6 +1033,7 @@ int flush_old_exec(struct linux_binprm * bprm)
1003 /* 1033 /*
1004 * Release all of the old mmap stuff 1034 * Release all of the old mmap stuff
1005 */ 1035 */
1036 acct_arg_size(bprm, 0);
1006 retval = exec_mmap(bprm->mm); 1037 retval = exec_mmap(bprm->mm);
1007 if (retval) 1038 if (retval)
1008 goto out; 1039 goto out;
@@ -1426,8 +1457,10 @@ int do_execve(const char * filename,
1426 return retval; 1457 return retval;
1427 1458
1428out: 1459out:
1429 if (bprm->mm) 1460 if (bprm->mm) {
1430 mmput (bprm->mm); 1461 acct_arg_size(bprm, 0);
1462 mmput(bprm->mm);
1463 }
1431 1464
1432out_file: 1465out_file:
1433 if (bprm->file) { 1466 if (bprm->file) {
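Editorial note on acct_arg_size() above: it does delta accounting — it remembers the page count last reported in bprm->vma_pages and charges only the difference to MM_ANONPAGES, so the acct_arg_size(bprm, 0) calls on the failure paths unwind exactly what was charged. A small standalone illustration of that bookkeeping (plain C, variable names invented):

    #include <stdio.h>

    /* Stand-ins for the MM_ANONPAGES counter and bprm->vma_pages. */
    static long mm_anon_pages;
    static unsigned long vma_pages;

    static void acct_arg_size_sketch(unsigned long pages)
    {
            long diff = (long)pages - (long)vma_pages;

            if (!diff)
                    return;
            vma_pages = pages;
            mm_anon_pages += diff;      /* charge only the change since last call */
    }

    int main(void)
    {
            acct_arg_size_sketch(4);    /* argv copy has grown to 4 pages */
            acct_arg_size_sketch(9);    /* ...then to 9 pages */
            acct_arg_size_sketch(0);    /* exec failed: give everything back */
            printf("counter after unwind: %ld\n", mm_anon_pages);   /* prints 0 */
            return 0;
    }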
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 2fedaf8b501..acf8695fa8f 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -27,7 +27,6 @@
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/blkdev.h> 28#include <linux/blkdev.h>
29#include <linux/parser.h> 29#include <linux/parser.h>
30#include <linux/smp_lock.h>
31#include <linux/buffer_head.h> 30#include <linux/buffer_head.h>
32#include <linux/exportfs.h> 31#include <linux/exportfs.h>
33#include <linux/vfs.h> 32#include <linux/vfs.h>
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 6a5edea2d70..94ce3d7a1c4 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -910,6 +910,7 @@ struct ext4_inode_info {
910#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ 910#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */
911#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ 911#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
912#define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */ 912#define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */
913#define EXT4_MOUNT_MBLK_IO_SUBMIT 0x4000000 /* multi-block io submits */
913#define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ 914#define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */
914#define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ 915#define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */
915#define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */ 916#define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index bdbe6990220..e659597b690 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2125,9 +2125,12 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
2125 */ 2125 */
2126 if (unlikely(journal_data && PageChecked(page))) 2126 if (unlikely(journal_data && PageChecked(page)))
2127 err = __ext4_journalled_writepage(page, len); 2127 err = __ext4_journalled_writepage(page, len);
2128 else 2128 else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
2129 err = ext4_bio_write_page(&io_submit, page, 2129 err = ext4_bio_write_page(&io_submit, page,
2130 len, mpd->wbc); 2130 len, mpd->wbc);
2131 else
2132 err = block_write_full_page(page,
2133 noalloc_get_block_write, mpd->wbc);
2131 2134
2132 if (!err) 2135 if (!err)
2133 mpd->pages_written++; 2136 mpd->pages_written++;
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index bf5ae883b1b..eb3bc2fe647 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -331,6 +331,30 @@ mext_out:
331 return err; 331 return err;
332 } 332 }
333 333
334 case FITRIM:
335 {
336 struct super_block *sb = inode->i_sb;
337 struct fstrim_range range;
338 int ret = 0;
339
340 if (!capable(CAP_SYS_ADMIN))
341 return -EPERM;
342
343 if (copy_from_user(&range, (struct fstrim_range *)arg,
344 sizeof(range)))
345 return -EFAULT;
346
347 ret = ext4_trim_fs(sb, &range);
348 if (ret < 0)
349 return ret;
350
351 if (copy_to_user((struct fstrim_range *)arg, &range,
352 sizeof(range)))
353 return -EFAULT;
354
355 return 0;
356 }
357
334 default: 358 default:
335 return -ENOTTY; 359 return -ENOTTY;
336 } 360 }
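Editorial note: with the generic VFS FITRIM handler removed (see the fs/ioctl.c hunk further down), ext4 now services FITRIM itself in the hunk above — check CAP_SYS_ADMIN, copy in the fstrim_range, run ext4_trim_fs(), and copy the updated range back. Userspace drives the ioctl the same way either side of the move; a minimal caller looks roughly like this (illustrative, not part of the patch):

    #include <fcntl.h>
    #include <limits.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>           /* FITRIM, struct fstrim_range */

    int main(int argc, char **argv)
    {
            struct fstrim_range range;
            int fd;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <mount point>\n", argv[0]);
                    return 1;
            }
            fd = open(argv[1], O_RDONLY);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            memset(&range, 0, sizeof(range));
            range.len = ULLONG_MAX;                 /* trim the whole filesystem */
            if (ioctl(fd, FITRIM, &range) < 0) {    /* needs CAP_SYS_ADMIN */
                    perror("FITRIM");
                    return 1;
            }
            printf("trimmed %llu bytes\n", (unsigned long long)range.len);
            return 0;
    }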
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 92203b8a099..dc40e75cba8 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -872,7 +872,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
872 if (namelen > EXT4_NAME_LEN) 872 if (namelen > EXT4_NAME_LEN)
873 return NULL; 873 return NULL;
874 if ((namelen <= 2) && (name[0] == '.') && 874 if ((namelen <= 2) && (name[0] == '.') &&
875 (name[1] == '.' || name[1] == '0')) { 875 (name[1] == '.' || name[1] == '\0')) {
876 /* 876 /*
877 * "." or ".." will only be in the first block 877 * "." or ".." will only be in the first block
878 * NFS may look up ".."; "." should be handled by the VFS 878 * NFS may look up ".."; "." should be handled by the VFS
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 7f5451cd1d3..beacce11ac5 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -237,8 +237,6 @@ static void ext4_end_bio(struct bio *bio, int error)
237 } while (bh != head); 237 } while (bh != head);
238 } 238 }
239 239
240 put_io_page(io_end->pages[i]);
241
242 /* 240 /*
243 * If this is a partial write which happened to make 241 * If this is a partial write which happened to make
244 * all buffers uptodate then we can optimize away a 242 * all buffers uptodate then we can optimize away a
@@ -248,6 +246,8 @@ static void ext4_end_bio(struct bio *bio, int error)
248 */ 246 */
249 if (!partial_write) 247 if (!partial_write)
250 SetPageUptodate(page); 248 SetPageUptodate(page);
249
250 put_io_page(io_end->pages[i]);
251 } 251 }
252 io_end->num_io_pages = 0; 252 io_end->num_io_pages = 0;
253 inode = io_end->inode; 253 inode = io_end->inode;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index dc963929de6..981c8477ada 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -232,6 +232,8 @@ static int setup_new_group_blocks(struct super_block *sb,
232 GFP_NOFS); 232 GFP_NOFS);
233 if (err) 233 if (err)
234 goto exit_bh; 234 goto exit_bh;
235 for (i = 0, bit = gdblocks + 1; i < reserved_gdb; i++, bit++)
236 ext4_set_bit(bit, bh->b_data);
235 237
236 ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap, 238 ext4_debug("mark block bitmap %#04llx (+%llu)\n", input->block_bitmap,
237 input->block_bitmap - start); 239 input->block_bitmap - start);
@@ -247,6 +249,9 @@ static int setup_new_group_blocks(struct super_block *sb,
247 err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS); 249 err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
248 if (err) 250 if (err)
249 goto exit_bh; 251 goto exit_bh;
252 for (i = 0, bit = input->inode_table - start;
253 i < sbi->s_itb_per_group; i++, bit++)
254 ext4_set_bit(bit, bh->b_data);
250 255
251 if ((err = extend_or_restart_transaction(handle, 2, bh))) 256 if ((err = extend_or_restart_transaction(handle, 2, bh)))
252 goto exit_bh; 257 goto exit_bh;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 61182fe6254..fb15c9c0be7 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1026,6 +1026,8 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
1026 !(def_mount_opts & EXT4_DEFM_NODELALLOC)) 1026 !(def_mount_opts & EXT4_DEFM_NODELALLOC))
1027 seq_puts(seq, ",nodelalloc"); 1027 seq_puts(seq, ",nodelalloc");
1028 1028
1029 if (test_opt(sb, MBLK_IO_SUBMIT))
1030 seq_puts(seq, ",mblk_io_submit");
1029 if (sbi->s_stripe) 1031 if (sbi->s_stripe)
1030 seq_printf(seq, ",stripe=%lu", sbi->s_stripe); 1032 seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
1031 /* 1033 /*
@@ -1197,7 +1199,6 @@ static const struct super_operations ext4_sops = {
1197 .quota_write = ext4_quota_write, 1199 .quota_write = ext4_quota_write,
1198#endif 1200#endif
1199 .bdev_try_to_free_page = bdev_try_to_free_page, 1201 .bdev_try_to_free_page = bdev_try_to_free_page,
1200 .trim_fs = ext4_trim_fs
1201}; 1202};
1202 1203
1203static const struct super_operations ext4_nojournal_sops = { 1204static const struct super_operations ext4_nojournal_sops = {
@@ -1240,8 +1241,8 @@ enum {
1240 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, 1241 Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
1241 Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err, 1242 Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
1242 Opt_resize, Opt_usrquota, Opt_grpquota, Opt_i_version, 1243 Opt_resize, Opt_usrquota, Opt_grpquota, Opt_i_version,
1243 Opt_stripe, Opt_delalloc, Opt_nodelalloc, 1244 Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
1244 Opt_block_validity, Opt_noblock_validity, 1245 Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
1245 Opt_inode_readahead_blks, Opt_journal_ioprio, 1246 Opt_inode_readahead_blks, Opt_journal_ioprio,
1246 Opt_dioread_nolock, Opt_dioread_lock, 1247 Opt_dioread_nolock, Opt_dioread_lock,
1247 Opt_discard, Opt_nodiscard, 1248 Opt_discard, Opt_nodiscard,
@@ -1305,6 +1306,8 @@ static const match_table_t tokens = {
1305 {Opt_resize, "resize"}, 1306 {Opt_resize, "resize"},
1306 {Opt_delalloc, "delalloc"}, 1307 {Opt_delalloc, "delalloc"},
1307 {Opt_nodelalloc, "nodelalloc"}, 1308 {Opt_nodelalloc, "nodelalloc"},
1309 {Opt_mblk_io_submit, "mblk_io_submit"},
1310 {Opt_nomblk_io_submit, "nomblk_io_submit"},
1308 {Opt_block_validity, "block_validity"}, 1311 {Opt_block_validity, "block_validity"},
1309 {Opt_noblock_validity, "noblock_validity"}, 1312 {Opt_noblock_validity, "noblock_validity"},
1310 {Opt_inode_readahead_blks, "inode_readahead_blks=%u"}, 1313 {Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
@@ -1726,6 +1729,12 @@ set_qf_format:
1726 case Opt_nodelalloc: 1729 case Opt_nodelalloc:
1727 clear_opt(sbi->s_mount_opt, DELALLOC); 1730 clear_opt(sbi->s_mount_opt, DELALLOC);
1728 break; 1731 break;
1732 case Opt_mblk_io_submit:
1733 set_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT);
1734 break;
1735 case Opt_nomblk_io_submit:
1736 clear_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT);
1737 break;
1729 case Opt_stripe: 1738 case Opt_stripe:
1730 if (match_int(&args[0], &option)) 1739 if (match_int(&args[0], &option))
1731 return 0; 1740 return 0;
@@ -2799,9 +2808,6 @@ static void ext4_clear_request_list(void)
2799 struct ext4_li_request *elr; 2808 struct ext4_li_request *elr;
2800 2809
2801 mutex_lock(&ext4_li_info->li_list_mtx); 2810 mutex_lock(&ext4_li_info->li_list_mtx);
2802 if (list_empty(&ext4_li_info->li_request_list))
2803 return;
2804
2805 list_for_each_safe(pos, n, &ext4_li_info->li_request_list) { 2811 list_for_each_safe(pos, n, &ext4_li_info->li_request_list) {
2806 elr = list_entry(pos, struct ext4_li_request, 2812 elr = list_entry(pos, struct ext4_li_request,
2807 lr_request); 2813 lr_request);
@@ -3268,13 +3274,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3268 * Test whether we have more sectors than will fit in sector_t, 3274 * Test whether we have more sectors than will fit in sector_t,
3269 * and whether the max offset is addressable by the page cache. 3275 * and whether the max offset is addressable by the page cache.
3270 */ 3276 */
3271 ret = generic_check_addressable(sb->s_blocksize_bits, 3277 err = generic_check_addressable(sb->s_blocksize_bits,
3272 ext4_blocks_count(es)); 3278 ext4_blocks_count(es));
3273 if (ret) { 3279 if (err) {
3274 ext4_msg(sb, KERN_ERR, "filesystem" 3280 ext4_msg(sb, KERN_ERR, "filesystem"
3275 " too large to mount safely on this system"); 3281 " too large to mount safely on this system");
3276 if (sizeof(sector_t) < 8) 3282 if (sizeof(sector_t) < 8)
3277 ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled"); 3283 ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
3284 ret = err;
3278 goto failed_mount; 3285 goto failed_mount;
3279 } 3286 }
3280 3287
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index c8224587123..8b984a2cebb 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -13,6 +13,7 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/compat.h>
16 17
17static const struct file_operations fuse_direct_io_file_operations; 18static const struct file_operations fuse_direct_io_file_operations;
18 19
@@ -134,6 +135,7 @@ EXPORT_SYMBOL_GPL(fuse_do_open);
134void fuse_finish_open(struct inode *inode, struct file *file) 135void fuse_finish_open(struct inode *inode, struct file *file)
135{ 136{
136 struct fuse_file *ff = file->private_data; 137 struct fuse_file *ff = file->private_data;
138 struct fuse_conn *fc = get_fuse_conn(inode);
137 139
138 if (ff->open_flags & FOPEN_DIRECT_IO) 140 if (ff->open_flags & FOPEN_DIRECT_IO)
139 file->f_op = &fuse_direct_io_file_operations; 141 file->f_op = &fuse_direct_io_file_operations;
@@ -141,6 +143,15 @@ void fuse_finish_open(struct inode *inode, struct file *file)
141 invalidate_inode_pages2(inode->i_mapping); 143 invalidate_inode_pages2(inode->i_mapping);
142 if (ff->open_flags & FOPEN_NONSEEKABLE) 144 if (ff->open_flags & FOPEN_NONSEEKABLE)
143 nonseekable_open(inode, file); 145 nonseekable_open(inode, file);
146 if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
147 struct fuse_inode *fi = get_fuse_inode(inode);
148
149 spin_lock(&fc->lock);
150 fi->attr_version = ++fc->attr_version;
151 i_size_write(inode, 0);
152 spin_unlock(&fc->lock);
153 fuse_invalidate_attr(inode);
154 }
144} 155}
145 156
146int fuse_open_common(struct inode *inode, struct file *file, bool isdir) 157int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
@@ -1618,6 +1629,58 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
1618} 1629}
1619 1630
1620/* 1631/*
1632 * CUSE servers compiled on 32bit broke on 64bit kernels because the
1633 * ABI was defined to be 'struct iovec' which is different on 32bit
1634 * and 64bit. Fortunately we can determine which structure the server
1635 * used from the size of the reply.
1636 */
1637static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src,
1638 size_t transferred, unsigned count,
1639 bool is_compat)
1640{
1641#ifdef CONFIG_COMPAT
1642 if (count * sizeof(struct compat_iovec) == transferred) {
1643 struct compat_iovec *ciov = src;
1644 unsigned i;
1645
1646 /*
1647 * With this interface a 32bit server cannot support
1648 * non-compat (i.e. ones coming from 64bit apps) ioctl
1649 * requests
1650 */
1651 if (!is_compat)
1652 return -EINVAL;
1653
1654 for (i = 0; i < count; i++) {
1655 dst[i].iov_base = compat_ptr(ciov[i].iov_base);
1656 dst[i].iov_len = ciov[i].iov_len;
1657 }
1658 return 0;
1659 }
1660#endif
1661
1662 if (count * sizeof(struct iovec) != transferred)
1663 return -EIO;
1664
1665 memcpy(dst, src, transferred);
1666 return 0;
1667}
1668
1669/* Make sure iov_length() won't overflow */
1670static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
1671{
1672 size_t n;
1673 u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
1674
1675 for (n = 0; n < count; n++) {
1676 if (iov->iov_len > (size_t) max)
1677 return -ENOMEM;
1678 max -= iov->iov_len;
1679 }
1680 return 0;
1681}
1682
1683/*
1621 * For ioctls, there is no generic way to determine how much memory 1684 * For ioctls, there is no generic way to determine how much memory
1622 * needs to be read and/or written. Furthermore, ioctls are allowed 1685 * needs to be read and/or written. Furthermore, ioctls are allowed
1623 * to dereference the passed pointer, so the parameter requires deep 1686 * to dereference the passed pointer, so the parameter requires deep
@@ -1798,18 +1861,25 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
1798 in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) 1861 in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
1799 goto out; 1862 goto out;
1800 1863
1801 err = -EIO;
1802 if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred)
1803 goto out;
1804
1805 /* okay, copy in iovs and retry */
1806 vaddr = kmap_atomic(pages[0], KM_USER0); 1864 vaddr = kmap_atomic(pages[0], KM_USER0);
1807 memcpy(page_address(iov_page), vaddr, transferred); 1865 err = fuse_copy_ioctl_iovec(page_address(iov_page), vaddr,
1866 transferred, in_iovs + out_iovs,
1867 (flags & FUSE_IOCTL_COMPAT) != 0);
1808 kunmap_atomic(vaddr, KM_USER0); 1868 kunmap_atomic(vaddr, KM_USER0);
1869 if (err)
1870 goto out;
1809 1871
1810 in_iov = page_address(iov_page); 1872 in_iov = page_address(iov_page);
1811 out_iov = in_iov + in_iovs; 1873 out_iov = in_iov + in_iovs;
1812 1874
1875 err = fuse_verify_ioctl_iov(in_iov, in_iovs);
1876 if (err)
1877 goto out;
1878
1879 err = fuse_verify_ioctl_iov(out_iov, out_iovs);
1880 if (err)
1881 goto out;
1882
1813 goto retry; 1883 goto retry;
1814 } 1884 }
1815 1885
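Editorial note on fuse_copy_ioctl_iovec() above: it tells the two ABIs apart purely by size — for a given element count, an array of 8-byte compat iovecs and an array of 16-byte native iovecs can never have the same total length, so the number of bytes the server returned identifies the layout. A tiny standalone illustration of that arithmetic (struct names are stand-ins, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the 32-bit and native 64-bit iovec layouts. */
    struct iovec32 { uint32_t base; uint32_t len; };        /*  8 bytes */
    struct iovec64 { uint64_t base; uint64_t len; };        /* 16 bytes */

    int main(void)
    {
            unsigned count = 3;
            size_t transferred = 24;    /* bytes the server actually sent back */

            if (transferred == count * sizeof(struct iovec32))
                    printf("reply uses the 32-bit layout\n");
            else if (transferred == count * sizeof(struct iovec64))
                    printf("reply uses the native 64-bit layout\n");
            else
                    printf("malformed reply\n");
            return 0;
    }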
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index 06d582732d3..5ab3839dfcb 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -138,10 +138,8 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
138 struct gfs2_inum_host *inum) 138 struct gfs2_inum_host *inum)
139{ 139{
140 struct gfs2_sbd *sdp = sb->s_fs_info; 140 struct gfs2_sbd *sdp = sb->s_fs_info;
141 struct gfs2_holder i_gh;
142 struct inode *inode; 141 struct inode *inode;
143 struct dentry *dentry; 142 struct dentry *dentry;
144 int error;
145 143
146 inode = gfs2_ilookup(sb, inum->no_addr); 144 inode = gfs2_ilookup(sb, inum->no_addr);
147 if (inode) { 145 if (inode) {
@@ -152,52 +150,16 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
152 goto out_inode; 150 goto out_inode;
153 } 151 }
154 152
155 error = gfs2_glock_nq_num(sdp, inum->no_addr, &gfs2_inode_glops, 153 inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino,
156 LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 154 GFS2_BLKST_DINODE);
157 if (error) 155 if (IS_ERR(inode))
158 return ERR_PTR(error); 156 return ERR_CAST(inode);
159
160 error = gfs2_check_blk_type(sdp, inum->no_addr, GFS2_BLKST_DINODE);
161 if (error)
162 goto fail;
163
164 inode = gfs2_inode_lookup(sb, DT_UNKNOWN, inum->no_addr, 0);
165 if (IS_ERR(inode)) {
166 error = PTR_ERR(inode);
167 goto fail;
168 }
169
170 error = gfs2_inode_refresh(GFS2_I(inode));
171 if (error) {
172 iput(inode);
173 goto fail;
174 }
175
176 /* Pick up the works we bypass in gfs2_inode_lookup */
177 if (inode->i_state & I_NEW)
178 gfs2_set_iop(inode);
179
180 if (GFS2_I(inode)->i_no_formal_ino != inum->no_formal_ino) {
181 iput(inode);
182 goto fail;
183 }
184
185 error = -EIO;
186 if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM) {
187 iput(inode);
188 goto fail;
189 }
190
191 gfs2_glock_dq_uninit(&i_gh);
192 157
193out_inode: 158out_inode:
194 dentry = d_obtain_alias(inode); 159 dentry = d_obtain_alias(inode);
195 if (!IS_ERR(dentry)) 160 if (!IS_ERR(dentry))
196 dentry->d_op = &gfs2_dops; 161 dentry->d_op = &gfs2_dops;
197 return dentry; 162 return dentry;
198fail:
199 gfs2_glock_dq_uninit(&i_gh);
200 return ERR_PTR(error);
201} 163}
202 164
203static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, 165static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 87778857f09..f92c1770416 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -686,21 +686,20 @@ static void delete_work_func(struct work_struct *work)
686{ 686{
687 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete); 687 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
688 struct gfs2_sbd *sdp = gl->gl_sbd; 688 struct gfs2_sbd *sdp = gl->gl_sbd;
689 struct gfs2_inode *ip = NULL; 689 struct gfs2_inode *ip;
690 struct inode *inode; 690 struct inode *inode;
691 u64 no_addr = 0; 691 u64 no_addr = gl->gl_name.ln_number;
692
693 ip = gl->gl_object;
694 /* Note: Unsafe to dereference ip as we don't hold right refs/locks */
692 695
693 spin_lock(&gl->gl_spin);
694 ip = (struct gfs2_inode *)gl->gl_object;
695 if (ip) 696 if (ip)
696 no_addr = ip->i_no_addr;
697 spin_unlock(&gl->gl_spin);
698 if (ip) {
699 inode = gfs2_ilookup(sdp->sd_vfs, no_addr); 697 inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
700 if (inode) { 698 else
701 d_prune_aliases(inode); 699 inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
702 iput(inode); 700 if (inode && !IS_ERR(inode)) {
703 } 701 d_prune_aliases(inode);
702 iput(inode);
704 } 703 }
705 gfs2_glock_put(gl); 704 gfs2_glock_put(gl);
706} 705}
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 06370f8bd8c..e1213f7f921 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -73,49 +73,6 @@ static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
73 return iget5_locked(sb, hash, iget_test, iget_set, &no_addr); 73 return iget5_locked(sb, hash, iget_test, iget_set, &no_addr);
74} 74}
75 75
76struct gfs2_skip_data {
77 u64 no_addr;
78 int skipped;
79};
80
81static int iget_skip_test(struct inode *inode, void *opaque)
82{
83 struct gfs2_inode *ip = GFS2_I(inode);
84 struct gfs2_skip_data *data = opaque;
85
86 if (ip->i_no_addr == data->no_addr) {
87 if (inode->i_state & (I_FREEING|I_WILL_FREE)){
88 data->skipped = 1;
89 return 0;
90 }
91 return 1;
92 }
93 return 0;
94}
95
96static int iget_skip_set(struct inode *inode, void *opaque)
97{
98 struct gfs2_inode *ip = GFS2_I(inode);
99 struct gfs2_skip_data *data = opaque;
100
101 if (data->skipped)
102 return 1;
103 inode->i_ino = (unsigned long)(data->no_addr);
104 ip->i_no_addr = data->no_addr;
105 return 0;
106}
107
108static struct inode *gfs2_iget_skip(struct super_block *sb,
109 u64 no_addr)
110{
111 struct gfs2_skip_data data;
112 unsigned long hash = (unsigned long)no_addr;
113
114 data.no_addr = no_addr;
115 data.skipped = 0;
116 return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data);
117}
118
119/** 76/**
120 * GFS2 lookup code fills in vfs inode contents based on info obtained 77 * GFS2 lookup code fills in vfs inode contents based on info obtained
121 * from directory entry inside gfs2_inode_lookup(). This has caused issues 78 * from directory entry inside gfs2_inode_lookup(). This has caused issues
@@ -243,93 +200,54 @@ fail:
243 return ERR_PTR(error); 200 return ERR_PTR(error);
244} 201}
245 202
246/** 203struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
247 * gfs2_process_unlinked_inode - Lookup an unlinked inode for reclamation 204 u64 *no_formal_ino, unsigned int blktype)
248 * and try to reclaim it by doing iput.
249 *
250 * This function assumes no rgrp locks are currently held.
251 *
252 * @sb: The super block
253 * no_addr: The inode number
254 *
255 */
256
257void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr)
258{ 205{
259 struct gfs2_sbd *sdp; 206 struct super_block *sb = sdp->sd_vfs;
260 struct gfs2_inode *ip; 207 struct gfs2_holder i_gh;
261 struct gfs2_glock *io_gl = NULL;
262 int error;
263 struct gfs2_holder gh;
264 struct inode *inode; 208 struct inode *inode;
209 int error;
265 210
266 inode = gfs2_iget_skip(sb, no_addr); 211 error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
267 212 LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
268 if (!inode) 213 if (error)
269 return; 214 return ERR_PTR(error);
270
271 /* If it's not a new inode, someone's using it, so leave it alone. */
272 if (!(inode->i_state & I_NEW)) {
273 iput(inode);
274 return;
275 }
276
277 ip = GFS2_I(inode);
278 sdp = GFS2_SB(inode);
279 ip->i_no_formal_ino = -1;
280 215
281 error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); 216 error = gfs2_check_blk_type(sdp, no_addr, blktype);
282 if (unlikely(error)) 217 if (error)
283 goto fail; 218 goto fail;
284 ip->i_gl->gl_object = ip;
285 219
286 error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl); 220 inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0);
287 if (unlikely(error)) 221 if (IS_ERR(inode))
288 goto fail_put; 222 goto fail;
289
290 set_bit(GIF_INVALID, &ip->i_flags);
291 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, LM_FLAG_TRY | GL_EXACT,
292 &ip->i_iopen_gh);
293 if (unlikely(error))
294 goto fail_iopen;
295 223
296 ip->i_iopen_gh.gh_gl->gl_object = ip; 224 error = gfs2_inode_refresh(GFS2_I(inode));
297 gfs2_glock_put(io_gl); 225 if (error)
298 io_gl = NULL; 226 goto fail_iput;
299 227
300 inode->i_mode = DT2IF(DT_UNKNOWN); 228 /* Pick up the works we bypass in gfs2_inode_lookup */
229 if (inode->i_state & I_NEW)
230 gfs2_set_iop(inode);
301 231
302 /* 232 /* Two extra checks for NFS only */
303 * We must read the inode in order to work out its type in 233 if (no_formal_ino) {
304 * this case. Note that this doesn't happen often as we normally 234 error = -ESTALE;
305 * know the type beforehand. This code path only occurs during 235 if (GFS2_I(inode)->i_no_formal_ino != *no_formal_ino)
306 * unlinked inode recovery (where it is safe to do this glock, 236 goto fail_iput;
307 * which is not true in the general case).
308 */
309 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, LM_FLAG_TRY,
310 &gh);
311 if (unlikely(error))
312 goto fail_glock;
313 237
314 /* Inode is now uptodate */ 238 error = -EIO;
315 gfs2_glock_dq_uninit(&gh); 239 if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM)
316 gfs2_set_iop(inode); 240 goto fail_iput;
317 241
318 /* The iput will cause it to be deleted. */ 242 error = 0;
319 iput(inode); 243 }
320 return;
321 244
322fail_glock:
323 gfs2_glock_dq(&ip->i_iopen_gh);
324fail_iopen:
325 if (io_gl)
326 gfs2_glock_put(io_gl);
327fail_put:
328 ip->i_gl->gl_object = NULL;
329 gfs2_glock_put(ip->i_gl);
330fail: 245fail:
331 iget_failed(inode); 246 gfs2_glock_dq_uninit(&i_gh);
332 return; 247 return error ? ERR_PTR(error) : inode;
248fail_iput:
249 iput(inode);
250 goto fail;
333} 251}
334 252
335static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) 253static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 6720d7d5fbc..d8499fadcc5 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -99,7 +99,9 @@ err:
99extern void gfs2_set_iop(struct inode *inode); 99extern void gfs2_set_iop(struct inode *inode);
100extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 100extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
101 u64 no_addr, u64 no_formal_ino); 101 u64 no_addr, u64 no_formal_ino);
102extern void gfs2_process_unlinked_inode(struct super_block *sb, u64 no_addr); 102extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
103 u64 *no_formal_ino,
104 unsigned int blktype);
103extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr); 105extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr);
104 106
105extern int gfs2_inode_refresh(struct gfs2_inode *ip); 107extern int gfs2_inode_refresh(struct gfs2_inode *ip);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 58a9b9998b4..f606baf9ba7 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -631,6 +631,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
631 struct fs_disk_quota *fdq) 631 struct fs_disk_quota *fdq)
632{ 632{
633 struct inode *inode = &ip->i_inode; 633 struct inode *inode = &ip->i_inode;
634 struct gfs2_sbd *sdp = GFS2_SB(inode);
634 struct address_space *mapping = inode->i_mapping; 635 struct address_space *mapping = inode->i_mapping;
635 unsigned long index = loc >> PAGE_CACHE_SHIFT; 636 unsigned long index = loc >> PAGE_CACHE_SHIFT;
636 unsigned offset = loc & (PAGE_CACHE_SIZE - 1); 637 unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
@@ -658,11 +659,11 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
658 qd->qd_qb.qb_value = qp->qu_value; 659 qd->qd_qb.qb_value = qp->qu_value;
659 if (fdq) { 660 if (fdq) {
660 if (fdq->d_fieldmask & FS_DQ_BSOFT) { 661 if (fdq->d_fieldmask & FS_DQ_BSOFT) {
661 qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit); 662 qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
662 qd->qd_qb.qb_warn = qp->qu_warn; 663 qd->qd_qb.qb_warn = qp->qu_warn;
663 } 664 }
664 if (fdq->d_fieldmask & FS_DQ_BHARD) { 665 if (fdq->d_fieldmask & FS_DQ_BHARD) {
665 qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit); 666 qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
666 qd->qd_qb.qb_limit = qp->qu_limit; 667 qd->qd_qb.qb_limit = qp->qu_limit;
667 } 668 }
668 } 669 }
@@ -1497,9 +1498,9 @@ static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
1497 fdq->d_version = FS_DQUOT_VERSION; 1498 fdq->d_version = FS_DQUOT_VERSION;
1498 fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA; 1499 fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
1499 fdq->d_id = id; 1500 fdq->d_id = id;
1500 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit); 1501 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
1501 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn); 1502 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
1502 fdq->d_bcount = be64_to_cpu(qlvb->qb_value); 1503 fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
1503 1504
1504 gfs2_glock_dq_uninit(&q_gh); 1505 gfs2_glock_dq_uninit(&q_gh);
1505out: 1506out:
@@ -1566,10 +1567,10 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
1566 1567
1567 /* If nothing has changed, this is a no-op */ 1568 /* If nothing has changed, this is a no-op */
1568 if ((fdq->d_fieldmask & FS_DQ_BSOFT) && 1569 if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
1569 (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn))) 1570 ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1570 fdq->d_fieldmask ^= FS_DQ_BSOFT; 1571 fdq->d_fieldmask ^= FS_DQ_BSOFT;
1571 if ((fdq->d_fieldmask & FS_DQ_BHARD) && 1572 if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
1572 (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit))) 1573 ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1573 fdq->d_fieldmask ^= FS_DQ_BHARD; 1574 fdq->d_fieldmask ^= FS_DQ_BHARD;
1574 if (fdq->d_fieldmask == 0) 1575 if (fdq->d_fieldmask == 0)
1575 goto out_i; 1576 goto out_i;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index bef3ab6cf5c..33c8407b876 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -963,17 +963,18 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
963 * The inode, if one has been found, in inode. 963 * The inode, if one has been found, in inode.
964 */ 964 */
965 965
966static u64 try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, 966static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
967 u64 skip)
968{ 967{
969 u32 goal = 0, block; 968 u32 goal = 0, block;
970 u64 no_addr; 969 u64 no_addr;
971 struct gfs2_sbd *sdp = rgd->rd_sbd; 970 struct gfs2_sbd *sdp = rgd->rd_sbd;
972 unsigned int n; 971 unsigned int n;
972 struct gfs2_glock *gl;
973 struct gfs2_inode *ip;
974 int error;
975 int found = 0;
973 976
974 for(;;) { 977 while (goal < rgd->rd_data) {
975 if (goal >= rgd->rd_data)
976 break;
977 down_write(&sdp->sd_log_flush_lock); 978 down_write(&sdp->sd_log_flush_lock);
978 n = 1; 979 n = 1;
979 block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, 980 block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
@@ -990,11 +991,32 @@ static u64 try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked,
990 if (no_addr == skip) 991 if (no_addr == skip)
991 continue; 992 continue;
992 *last_unlinked = no_addr; 993 *last_unlinked = no_addr;
993 return no_addr; 994
995 error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
996 if (error)
997 continue;
998
999 /* If the inode is already in cache, we can ignore it here
1000 * because the existing inode disposal code will deal with
1001 * it when all refs have gone away. Accessing gl_object like
1002 * this is not safe in general. Here it is ok because we do
1003 * not dereference the pointer, and we only need an approx
1004 * answer to whether it is NULL or not.
1005 */
1006 ip = gl->gl_object;
1007
1008 if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
1009 gfs2_glock_put(gl);
1010 else
1011 found++;
1012
1013 /* Limit reclaim to sensible number of tasks */
1014 if (found > 2*NR_CPUS)
1015 return;
994 } 1016 }
995 1017
996 rgd->rd_flags &= ~GFS2_RDF_CHECK; 1018 rgd->rd_flags &= ~GFS2_RDF_CHECK;
997 return 0; 1019 return;
998} 1020}
999 1021
1000/** 1022/**
@@ -1075,11 +1097,9 @@ static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
1075 * Try to acquire rgrp in way which avoids contending with others. 1097 * Try to acquire rgrp in way which avoids contending with others.
1076 * 1098 *
1077 * Returns: errno 1099 * Returns: errno
1078 * unlinked: the block address of an unlinked block to be reclaimed
1079 */ 1100 */
1080 1101
1081static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked, 1102static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
1082 u64 *last_unlinked)
1083{ 1103{
1084 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1104 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1085 struct gfs2_rgrpd *rgd, *begin = NULL; 1105 struct gfs2_rgrpd *rgd, *begin = NULL;
@@ -1089,7 +1109,6 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked,
1089 int loops = 0; 1109 int loops = 0;
1090 int error, rg_locked; 1110 int error, rg_locked;
1091 1111
1092 *unlinked = 0;
1093 rgd = gfs2_blk2rgrpd(sdp, ip->i_goal); 1112 rgd = gfs2_blk2rgrpd(sdp, ip->i_goal);
1094 1113
1095 while (rgd) { 1114 while (rgd) {
@@ -1106,17 +1125,10 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked,
1106 case 0: 1125 case 0:
1107 if (try_rgrp_fit(rgd, al)) 1126 if (try_rgrp_fit(rgd, al))
1108 goto out; 1127 goto out;
1109 /* If the rg came in already locked, there's no 1128 if (rgd->rd_flags & GFS2_RDF_CHECK)
1110 way we can recover from a failed try_rgrp_unlink 1129 try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
1111 because that would require an iput which can only
1112 happen after the rgrp is unlocked. */
1113 if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK)
1114 *unlinked = try_rgrp_unlink(rgd, last_unlinked,
1115 ip->i_no_addr);
1116 if (!rg_locked) 1130 if (!rg_locked)
1117 gfs2_glock_dq_uninit(&al->al_rgd_gh); 1131 gfs2_glock_dq_uninit(&al->al_rgd_gh);
1118 if (*unlinked)
1119 return -EAGAIN;
1120 /* fall through */ 1132 /* fall through */
1121 case GLR_TRYFAILED: 1133 case GLR_TRYFAILED:
1122 rgd = recent_rgrp_next(rgd); 1134 rgd = recent_rgrp_next(rgd);
@@ -1145,13 +1157,10 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *unlinked,
1145 case 0: 1157 case 0:
1146 if (try_rgrp_fit(rgd, al)) 1158 if (try_rgrp_fit(rgd, al))
1147 goto out; 1159 goto out;
1148 if (!rg_locked && rgd->rd_flags & GFS2_RDF_CHECK) 1160 if (rgd->rd_flags & GFS2_RDF_CHECK)
1149 *unlinked = try_rgrp_unlink(rgd, last_unlinked, 1161 try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
1150 ip->i_no_addr);
1151 if (!rg_locked) 1162 if (!rg_locked)
1152 gfs2_glock_dq_uninit(&al->al_rgd_gh); 1163 gfs2_glock_dq_uninit(&al->al_rgd_gh);
1153 if (*unlinked)
1154 return -EAGAIN;
1155 break; 1164 break;
1156 1165
1157 case GLR_TRYFAILED: 1166 case GLR_TRYFAILED:
@@ -1204,12 +1213,12 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex,
1204 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1213 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1205 struct gfs2_alloc *al = ip->i_alloc; 1214 struct gfs2_alloc *al = ip->i_alloc;
1206 int error = 0; 1215 int error = 0;
1207 u64 last_unlinked = NO_BLOCK, unlinked; 1216 u64 last_unlinked = NO_BLOCK;
1217 int tries = 0;
1208 1218
1209 if (gfs2_assert_warn(sdp, al->al_requested)) 1219 if (gfs2_assert_warn(sdp, al->al_requested))
1210 return -EINVAL; 1220 return -EINVAL;
1211 1221
1212try_again:
1213 if (hold_rindex) { 1222 if (hold_rindex) {
1214 /* We need to hold the rindex unless the inode we're using is 1223 /* We need to hold the rindex unless the inode we're using is
1215 the rindex itself, in which case it's already held. */ 1224 the rindex itself, in which case it's already held. */
@@ -1218,31 +1227,23 @@ try_again:
1218 else if (!sdp->sd_rgrps) /* We may not have the rindex read 1227 else if (!sdp->sd_rgrps) /* We may not have the rindex read
1219 in, so: */ 1228 in, so: */
1220 error = gfs2_ri_update_special(ip); 1229 error = gfs2_ri_update_special(ip);
1230 if (error)
1231 return error;
1221 } 1232 }
1222 1233
1223 if (error) 1234 do {
1224 return error; 1235 error = get_local_rgrp(ip, &last_unlinked);
1236 /* If there is no space, flushing the log may release some */
1237 if (error)
1238 gfs2_log_flush(sdp, NULL);
1239 } while (error && tries++ < 3);
1225 1240
1226 /* Find an rgrp suitable for allocation. If it encounters any unlinked
1227 dinodes along the way, error will equal -EAGAIN and unlinked will
1228 contains it block address. We then need to look up that inode and
1229 try to free it, and try the allocation again. */
1230 error = get_local_rgrp(ip, &unlinked, &last_unlinked);
1231 if (error) { 1241 if (error) {
1232 if (hold_rindex && ip != GFS2_I(sdp->sd_rindex)) 1242 if (hold_rindex && ip != GFS2_I(sdp->sd_rindex))
1233 gfs2_glock_dq_uninit(&al->al_ri_gh); 1243 gfs2_glock_dq_uninit(&al->al_ri_gh);
1234 if (error != -EAGAIN) 1244 return error;
1235 return error;
1236
1237 gfs2_process_unlinked_inode(ip->i_inode.i_sb, unlinked);
1238 /* regardless of whether or not gfs2_process_unlinked_inode
1239 was successful, we don't want to repeat it again. */
1240 last_unlinked = unlinked;
1241 gfs2_log_flush(sdp, NULL);
1242 error = 0;
1243
1244 goto try_again;
1245 } 1245 }
1246
1246 /* no error, so we have the rgrp set in the inode's allocation. */ 1247 /* no error, so we have the rgrp set in the inode's allocation. */
1247 al->al_file = file; 1248 al->al_file = file;
1248 al->al_line = line; 1249 al->al_line = line;
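Editorial note on gfs2_inplace_reserve_i() above: the old goto-based -EAGAIN dance is replaced by a bounded retry loop — attempt the reservation, flush the log between attempts in case that frees space, and give up after a few tries. The general shape of that loop, as a made-up standalone sketch:

    #include <stdbool.h>
    #include <stdio.h>

    /* Pretend allocator that never finds space, just to drive the loop. */
    static bool try_reserve(void)  { return false; }
    static void flush_log(void)    { puts("flushing log"); }

    int main(void)
    {
            int tries = 0;
            bool ok;

            do {
                    ok = try_reserve();
                    if (!ok)
                            flush_log();    /* recovery step between attempts */
            } while (!ok && tries++ < 3);

            puts(ok ? "reserved" : "out of space");
            return 0;
    }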
diff --git a/fs/ioctl.c b/fs/ioctl.c
index e92fdbb3bc3..d6cc1647662 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -6,7 +6,6 @@
6 6
7#include <linux/syscalls.h> 7#include <linux/syscalls.h>
8#include <linux/mm.h> 8#include <linux/mm.h>
9#include <linux/smp_lock.h>
10#include <linux/capability.h> 9#include <linux/capability.h>
11#include <linux/file.h> 10#include <linux/file.h>
12#include <linux/fs.h> 11#include <linux/fs.h>
@@ -530,41 +529,6 @@ static int ioctl_fsthaw(struct file *filp)
530 return thaw_super(sb); 529 return thaw_super(sb);
531} 530}
532 531
533static int ioctl_fstrim(struct file *filp, void __user *argp)
534{
535 struct super_block *sb = filp->f_path.dentry->d_inode->i_sb;
536 struct fstrim_range range;
537 int ret = 0;
538
539 if (!capable(CAP_SYS_ADMIN))
540 return -EPERM;
541
542 /* If filesystem doesn't support trim feature, return. */
543 if (sb->s_op->trim_fs == NULL)
544 return -EOPNOTSUPP;
545
546 /* If a blockdevice-backed filesystem isn't specified, return EINVAL. */
547 if (sb->s_bdev == NULL)
548 return -EINVAL;
549
550 if (argp == NULL) {
551 range.start = 0;
552 range.len = ULLONG_MAX;
553 range.minlen = 0;
554 } else if (copy_from_user(&range, argp, sizeof(range)))
555 return -EFAULT;
556
557 ret = sb->s_op->trim_fs(sb, &range);
558 if (ret < 0)
559 return ret;
560
561 if ((argp != NULL) &&
562 (copy_to_user(argp, &range, sizeof(range))))
563 return -EFAULT;
564
565 return 0;
566}
567
568/* 532/*
569 * When you add any new common ioctls to the switches above and below 533 * When you add any new common ioctls to the switches above and below
570 * please update compat_sys_ioctl() too. 534 * please update compat_sys_ioctl() too.
@@ -615,10 +579,6 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
615 error = ioctl_fsthaw(filp); 579 error = ioctl_fsthaw(filp);
616 break; 580 break;
617 581
618 case FITRIM:
619 error = ioctl_fstrim(filp, argp);
620 break;
621
622 case FS_IOC_FIEMAP: 582 case FS_IOC_FIEMAP:
623 return ioctl_fiemap(filp, arg); 583 return ioctl_fiemap(filp, arg);
624 584
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 2f7d05c8992..7da2a06508e 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -103,22 +103,15 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
103 } 103 }
104 104
105 ret = -ESRCH; 105 ret = -ESRCH;
106 /* 106 rcu_read_lock();
107 * We want IOPRIO_WHO_PGRP/IOPRIO_WHO_USER to be "atomic",
108 * so we can't use rcu_read_lock(). See re-copy of ->ioprio
109 * in copy_process().
110 */
111 read_lock(&tasklist_lock);
112 switch (which) { 107 switch (which) {
113 case IOPRIO_WHO_PROCESS: 108 case IOPRIO_WHO_PROCESS:
114 rcu_read_lock();
115 if (!who) 109 if (!who)
116 p = current; 110 p = current;
117 else 111 else
118 p = find_task_by_vpid(who); 112 p = find_task_by_vpid(who);
119 if (p) 113 if (p)
120 ret = set_task_ioprio(p, ioprio); 114 ret = set_task_ioprio(p, ioprio);
121 rcu_read_unlock();
122 break; 115 break;
123 case IOPRIO_WHO_PGRP: 116 case IOPRIO_WHO_PGRP:
124 if (!who) 117 if (!who)
@@ -141,12 +134,7 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
141 break; 134 break;
142 135
143 do_each_thread(g, p) { 136 do_each_thread(g, p) {
144 int match; 137 if (__task_cred(p)->uid != who)
145
146 rcu_read_lock();
147 match = __task_cred(p)->uid == who;
148 rcu_read_unlock();
149 if (!match)
150 continue; 138 continue;
151 ret = set_task_ioprio(p, ioprio); 139 ret = set_task_ioprio(p, ioprio);
152 if (ret) 140 if (ret)
@@ -160,7 +148,7 @@ free_uid:
160 ret = -EINVAL; 148 ret = -EINVAL;
161 } 149 }
162 150
163 read_unlock(&tasklist_lock); 151 rcu_read_unlock();
164 return ret; 152 return ret;
165} 153}
166 154
@@ -204,17 +192,15 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
204 int ret = -ESRCH; 192 int ret = -ESRCH;
205 int tmpio; 193 int tmpio;
206 194
207 read_lock(&tasklist_lock); 195 rcu_read_lock();
208 switch (which) { 196 switch (which) {
209 case IOPRIO_WHO_PROCESS: 197 case IOPRIO_WHO_PROCESS:
210 rcu_read_lock();
211 if (!who) 198 if (!who)
212 p = current; 199 p = current;
213 else 200 else
214 p = find_task_by_vpid(who); 201 p = find_task_by_vpid(who);
215 if (p) 202 if (p)
216 ret = get_task_ioprio(p); 203 ret = get_task_ioprio(p);
217 rcu_read_unlock();
218 break; 204 break;
219 case IOPRIO_WHO_PGRP: 205 case IOPRIO_WHO_PGRP:
220 if (!who) 206 if (!who)
@@ -241,12 +227,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
241 break; 227 break;
242 228
243 do_each_thread(g, p) { 229 do_each_thread(g, p) {
244 int match; 230 if (__task_cred(p)->uid != user->uid)
245
246 rcu_read_lock();
247 match = __task_cred(p)->uid == user->uid;
248 rcu_read_unlock();
249 if (!match)
250 continue; 231 continue;
251 tmpio = get_task_ioprio(p); 232 tmpio = get_task_ioprio(p);
252 if (tmpio < 0) 233 if (tmpio < 0)
@@ -264,6 +245,6 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
264 ret = -EINVAL; 245 ret = -EINVAL;
265 } 246 }
266 247
267 read_unlock(&tasklist_lock); 248 rcu_read_unlock();
268 return ret; 249 return ret;
269} 250}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index c590d155c09..f837ba95352 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -899,6 +899,14 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev,
899 899
900 /* journal descriptor can store up to n blocks -bzzz */ 900 /* journal descriptor can store up to n blocks -bzzz */
901 journal->j_blocksize = blocksize; 901 journal->j_blocksize = blocksize;
902 journal->j_dev = bdev;
903 journal->j_fs_dev = fs_dev;
904 journal->j_blk_offset = start;
905 journal->j_maxlen = len;
906 bdevname(journal->j_dev, journal->j_devname);
907 p = journal->j_devname;
908 while ((p = strchr(p, '/')))
909 *p = '!';
902 jbd2_stats_proc_init(journal); 910 jbd2_stats_proc_init(journal);
903 n = journal->j_blocksize / sizeof(journal_block_tag_t); 911 n = journal->j_blocksize / sizeof(journal_block_tag_t);
904 journal->j_wbufsize = n; 912 journal->j_wbufsize = n;
@@ -908,14 +916,6 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev,
908 __func__); 916 __func__);
909 goto out_err; 917 goto out_err;
910 } 918 }
911 journal->j_dev = bdev;
912 journal->j_fs_dev = fs_dev;
913 journal->j_blk_offset = start;
914 journal->j_maxlen = len;
915 bdevname(journal->j_dev, journal->j_devname);
916 p = journal->j_devname;
917 while ((p = strchr(p, '/')))
918 *p = '!';
919 919
920 bh = __getblk(journal->j_dev, start, journal->j_blocksize); 920 bh = __getblk(journal->j_dev, start, journal->j_blocksize);
921 if (!bh) { 921 if (!bh) {
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index d5bb86866e6..25509eb28fd 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -14,7 +14,6 @@
14#include <linux/sunrpc/clnt.h> 14#include <linux/sunrpc/clnt.h>
15#include <linux/sunrpc/svc.h> 15#include <linux/sunrpc/svc.h>
16#include <linux/lockd/lockd.h> 16#include <linux/lockd/lockd.h>
17#include <linux/smp_lock.h>
18#include <linux/kthread.h> 17#include <linux/kthread.h>
19 18
20#define NLMDBG_FACILITY NLMDBG_CLIENT 19#define NLMDBG_FACILITY NLMDBG_CLIENT
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 47ea1e1925b..332c54cf75e 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -7,7 +7,6 @@
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/smp_lock.h>
11#include <linux/slab.h> 10#include <linux/slab.h>
12#include <linux/types.h> 11#include <linux/types.h>
13#include <linux/errno.h> 12#include <linux/errno.h>
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 25e21e4023b..ed0c59fe23c 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -124,7 +124,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
124 continue; 124 continue;
125 if (host->h_server != ni->server) 125 if (host->h_server != ni->server)
126 continue; 126 continue;
127 if (ni->server && 127 if (ni->server && ni->src_len != 0 &&
128 !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap)) 128 !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap))
129 continue; 129 continue;
130 130
@@ -167,6 +167,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
167 host->h_addrlen = ni->salen; 167 host->h_addrlen = ni->salen;
168 rpc_set_port(nlm_addr(host), 0); 168 rpc_set_port(nlm_addr(host), 0);
169 memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len); 169 memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len);
170 host->h_srcaddrlen = ni->src_len;
170 host->h_version = ni->version; 171 host->h_version = ni->version;
171 host->h_proto = ni->protocol; 172 host->h_proto = ni->protocol;
172 host->h_rpcclnt = NULL; 173 host->h_rpcclnt = NULL;
@@ -238,9 +239,6 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
238 const char *hostname, 239 const char *hostname,
239 int noresvport) 240 int noresvport)
240{ 241{
241 const struct sockaddr source = {
242 .sa_family = AF_UNSPEC,
243 };
244 struct nlm_lookup_host_info ni = { 242 struct nlm_lookup_host_info ni = {
245 .server = 0, 243 .server = 0,
246 .sap = sap, 244 .sap = sap,
@@ -249,8 +247,6 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
249 .version = version, 247 .version = version,
250 .hostname = hostname, 248 .hostname = hostname,
251 .hostname_len = strlen(hostname), 249 .hostname_len = strlen(hostname),
252 .src_sap = &source,
253 .src_len = sizeof(source),
254 .noresvport = noresvport, 250 .noresvport = noresvport,
255 }; 251 };
256 252
@@ -357,7 +353,6 @@ nlm_bind_host(struct nlm_host *host)
357 .protocol = host->h_proto, 353 .protocol = host->h_proto,
358 .address = nlm_addr(host), 354 .address = nlm_addr(host),
359 .addrsize = host->h_addrlen, 355 .addrsize = host->h_addrlen,
360 .saddress = nlm_srcaddr(host),
361 .timeout = &timeparms, 356 .timeout = &timeparms,
362 .servername = host->h_name, 357 .servername = host->h_name,
363 .program = &nlm_program, 358 .program = &nlm_program,
@@ -376,6 +371,8 @@ nlm_bind_host(struct nlm_host *host)
376 args.flags |= RPC_CLNT_CREATE_HARDRTRY; 371 args.flags |= RPC_CLNT_CREATE_HARDRTRY;
377 if (host->h_noresvport) 372 if (host->h_noresvport)
378 args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; 373 args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
374 if (host->h_srcaddrlen)
375 args.saddress = nlm_srcaddr(host);
379 376
380 clnt = rpc_create(&args); 377 clnt = rpc_create(&args);
381 if (!IS_ERR(clnt)) 378 if (!IS_ERR(clnt))
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index a336e832475..38d26119245 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -9,7 +9,6 @@
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/time.h> 11#include <linux/time.h>
12#include <linux/smp_lock.h>
13#include <linux/lockd/lockd.h> 12#include <linux/lockd/lockd.h>
14#include <linux/lockd/share.h> 13#include <linux/lockd/share.h>
15 14
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index c462d346acb..ef5659b211e 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -25,7 +25,6 @@
25#include <linux/errno.h> 25#include <linux/errno.h>
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/smp_lock.h>
29#include <linux/sunrpc/clnt.h> 28#include <linux/sunrpc/clnt.h>
30#include <linux/sunrpc/svc.h> 29#include <linux/sunrpc/svc.h>
31#include <linux/lockd/nlm.h> 30#include <linux/lockd/nlm.h>
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index c3069f38d60..0caea5310ac 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -9,7 +9,6 @@
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/time.h> 11#include <linux/time.h>
12#include <linux/smp_lock.h>
13#include <linux/lockd/lockd.h> 12#include <linux/lockd/lockd.h>
14#include <linux/lockd/share.h> 13#include <linux/lockd/share.h>
15 14
diff --git a/fs/locks.c b/fs/locks.c
index 0e62dd35d08..8729347bcd1 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -122,7 +122,6 @@
122#include <linux/module.h> 122#include <linux/module.h>
123#include <linux/security.h> 123#include <linux/security.h>
124#include <linux/slab.h> 124#include <linux/slab.h>
125#include <linux/smp_lock.h>
126#include <linux/syscalls.h> 125#include <linux/syscalls.h>
127#include <linux/time.h> 126#include <linux/time.h>
128#include <linux/rcupdate.h> 127#include <linux/rcupdate.h>
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index f46ee8b0e13..9da29706f91 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -828,7 +828,7 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
828 super->s_journal_seg[i] = segno; 828 super->s_journal_seg[i] = segno;
829 super->s_journal_ec[i] = ec; 829 super->s_journal_ec[i] = ec;
830 logfs_set_segment_reserved(sb, segno); 830 logfs_set_segment_reserved(sb, segno);
831 err = btree_insert32(head, segno, (void *)1, GFP_KERNEL); 831 err = btree_insert32(head, segno, (void *)1, GFP_NOFS);
832 BUG_ON(err); /* mempool should prevent this */ 832 BUG_ON(err); /* mempool should prevent this */
833 err = logfs_erase_segment(sb, segno, 1); 833 err = logfs_erase_segment(sb, segno, 1);
834 BUG_ON(err); /* FIXME: remount-ro would be nicer */ 834 BUG_ON(err); /* FIXME: remount-ro would be nicer */
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 6127baf0e18..ee99a9f5dfd 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1994,6 +1994,9 @@ static int do_write_inode(struct inode *inode)
1994 1994
1995 /* FIXME: transaction is part of logfs_block now. Is that enough? */ 1995 /* FIXME: transaction is part of logfs_block now. Is that enough? */
1996 err = logfs_write_buf(master_inode, page, 0); 1996 err = logfs_write_buf(master_inode, page, 0);
1997 if (err)
1998 move_page_to_inode(inode, page);
1999
1997 logfs_put_write_page(page); 2000 logfs_put_write_page(page);
1998 return err; 2001 return err;
1999} 2002}
diff --git a/fs/namei.c b/fs/namei.c
index 5362af9b737..4ff7ca53053 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1748,6 +1748,9 @@ struct file *do_filp_open(int dfd, const char *pathname,
1748 if (!(open_flag & O_CREAT)) 1748 if (!(open_flag & O_CREAT))
1749 mode = 0; 1749 mode = 0;
1750 1750
1751 /* Must never be set by userspace */
1752 open_flag &= ~FMODE_NONOTIFY;
1753
1751 /* 1754 /*
1752 * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only	1755 * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
1753 * check for O_DSYNC if the need any syncing at all we enforce it's	1756 * check for O_DSYNC if the need any syncing at all we enforce it's
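
The do_filp_open() hunk masks FMODE_NONOTIFY out of the caller-supplied flags because that bit is reserved for in-kernel fsnotify use and must never be settable from userspace. The idiom, stripping reserved bits from an untrusted flags word, in a self-contained form (the flag values below are made up for illustration):

    #include <stdio.h>

    #define APP_FLAG_READ           0x1u
    #define APP_FLAG_WRITE          0x2u
    /* internal-only bit: callers must never be able to set it */
    #define INTERNAL_FLAG_NONOTIFY  0x80000000u

    static unsigned int sanitize_flags(unsigned int user_flags)
    {
        /* strip anything userspace has no business setting */
        return user_flags & ~INTERNAL_FLAG_NONOTIFY;
    }

    int main(void)
    {
        unsigned int from_user = APP_FLAG_READ | INTERNAL_FLAG_NONOTIFY;

        printf("0x%x -> 0x%x\n", from_user, sanitize_flags(from_user));
        return 0;
    }
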
diff --git a/fs/namespace.c b/fs/namespace.c
index 8a415c9c5e5..3dbfc072ec7 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -13,7 +13,6 @@
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/spinlock.h> 14#include <linux/spinlock.h>
15#include <linux/percpu.h> 15#include <linux/percpu.h>
16#include <linux/smp_lock.h>
17#include <linux/init.h> 16#include <linux/init.h>
18#include <linux/kernel.h> 17#include <linux/kernel.h>
19#include <linux/acct.h> 18#include <linux/acct.h>
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index aac8832e919..f22b12e7d33 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -19,7 +19,6 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <asm/uaccess.h> 20#include <asm/uaccess.h>
21#include <asm/byteorder.h> 21#include <asm/byteorder.h>
22#include <linux/smp_lock.h>
23 22
24#include <linux/ncp_fs.h> 23#include <linux/ncp_fs.h>
25 24
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index 6c754f70c52..cb50aaf981d 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -17,7 +17,6 @@
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/vmalloc.h> 18#include <linux/vmalloc.h>
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/smp_lock.h>
21 20
22#include <linux/ncp_fs.h> 21#include <linux/ncp_fs.h>
23#include "ncplib_kernel.h" 22#include "ncplib_kernel.h"
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index d290545aa0c..8fb93b604e7 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -26,7 +26,6 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/smp_lock.h>
30#include <linux/vfs.h> 29#include <linux/vfs.h>
31#include <linux/mount.h> 30#include <linux/mount.h>
32#include <linux/seq_file.h> 31#include <linux/seq_file.h>
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index c2a1f9a155c..d40a547e337 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -17,7 +17,6 @@
17#include <linux/mount.h> 17#include <linux/mount.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/highuid.h> 19#include <linux/highuid.h>
20#include <linux/smp_lock.h>
21#include <linux/vmalloc.h> 20#include <linux/vmalloc.h>
22#include <linux/sched.h> 21#include <linux/sched.h>
23 22
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index aeec017fe81..93a8b3bd69e 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -9,7 +9,6 @@
9#include <linux/completion.h> 9#include <linux/completion.h>
10#include <linux/ip.h> 10#include <linux/ip.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/smp_lock.h>
13#include <linux/sunrpc/svc.h> 12#include <linux/sunrpc/svc.h>
14#include <linux/sunrpc/svcsock.h> 13#include <linux/sunrpc/svcsock.h>
15#include <linux/nfs_fs.h> 14#include <linux/nfs_fs.h>
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 232a7eead33..1fd62fc49be 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -11,7 +11,6 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/smp_lock.h>
15#include <linux/spinlock.h> 14#include <linux/spinlock.h>
16 15
17#include <linux/nfs4.h> 16#include <linux/nfs4.h>
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 07ac3847e56..996dd8989a9 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -34,6 +34,7 @@
34#include <linux/mount.h> 34#include <linux/mount.h>
35#include <linux/sched.h> 35#include <linux/sched.h>
36#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include <linux/kmemleak.h>
37 38
38#include "delegation.h" 39#include "delegation.h"
39#include "iostat.h" 40#include "iostat.h"
@@ -56,7 +57,7 @@ static int nfs_rename(struct inode *, struct dentry *,
56 struct inode *, struct dentry *); 57 struct inode *, struct dentry *);
57static int nfs_fsync_dir(struct file *, int); 58static int nfs_fsync_dir(struct file *, int);
58static loff_t nfs_llseek_dir(struct file *, loff_t, int); 59static loff_t nfs_llseek_dir(struct file *, loff_t, int);
59static int nfs_readdir_clear_array(struct page*, gfp_t); 60static void nfs_readdir_clear_array(struct page*);
60 61
61const struct file_operations nfs_dir_operations = { 62const struct file_operations nfs_dir_operations = {
62 .llseek = nfs_llseek_dir, 63 .llseek = nfs_llseek_dir,
@@ -82,8 +83,8 @@ const struct inode_operations nfs_dir_inode_operations = {
82 .setattr = nfs_setattr, 83 .setattr = nfs_setattr,
83}; 84};
84 85
85const struct address_space_operations nfs_dir_addr_space_ops = { 86const struct address_space_operations nfs_dir_aops = {
86 .releasepage = nfs_readdir_clear_array, 87 .freepage = nfs_readdir_clear_array,
87}; 88};
88 89
89#ifdef CONFIG_NFS_V3 90#ifdef CONFIG_NFS_V3
@@ -161,6 +162,7 @@ struct nfs_cache_array_entry {
161 u64 cookie; 162 u64 cookie;
162 u64 ino; 163 u64 ino;
163 struct qstr string; 164 struct qstr string;
165 unsigned char d_type;
164}; 166};
165 167
166struct nfs_cache_array { 168struct nfs_cache_array {
@@ -170,14 +172,13 @@ struct nfs_cache_array {
170 struct nfs_cache_array_entry array[0]; 172 struct nfs_cache_array_entry array[0];
171}; 173};
172 174
173#define MAX_READDIR_ARRAY ((PAGE_SIZE - sizeof(struct nfs_cache_array)) / sizeof(struct nfs_cache_array_entry))
174
175typedef __be32 * (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); 175typedef __be32 * (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
176typedef struct { 176typedef struct {
177 struct file *file; 177 struct file *file;
178 struct page *page; 178 struct page *page;
179 unsigned long page_index; 179 unsigned long page_index;
180 u64 *dir_cookie; 180 u64 *dir_cookie;
181 u64 last_cookie;
181 loff_t current_index; 182 loff_t current_index;
182 decode_dirent_t decode; 183 decode_dirent_t decode;
183 184
@@ -194,9 +195,13 @@ typedef struct {
194static 195static
195struct nfs_cache_array *nfs_readdir_get_array(struct page *page) 196struct nfs_cache_array *nfs_readdir_get_array(struct page *page)
196{ 197{
198 void *ptr;
197 if (page == NULL) 199 if (page == NULL)
198 return ERR_PTR(-EIO); 200 return ERR_PTR(-EIO);
199 return (struct nfs_cache_array *)kmap(page); 201 ptr = kmap(page);
202 if (ptr == NULL)
203 return ERR_PTR(-ENOMEM);
204 return ptr;
200} 205}
201 206
202static 207static
@@ -209,14 +214,15 @@ void nfs_readdir_release_array(struct page *page)
209 * we are freeing strings created by nfs_add_to_readdir_array() 214 * we are freeing strings created by nfs_add_to_readdir_array()
210 */ 215 */
211static 216static
212int nfs_readdir_clear_array(struct page *page, gfp_t mask) 217void nfs_readdir_clear_array(struct page *page)
213{ 218{
214 struct nfs_cache_array *array = nfs_readdir_get_array(page); 219 struct nfs_cache_array *array;
215 int i; 220 int i;
221
222 array = kmap_atomic(page, KM_USER0);
216 for (i = 0; i < array->size; i++) 223 for (i = 0; i < array->size; i++)
217 kfree(array->array[i].string.name); 224 kfree(array->array[i].string.name);
218 nfs_readdir_release_array(page); 225 kunmap_atomic(array, KM_USER0);
219 return 0;
220} 226}
221 227
222/* 228/*
@@ -231,6 +237,11 @@ int nfs_readdir_make_qstr(struct qstr *string, const char *name, unsigned int le
231 string->name = kmemdup(name, len, GFP_KERNEL); 237 string->name = kmemdup(name, len, GFP_KERNEL);
232 if (string->name == NULL) 238 if (string->name == NULL)
233 return -ENOMEM; 239 return -ENOMEM;
240 /*
241 * Avoid a kmemleak false positive. The pointer to the name is stored
242 * in a page cache page which kmemleak does not scan.
243 */
244 kmemleak_not_leak(string->name);
234 string->hash = full_name_hash(name, len); 245 string->hash = full_name_hash(name, len);
235 return 0; 246 return 0;
236} 247}
@@ -244,20 +255,24 @@ int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page)
244 255
245 if (IS_ERR(array)) 256 if (IS_ERR(array))
246 return PTR_ERR(array); 257 return PTR_ERR(array);
247 ret = -EIO;
248 if (array->size >= MAX_READDIR_ARRAY)
249 goto out;
250 258
251 cache_entry = &array->array[array->size]; 259 cache_entry = &array->array[array->size];
260
261 /* Check that this entry lies within the page bounds */
262 ret = -ENOSPC;
263 if ((char *)&cache_entry[1] - (char *)page_address(page) > PAGE_SIZE)
264 goto out;
265
252 cache_entry->cookie = entry->prev_cookie; 266 cache_entry->cookie = entry->prev_cookie;
253 cache_entry->ino = entry->ino; 267 cache_entry->ino = entry->ino;
268 cache_entry->d_type = entry->d_type;
254 ret = nfs_readdir_make_qstr(&cache_entry->string, entry->name, entry->len); 269 ret = nfs_readdir_make_qstr(&cache_entry->string, entry->name, entry->len);
255 if (ret) 270 if (ret)
256 goto out; 271 goto out;
257 array->last_cookie = entry->cookie; 272 array->last_cookie = entry->cookie;
258 if (entry->eof == 1)
259 array->eof_index = array->size;
260 array->size++; 273 array->size++;
274 if (entry->eof != 0)
275 array->eof_index = array->size;
261out: 276out:
262 nfs_readdir_release_array(page); 277 nfs_readdir_release_array(page);
263 return ret; 278 return ret;
@@ -272,7 +287,7 @@ int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descri
272 if (diff < 0) 287 if (diff < 0)
273 goto out_eof; 288 goto out_eof;
274 if (diff >= array->size) { 289 if (diff >= array->size) {
275 if (array->eof_index > 0) 290 if (array->eof_index >= 0)
276 goto out_eof; 291 goto out_eof;
277 desc->current_index += array->size; 292 desc->current_index += array->size;
278 return -EAGAIN; 293 return -EAGAIN;
@@ -281,8 +296,6 @@ int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descri
281 index = (unsigned int)diff; 296 index = (unsigned int)diff;
282 *desc->dir_cookie = array->array[index].cookie; 297 *desc->dir_cookie = array->array[index].cookie;
283 desc->cache_entry_index = index; 298 desc->cache_entry_index = index;
284 if (index == array->eof_index)
285 desc->eof = 1;
286 return 0; 299 return 0;
287out_eof: 300out_eof:
288 desc->eof = 1; 301 desc->eof = 1;
@@ -296,17 +309,16 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
296 int status = -EAGAIN; 309 int status = -EAGAIN;
297 310
298 for (i = 0; i < array->size; i++) { 311 for (i = 0; i < array->size; i++) {
299 if (i == array->eof_index) {
300 desc->eof = 1;
301 status = -EBADCOOKIE;
302 }
303 if (array->array[i].cookie == *desc->dir_cookie) { 312 if (array->array[i].cookie == *desc->dir_cookie) {
304 desc->cache_entry_index = i; 313 desc->cache_entry_index = i;
305 status = 0; 314 return 0;
306 break;
307 } 315 }
308 } 316 }
309 317 if (array->eof_index >= 0) {
318 status = -EBADCOOKIE;
319 if (*desc->dir_cookie == array->last_cookie)
320 desc->eof = 1;
321 }
310 return status; 322 return status;
311} 323}
312 324
@@ -314,10 +326,7 @@ static
314int nfs_readdir_search_array(nfs_readdir_descriptor_t *desc) 326int nfs_readdir_search_array(nfs_readdir_descriptor_t *desc)
315{ 327{
316 struct nfs_cache_array *array; 328 struct nfs_cache_array *array;
317 int status = -EBADCOOKIE; 329 int status;
318
319 if (desc->dir_cookie == NULL)
320 goto out;
321 330
322 array = nfs_readdir_get_array(desc->page); 331 array = nfs_readdir_get_array(desc->page);
323 if (IS_ERR(array)) { 332 if (IS_ERR(array)) {
@@ -330,6 +339,10 @@ int nfs_readdir_search_array(nfs_readdir_descriptor_t *desc)
330 else 339 else
331 status = nfs_readdir_search_for_cookie(array, desc); 340 status = nfs_readdir_search_for_cookie(array, desc);
332 341
342 if (status == -EAGAIN) {
343 desc->last_cookie = array->last_cookie;
344 desc->page_index++;
345 }
333 nfs_readdir_release_array(desc->page); 346 nfs_readdir_release_array(desc->page);
334out: 347out:
335 return status; 348 return status;
@@ -381,13 +394,9 @@ int xdr_decode(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, struct x
381static 394static
382int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry) 395int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
383{ 396{
384 struct nfs_inode *node;
385 if (dentry->d_inode == NULL) 397 if (dentry->d_inode == NULL)
386 goto different; 398 goto different;
387 node = NFS_I(dentry->d_inode); 399 if (nfs_compare_fh(entry->fh, NFS_FH(dentry->d_inode)) != 0)
388 if (node->fh.size != entry->fh->size)
389 goto different;
390 if (strncmp(node->fh.data, entry->fh->data, node->fh.size) != 0)
391 goto different; 400 goto different;
392 return 1; 401 return 1;
393different: 402different:
@@ -449,14 +458,15 @@ out:
449 458
450/* Perform conversion from xdr to cache array */ 459/* Perform conversion from xdr to cache array */
451static 460static
452void nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, 461int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry,
453 void *xdr_page, struct page *page, unsigned int buflen) 462 void *xdr_page, struct page *page, unsigned int buflen)
454{ 463{
455 struct xdr_stream stream; 464 struct xdr_stream stream;
456 struct xdr_buf buf; 465 struct xdr_buf buf;
457 __be32 *ptr = xdr_page; 466 __be32 *ptr = xdr_page;
458 int status;
459 struct nfs_cache_array *array; 467 struct nfs_cache_array *array;
468 unsigned int count = 0;
469 int status;
460 470
461 buf.head->iov_base = xdr_page; 471 buf.head->iov_base = xdr_page;
462 buf.head->iov_len = buflen; 472 buf.head->iov_len = buflen;
@@ -471,21 +481,32 @@ void nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *e
471 481
472 do { 482 do {
473 status = xdr_decode(desc, entry, &stream); 483 status = xdr_decode(desc, entry, &stream);
474 if (status != 0) 484 if (status != 0) {
485 if (status == -EAGAIN)
486 status = 0;
475 break; 487 break;
488 }
476 489
477 if (nfs_readdir_add_to_array(entry, page) == -1) 490 count++;
478 break; 491
479 if (desc->plus == 1) 492 if (desc->plus != 0)
480 nfs_prime_dcache(desc->file->f_path.dentry, entry); 493 nfs_prime_dcache(desc->file->f_path.dentry, entry);
494
495 status = nfs_readdir_add_to_array(entry, page);
496 if (status != 0)
497 break;
481 } while (!entry->eof); 498 } while (!entry->eof);
482 499
483 if (status == -EBADCOOKIE && entry->eof) { 500 if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) {
484 array = nfs_readdir_get_array(page); 501 array = nfs_readdir_get_array(page);
485 array->eof_index = array->size - 1; 502 if (!IS_ERR(array)) {
486 status = 0; 503 array->eof_index = array->size;
487 nfs_readdir_release_array(page); 504 status = 0;
505 nfs_readdir_release_array(page);
506 } else
507 status = PTR_ERR(array);
488 } 508 }
509 return status;
489} 510}
490 511
491static 512static
@@ -537,11 +558,11 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
537 struct nfs_entry entry; 558 struct nfs_entry entry;
538 struct file *file = desc->file; 559 struct file *file = desc->file;
539 struct nfs_cache_array *array; 560 struct nfs_cache_array *array;
540 int status = 0; 561 int status = -ENOMEM;
541 unsigned int array_size = ARRAY_SIZE(pages); 562 unsigned int array_size = ARRAY_SIZE(pages);
542 563
543 entry.prev_cookie = 0; 564 entry.prev_cookie = 0;
544 entry.cookie = *desc->dir_cookie; 565 entry.cookie = desc->last_cookie;
545 entry.eof = 0; 566 entry.eof = 0;
546 entry.fh = nfs_alloc_fhandle(); 567 entry.fh = nfs_alloc_fhandle();
547 entry.fattr = nfs_alloc_fattr(); 568 entry.fattr = nfs_alloc_fattr();
@@ -549,6 +570,10 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
549 goto out; 570 goto out;
550 571
551 array = nfs_readdir_get_array(page); 572 array = nfs_readdir_get_array(page);
573 if (IS_ERR(array)) {
574 status = PTR_ERR(array);
575 goto out;
576 }
552 memset(array, 0, sizeof(struct nfs_cache_array)); 577 memset(array, 0, sizeof(struct nfs_cache_array));
553 array->eof_index = -1; 578 array->eof_index = -1;
554 579
@@ -556,12 +581,19 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
556 if (!pages_ptr) 581 if (!pages_ptr)
557 goto out_release_array; 582 goto out_release_array;
558 do { 583 do {
584 unsigned int pglen;
559 status = nfs_readdir_xdr_filler(pages, desc, &entry, file, inode); 585 status = nfs_readdir_xdr_filler(pages, desc, &entry, file, inode);
560 586
561 if (status < 0) 587 if (status < 0)
562 break; 588 break;
563 nfs_readdir_page_filler(desc, &entry, pages_ptr, page, array_size * PAGE_SIZE); 589 pglen = status;
564 } while (array->eof_index < 0 && array->size < MAX_READDIR_ARRAY); 590 status = nfs_readdir_page_filler(desc, &entry, pages_ptr, page, pglen);
591 if (status < 0) {
592 if (status == -ENOSPC)
593 status = 0;
594 break;
595 }
596 } while (array->eof_index < 0);
565 597
566 nfs_readdir_free_large_page(pages_ptr, pages, array_size); 598 nfs_readdir_free_large_page(pages_ptr, pages, array_size);
567out_release_array: 599out_release_array:
@@ -582,8 +614,10 @@ static
582int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page) 614int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page)
583{ 615{
584 struct inode *inode = desc->file->f_path.dentry->d_inode; 616 struct inode *inode = desc->file->f_path.dentry->d_inode;
617 int ret;
585 618
586 if (nfs_readdir_xdr_to_array(desc, page, inode) < 0) 619 ret = nfs_readdir_xdr_to_array(desc, page, inode);
620 if (ret < 0)
587 goto error; 621 goto error;
588 SetPageUptodate(page); 622 SetPageUptodate(page);
589 623
@@ -595,12 +629,14 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page)
595 return 0; 629 return 0;
596 error: 630 error:
597 unlock_page(page); 631 unlock_page(page);
598 return -EIO; 632 return ret;
599} 633}
600 634
601static 635static
602void cache_page_release(nfs_readdir_descriptor_t *desc) 636void cache_page_release(nfs_readdir_descriptor_t *desc)
603{ 637{
638 if (!desc->page->mapping)
639 nfs_readdir_clear_array(desc->page);
604 page_cache_release(desc->page); 640 page_cache_release(desc->page);
605 desc->page = NULL; 641 desc->page = NULL;
606} 642}
@@ -608,12 +644,8 @@ void cache_page_release(nfs_readdir_descriptor_t *desc)
608static 644static
609struct page *get_cache_page(nfs_readdir_descriptor_t *desc) 645struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
610{ 646{
611 struct page *page; 647 return read_cache_page(desc->file->f_path.dentry->d_inode->i_mapping,
612 page = read_cache_page(desc->file->f_path.dentry->d_inode->i_mapping,
613 desc->page_index, (filler_t *)nfs_readdir_filler, desc); 648 desc->page_index, (filler_t *)nfs_readdir_filler, desc);
614 if (IS_ERR(page))
615 desc->eof = 1;
616 return page;
617} 649}
618 650
619/* 651/*
@@ -629,9 +661,8 @@ int find_cache_page(nfs_readdir_descriptor_t *desc)
629 return PTR_ERR(desc->page); 661 return PTR_ERR(desc->page);
630 662
631 res = nfs_readdir_search_array(desc); 663 res = nfs_readdir_search_array(desc);
632 if (res == 0) 664 if (res != 0)
633 return 0; 665 cache_page_release(desc);
634 cache_page_release(desc);
635 return res; 666 return res;
636} 667}
637 668
@@ -639,22 +670,18 @@ int find_cache_page(nfs_readdir_descriptor_t *desc)
639static inline 670static inline
640int readdir_search_pagecache(nfs_readdir_descriptor_t *desc) 671int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
641{ 672{
642 int res = -EAGAIN; 673 int res;
643 674
644 while (1) { 675 if (desc->page_index == 0) {
645 res = find_cache_page(desc); 676 desc->current_index = 0;
646 if (res != -EAGAIN) 677 desc->last_cookie = 0;
647 break;
648 desc->page_index++;
649 } 678 }
679 do {
680 res = find_cache_page(desc);
681 } while (res == -EAGAIN);
650 return res; 682 return res;
651} 683}
652 684
653static inline unsigned int dt_type(struct inode *inode)
654{
655 return (inode->i_mode >> 12) & 15;
656}
657
658/* 685/*
659 * Once we've found the start of the dirent within a page: fill 'er up... 686 * Once we've found the start of the dirent within a page: fill 'er up...
660 */ 687 */
@@ -666,35 +693,35 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
666 int i = 0; 693 int i = 0;
667 int res = 0; 694 int res = 0;
668 struct nfs_cache_array *array = NULL; 695 struct nfs_cache_array *array = NULL;
669 unsigned int d_type = DT_UNKNOWN;
670 struct dentry *dentry = NULL;
671 696
672 array = nfs_readdir_get_array(desc->page); 697 array = nfs_readdir_get_array(desc->page);
698 if (IS_ERR(array)) {
699 res = PTR_ERR(array);
700 goto out;
701 }
673 702
674 for (i = desc->cache_entry_index; i < array->size; i++) { 703 for (i = desc->cache_entry_index; i < array->size; i++) {
675 d_type = DT_UNKNOWN; 704 struct nfs_cache_array_entry *ent;
676 705
677 res = filldir(dirent, array->array[i].string.name, 706 ent = &array->array[i];
678 array->array[i].string.len, file->f_pos, 707 if (filldir(dirent, ent->string.name, ent->string.len,
679 nfs_compat_user_ino64(array->array[i].ino), d_type); 708 file->f_pos, nfs_compat_user_ino64(ent->ino),
680 if (res < 0) 709 ent->d_type) < 0) {
710 desc->eof = 1;
681 break; 711 break;
712 }
682 file->f_pos++; 713 file->f_pos++;
683 desc->cache_entry_index = i;
684 if (i < (array->size-1)) 714 if (i < (array->size-1))
685 *desc->dir_cookie = array->array[i+1].cookie; 715 *desc->dir_cookie = array->array[i+1].cookie;
686 else 716 else
687 *desc->dir_cookie = array->last_cookie; 717 *desc->dir_cookie = array->last_cookie;
688 if (i == array->eof_index) {
689 desc->eof = 1;
690 break;
691 }
692 } 718 }
719 if (array->eof_index >= 0)
720 desc->eof = 1;
693 721
694 nfs_readdir_release_array(desc->page); 722 nfs_readdir_release_array(desc->page);
723out:
695 cache_page_release(desc); 724 cache_page_release(desc);
696 if (dentry != NULL)
697 dput(dentry);
698 dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n", 725 dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n",
699 (unsigned long long)*desc->dir_cookie, res); 726 (unsigned long long)*desc->dir_cookie, res);
700 return res; 727 return res;
@@ -729,13 +756,14 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
729 goto out; 756 goto out;
730 } 757 }
731 758
732 if (nfs_readdir_xdr_to_array(desc, page, inode) == -1) {
733 status = -EIO;
734 goto out_release;
735 }
736
737 desc->page_index = 0; 759 desc->page_index = 0;
760 desc->last_cookie = *desc->dir_cookie;
738 desc->page = page; 761 desc->page = page;
762
763 status = nfs_readdir_xdr_to_array(desc, page, inode);
764 if (status < 0)
765 goto out_release;
766
739 status = nfs_do_filldir(desc, dirent, filldir); 767 status = nfs_do_filldir(desc, dirent, filldir);
740 768
741 out: 769 out:
@@ -757,7 +785,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
757 struct inode *inode = dentry->d_inode; 785 struct inode *inode = dentry->d_inode;
758 nfs_readdir_descriptor_t my_desc, 786 nfs_readdir_descriptor_t my_desc,
759 *desc = &my_desc; 787 *desc = &my_desc;
760 int res = -ENOMEM; 788 int res;
761 789
762 dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n", 790 dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
763 dentry->d_parent->d_name.name, dentry->d_name.name, 791 dentry->d_parent->d_name.name, dentry->d_name.name,
@@ -782,18 +810,18 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
782 if (res < 0) 810 if (res < 0)
783 goto out; 811 goto out;
784 812
785 while (desc->eof != 1) { 813 do {
786 res = readdir_search_pagecache(desc); 814 res = readdir_search_pagecache(desc);
787 815
788 if (res == -EBADCOOKIE) { 816 if (res == -EBADCOOKIE) {
817 res = 0;
789 /* This means either end of directory */ 818 /* This means either end of directory */
790 if (*desc->dir_cookie && desc->eof == 0) { 819 if (*desc->dir_cookie && desc->eof == 0) {
791 /* Or that the server has 'lost' a cookie */ 820 /* Or that the server has 'lost' a cookie */
792 res = uncached_readdir(desc, dirent, filldir); 821 res = uncached_readdir(desc, dirent, filldir);
793 if (res >= 0) 822 if (res == 0)
794 continue; 823 continue;
795 } 824 }
796 res = 0;
797 break; 825 break;
798 } 826 }
799 if (res == -ETOOSMALL && desc->plus) { 827 if (res == -ETOOSMALL && desc->plus) {
@@ -808,11 +836,9 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
808 break; 836 break;
809 837
810 res = nfs_do_filldir(desc, dirent, filldir); 838 res = nfs_do_filldir(desc, dirent, filldir);
811 if (res < 0) { 839 if (res < 0)
812 res = 0;
813 break; 840 break;
814 } 841 } while (!desc->eof);
815 }
816out: 842out:
817 nfs_unblock_sillyrename(dentry); 843 nfs_unblock_sillyrename(dentry);
818 if (res > 0) 844 if (res > 0)
@@ -1345,12 +1371,12 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry
1345 res = NULL; 1371 res = NULL;
1346 goto out; 1372 goto out;
1347 /* This turned out not to be a regular file */ 1373 /* This turned out not to be a regular file */
1348 case -EISDIR:
1349 case -ENOTDIR: 1374 case -ENOTDIR:
1350 goto no_open; 1375 goto no_open;
1351 case -ELOOP: 1376 case -ELOOP:
1352 if (!(nd->intent.open.flags & O_NOFOLLOW)) 1377 if (!(nd->intent.open.flags & O_NOFOLLOW))
1353 goto no_open; 1378 goto no_open;
1379 /* case -EISDIR: */
1354 /* case -EINVAL: */ 1380 /* case -EINVAL: */
1355 default: 1381 default:
1356 res = ERR_CAST(inode); 1382 res = ERR_CAST(inode);
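
The largest change above is the readdir cache rework in fs/nfs/dir.c: the fixed MAX_READDIR_ARRAY limit is gone, and nfs_readdir_add_to_array() instead checks that the next nfs_cache_array_entry still fits inside the backing page, returning -ENOSPC once it would spill over. A compact user-space model of that bounds test; the page size and entry layout below are illustrative, not the kernel's:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096

    struct cache_entry { unsigned long long cookie; char name[56]; };
    struct cache_array { int size; struct cache_entry array[]; };

    static int add_entry(void *page, unsigned long long cookie)
    {
        struct cache_array *arr = page;
        struct cache_entry *ent = &arr->array[arr->size];

        /* would the entry we are about to write run past the page? */
        if ((char *)&ent[1] - (char *)page > PAGE_SIZE)
            return -ENOSPC;

        ent->cookie = cookie;
        arr->size++;
        return 0;
    }

    int main(void)
    {
        void *page = calloc(1, PAGE_SIZE);
        unsigned long long cookie = 0;

        if (!page)
            return 1;
        while (add_entry(page, cookie) == 0)
            cookie++;
        printf("stored %llu entries before -ENOSPC\n", cookie);
        free(page);
        return 0;
    }
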
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 84d3c8b9020..e6ace0d93c7 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -867,7 +867,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
867 goto out; 867 goto out;
868 nfs_alloc_commit_data(dreq); 868 nfs_alloc_commit_data(dreq);
869 869
870 if (dreq->commit_data == NULL || count < wsize) 870 if (dreq->commit_data == NULL || count <= wsize)
871 sync = NFS_FILE_SYNC; 871 sync = NFS_FILE_SYNC;
872 872
873 dreq->inode = inode; 873 dreq->inode = inode;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 60677f9f131..7bf029ef408 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -693,6 +693,7 @@ do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
693{ 693{
694 struct inode *inode = filp->f_mapping->host; 694 struct inode *inode = filp->f_mapping->host;
695 int status = 0; 695 int status = 0;
696 unsigned int saved_type = fl->fl_type;
696 697
697 /* Try local locking first */ 698 /* Try local locking first */
698 posix_test_lock(filp, fl); 699 posix_test_lock(filp, fl);
@@ -700,6 +701,7 @@ do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
700 /* found a conflict */ 701 /* found a conflict */
701 goto out; 702 goto out;
702 } 703 }
704 fl->fl_type = saved_type;
703 705
704 if (nfs_have_delegation(inode, FMODE_READ)) 706 if (nfs_have_delegation(inode, FMODE_READ))
705 goto out_noconflict; 707 goto out_noconflict;
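
The do_getlk() fix is needed because posix_test_lock() rewrites fl->fl_type to F_UNLCK when no local conflict exists, so the original type has to be put back before the same request is sent to the server. The identical probe-then-restore dance with plain fcntl(F_GETLK) in user space:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
        short saved_type = fl.l_type;
        int fd = open("/tmp/getlk-demo", O_RDWR | O_CREAT, 0600);

        if (fd < 0 || fcntl(fd, F_GETLK, &fl) < 0) {
            perror("getlk");
            return 1;
        }

        if (fl.l_type == F_UNLCK) {
            /* no local conflict: F_GETLK overwrote l_type, restore it
             * before asking anyone else (e.g. a remote lock manager)
             * the same question with the original lock type */
            fl.l_type = saved_type;
        }

        printf("l_type is %d again\n", fl.l_type);
        close(fd);
        unlink("/tmp/getlk-demo");
        return 0;
    }
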
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 314f5716460..e67e31c7341 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -289,6 +289,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
289 } else if (S_ISDIR(inode->i_mode)) { 289 } else if (S_ISDIR(inode->i_mode)) {
290 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops; 290 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
291 inode->i_fop = &nfs_dir_operations; 291 inode->i_fop = &nfs_dir_operations;
292 inode->i_data.a_ops = &nfs_dir_aops;
292 if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS)) 293 if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS))
293 set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags); 294 set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
294 /* Deal with crossing mountpoints */ 295 /* Deal with crossing mountpoints */
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index db08ff3ff45..e6356b750b7 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -362,6 +362,15 @@ unsigned int nfs_page_length(struct page *page)
362} 362}
363 363
364/* 364/*
365 * Convert a umode to a dirent->d_type
366 */
367static inline
368unsigned char nfs_umode_to_dtype(umode_t mode)
369{
370 return (mode >> 12) & 15;
371}
372
373/*
365 * Determine the number of pages in an array of length 'len' and 374 * Determine the number of pages in an array of length 'len' and
366 * with a base offset of 'base' 375 * with a base offset of 'base'
367 */ 376 */
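
The new nfs_umode_to_dtype() helper in internal.h works because the DT_* values used in dirents are exactly the S_IFMT type bits of a mode shifted down by 12, so a single shift-and-mask yields the d_type that the readdir code above now caches per entry. The identity is easy to check in user space:

    #include <dirent.h>
    #include <stdio.h>
    #include <sys/stat.h>

    static unsigned char umode_to_dtype(mode_t mode)
    {
        return (mode >> 12) & 15;
    }

    int main(void)
    {
        /* the DT_* constants are the S_IF* type bits shifted right by 12 */
        printf("dir:  %d == %d\n", umode_to_dtype(S_IFDIR | 0755), DT_DIR);
        printf("file: %d == %d\n", umode_to_dtype(S_IFREG | 0644), DT_REG);
        printf("link: %d == %d\n", umode_to_dtype(S_IFLNK | 0777), DT_LNK);
        return 0;
    }
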
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index eceafe74f47..4f981f1f668 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -505,13 +505,13 @@ static struct rpc_procinfo mnt3_procedures[] = {
505 505
506static struct rpc_version mnt_version1 = { 506static struct rpc_version mnt_version1 = {
507 .number = 1, 507 .number = 1,
508 .nrprocs = 2, 508 .nrprocs = ARRAY_SIZE(mnt_procedures),
509 .procs = mnt_procedures, 509 .procs = mnt_procedures,
510}; 510};
511 511
512static struct rpc_version mnt_version3 = { 512static struct rpc_version mnt_version3 = {
513 .number = 3, 513 .number = 3,
514 .nrprocs = 2, 514 .nrprocs = ARRAY_SIZE(mnt3_procedures),
515 .procs = mnt3_procedures, 515 .procs = mnt3_procedures,
516}; 516};
517 517
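
The mount_clnt.c change swaps the hard-coded nrprocs = 2 for ARRAY_SIZE() so the count can never drift out of sync with the procedure table it describes. The idiom in isolation, with a toy table:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct procinfo { const char *name; };

    static const struct procinfo procedures[] = {
        { "NULL" },
        { "MNT" },
        { "UMNT" },   /* adding an entry updates nrprocs automatically */
    };

    static const struct {
        unsigned int nrprocs;
        const struct procinfo *procs;
    } version = {
        .nrprocs = ARRAY_SIZE(procedures),
        .procs   = procedures,
    };

    int main(void)
    {
        printf("nrprocs = %u\n", version.nrprocs);
        return 0;
    }
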
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index e6bf45710cc..5914a1911c9 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -423,7 +423,7 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
423 struct page **page; 423 struct page **page;
424 size_t hdrlen; 424 size_t hdrlen;
425 unsigned int pglen, recvd; 425 unsigned int pglen, recvd;
426 int status, nr = 0; 426 int status;
427 427
428 if ((status = ntohl(*p++))) 428 if ((status = ntohl(*p++)))
429 return nfs_stat_to_errno(status); 429 return nfs_stat_to_errno(status);
@@ -443,7 +443,7 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
443 if (pglen > recvd) 443 if (pglen > recvd)
444 pglen = recvd; 444 pglen = recvd;
445 page = rcvbuf->pages; 445 page = rcvbuf->pages;
446 return nr; 446 return pglen;
447} 447}
448 448
449static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) 449static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
@@ -485,6 +485,8 @@ nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_se
485 entry->prev_cookie = entry->cookie; 485 entry->prev_cookie = entry->cookie;
486 entry->cookie = ntohl(*p++); 486 entry->cookie = ntohl(*p++);
487 487
488 entry->d_type = DT_UNKNOWN;
489
488 p = xdr_inline_peek(xdr, 8); 490 p = xdr_inline_peek(xdr, 8);
489 if (p != NULL) 491 if (p != NULL)
490 entry->eof = !p[0] && p[1]; 492 entry->eof = !p[0] && p[1];
@@ -495,7 +497,7 @@ nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_se
495 497
496out_overflow: 498out_overflow:
497 print_overflow_msg(__func__, xdr); 499 print_overflow_msg(__func__, xdr);
498 return ERR_PTR(-EIO); 500 return ERR_PTR(-EAGAIN);
499} 501}
500 502
501/* 503/*
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index d9a5e832c25..f6cc60f06da 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -555,7 +555,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
555 struct page **page; 555 struct page **page;
556 size_t hdrlen; 556 size_t hdrlen;
557 u32 recvd, pglen; 557 u32 recvd, pglen;
558 int status, nr = 0; 558 int status;
559 559
560 status = ntohl(*p++); 560 status = ntohl(*p++);
561 /* Decode post_op_attrs */ 561 /* Decode post_op_attrs */
@@ -586,7 +586,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
586 pglen = recvd; 586 pglen = recvd;
587 page = rcvbuf->pages; 587 page = rcvbuf->pages;
588 588
589 return nr; 589 return pglen;
590} 590}
591 591
592__be32 * 592__be32 *
@@ -622,11 +622,13 @@ nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_s
622 entry->prev_cookie = entry->cookie; 622 entry->prev_cookie = entry->cookie;
623 p = xdr_decode_hyper(p, &entry->cookie); 623 p = xdr_decode_hyper(p, &entry->cookie);
624 624
625 entry->d_type = DT_UNKNOWN;
625 if (plus) { 626 if (plus) {
626 entry->fattr->valid = 0; 627 entry->fattr->valid = 0;
627 p = xdr_decode_post_op_attr_stream(xdr, entry->fattr); 628 p = xdr_decode_post_op_attr_stream(xdr, entry->fattr);
628 if (IS_ERR(p)) 629 if (IS_ERR(p))
629 goto out_overflow_exit; 630 goto out_overflow_exit;
631 entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
630 /* In fact, a post_op_fh3: */ 632 /* In fact, a post_op_fh3: */
631 p = xdr_inline_decode(xdr, 4); 633 p = xdr_inline_decode(xdr, 4);
632 if (unlikely(!p)) 634 if (unlikely(!p))
@@ -656,7 +658,7 @@ nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_s
656out_overflow: 658out_overflow:
657 print_overflow_msg(__func__, xdr); 659 print_overflow_msg(__func__, xdr);
658out_overflow_exit: 660out_overflow_exit:
659 return ERR_PTR(-EIO); 661 return ERR_PTR(-EAGAIN);
660} 662}
661 663
662/* 664/*
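
Across the v2, v3 and v4 XDR code the dirent decoders now return ERR_PTR(-EAGAIN) instead of -EIO when they run out of received data, and the readdir result decoders return the page length that was actually filled. That lets the nfs_readdir_page_filler() loop in dir.c treat -EAGAIN as a normal "buffer exhausted" signal rather than a failure. A simplified user-space version of that loop convention; the decoder here is a stand-in, not the kernel XDR code:

    #include <errno.h>
    #include <stdio.h>

    /* hypothetical decoder: returns an entry value, or -EAGAIN when the
     * buffer has been exhausted */
    static int decode_next(const int *buf, int len, int *pos)
    {
        if (*pos >= len)
            return -EAGAIN;
        return buf[(*pos)++];
    }

    static int fill_from_buffer(const int *buf, int len)
    {
        int pos = 0, count = 0, status;

        for (;;) {
            status = decode_next(buf, len, &pos);
            if (status < 0) {
                if (status == -EAGAIN)
                    status = 0;   /* out of data: not an error */
                break;
            }
            count++;
        }
        printf("decoded %d entries, status %d\n", count, status);
        return status;
    }

    int main(void)
    {
        int entries[] = { 10, 20, 30 };

        return fill_from_buffer(entries, 3);
    }
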
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 0f24cdf2cb1..4435e5e1f90 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2852,8 +2852,10 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
2852 nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); 2852 nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
2853 res.pgbase = args.pgbase; 2853 res.pgbase = args.pgbase;
2854 status = nfs4_call_sync(NFS_SERVER(dir), &msg, &args, &res, 0); 2854 status = nfs4_call_sync(NFS_SERVER(dir), &msg, &args, &res, 0);
2855 if (status == 0) 2855 if (status >= 0) {
2856 memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); 2856 memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
2857 status += args.pgbase;
2858 }
2857 2859
2858 nfs_invalidate_atime(dir); 2860 nfs_invalidate_atime(dir);
2859 2861
@@ -3359,6 +3361,8 @@ static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3359 ret = nfs_revalidate_inode(server, inode); 3361 ret = nfs_revalidate_inode(server, inode);
3360 if (ret < 0) 3362 if (ret < 0)
3361 return ret; 3363 return ret;
3364 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
3365 nfs_zap_acl_cache(inode);
3362 ret = nfs4_read_cached_acl(inode, buf, buflen); 3366 ret = nfs4_read_cached_acl(inode, buf, buflen);
3363 if (ret != -ENOENT) 3367 if (ret != -ENOENT)
3364 return ret; 3368 return ret;
@@ -3387,6 +3391,13 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
3387 nfs_inode_return_delegation(inode); 3391 nfs_inode_return_delegation(inode);
3388 buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); 3392 buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3389 ret = nfs4_call_sync(server, &msg, &arg, &res, 1); 3393 ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
3394 /*
3395 * Acl update can result in inode attribute update.
3396 * so mark the attribute cache invalid.
3397 */
3398 spin_lock(&inode->i_lock);
3399 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
3400 spin_unlock(&inode->i_lock);
3390 nfs_access_zap_cache(inode); 3401 nfs_access_zap_cache(inode);
3391 nfs_zap_acl_cache(inode); 3402 nfs_zap_acl_cache(inode);
3392 return ret; 3403 return ret;
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index f313c4cce7e..9f1826b012e 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -4518,7 +4518,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
4518 xdr_read_pages(xdr, pglen); 4518 xdr_read_pages(xdr, pglen);
4519 4519
4520 4520
4521 return 0; 4521 return pglen;
4522} 4522}
4523 4523
4524static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req) 4524static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
@@ -6208,6 +6208,10 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
6208 if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID) 6208 if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID)
6209 entry->ino = entry->fattr->fileid; 6209 entry->ino = entry->fattr->fileid;
6210 6210
6211 entry->d_type = DT_UNKNOWN;
6212 if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE)
6213 entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
6214
6211 if (verify_attr_len(xdr, p, len) < 0) 6215 if (verify_attr_len(xdr, p, len) < 0)
6212 goto out_overflow; 6216 goto out_overflow;
6213 6217
@@ -6221,7 +6225,7 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
6221 6225
6222out_overflow: 6226out_overflow:
6223 print_overflow_msg(__func__, xdr); 6227 print_overflow_msg(__func__, xdr);
6224 return ERR_PTR(-EIO); 6228 return ERR_PTR(-EAGAIN);
6225} 6229}
6226 6230
6227/* 6231/*
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 137b549e63d..b68536cc904 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -115,7 +115,7 @@ int nfs_set_page_tag_locked(struct nfs_page *req)
115{ 115{
116 if (!nfs_lock_request_dontget(req)) 116 if (!nfs_lock_request_dontget(req))
117 return 0; 117 return 0;
118 if (req->wb_page != NULL) 118 if (test_bit(PG_MAPPED, &req->wb_flags))
119 radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED); 119 radix_tree_tag_set(&NFS_I(req->wb_context->path.dentry->d_inode)->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
120 return 1; 120 return 1;
121} 121}
@@ -125,7 +125,7 @@ int nfs_set_page_tag_locked(struct nfs_page *req)
125 */ 125 */
126void nfs_clear_page_tag_locked(struct nfs_page *req) 126void nfs_clear_page_tag_locked(struct nfs_page *req)
127{ 127{
128 if (req->wb_page != NULL) { 128 if (test_bit(PG_MAPPED, &req->wb_flags)) {
129 struct inode *inode = req->wb_context->path.dentry->d_inode; 129 struct inode *inode = req->wb_context->path.dentry->d_inode;
130 struct nfs_inode *nfsi = NFS_I(inode); 130 struct nfs_inode *nfsi = NFS_I(inode);
131 131
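
The pagelist.c hunks key the radix-tree tagging on a PG_MAPPED request flag instead of testing wb_page for NULL; the flag is set when the request is attached to the inode and cleared when it is removed (see the fs/nfs/write.c hunks further down), so the tag is only touched while the request is really indexed. A trivial user-space sketch of that lifecycle with a plain flags word (the flag value is illustrative):

    #include <stdio.h>

    #define PG_MAPPED 0x1u   /* illustrative bit, not the kernel's value */

    struct request { unsigned int flags; };

    static void attach_request(struct request *req)
    {
        req->flags |= PG_MAPPED;      /* now present in the index */
    }

    static void detach_request(struct request *req)
    {
        req->flags &= ~PG_MAPPED;     /* no longer indexed */
    }

    static void tag_if_indexed(const struct request *req)
    {
        /* only touch the index for requests that were actually inserted */
        if (req->flags & PG_MAPPED)
            printf("tagging indexed request\n");
        else
            printf("request not indexed, skipping\n");
    }

    int main(void)
    {
        struct request req = { 0 };

        tag_if_indexed(&req);
        attach_request(&req);
        tag_if_indexed(&req);
        detach_request(&req);
        tag_if_indexed(&req);
        return 0;
    }
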
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index e4b62c6f5a6..aedcaa7f291 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -152,7 +152,6 @@ static void nfs_readpage_release(struct nfs_page *req)
152 (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode), 152 (long long)NFS_FILEID(req->wb_context->path.dentry->d_inode),
153 req->wb_bytes, 153 req->wb_bytes,
154 (long long)req_offset(req)); 154 (long long)req_offset(req));
155 nfs_clear_request(req);
156 nfs_release_request(req); 155 nfs_release_request(req);
157} 156}
158 157
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 0a42e8f4adc..4100630c9a5 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -39,7 +39,6 @@
39#include <linux/nfs_mount.h> 39#include <linux/nfs_mount.h>
40#include <linux/nfs4_mount.h> 40#include <linux/nfs4_mount.h>
41#include <linux/lockd/bind.h> 41#include <linux/lockd/bind.h>
42#include <linux/smp_lock.h>
43#include <linux/seq_file.h> 42#include <linux/seq_file.h>
44#include <linux/mount.h> 43#include <linux/mount.h>
45#include <linux/mnt_namespace.h> 44#include <linux/mnt_namespace.h>
@@ -67,6 +66,12 @@
67 66
68#define NFSDBG_FACILITY NFSDBG_VFS 67#define NFSDBG_FACILITY NFSDBG_VFS
69 68
69#ifdef CONFIG_NFS_V3
70#define NFS_DEFAULT_VERSION 3
71#else
72#define NFS_DEFAULT_VERSION 2
73#endif
74
70enum { 75enum {
71 /* Mount options that take no arguments */ 76 /* Mount options that take no arguments */
72 Opt_soft, Opt_hard, 77 Opt_soft, Opt_hard,
@@ -1064,12 +1069,10 @@ static int nfs_parse_mount_options(char *raw,
1064 mnt->flags |= NFS_MOUNT_VER3; 1069 mnt->flags |= NFS_MOUNT_VER3;
1065 mnt->version = 3; 1070 mnt->version = 3;
1066 break; 1071 break;
1067#ifdef CONFIG_NFS_V4
1068 case Opt_v4: 1072 case Opt_v4:
1069 mnt->flags &= ~NFS_MOUNT_VER3; 1073 mnt->flags &= ~NFS_MOUNT_VER3;
1070 mnt->version = 4; 1074 mnt->version = 4;
1071 break; 1075 break;
1072#endif
1073 case Opt_udp: 1076 case Opt_udp:
1074 mnt->flags &= ~NFS_MOUNT_TCP; 1077 mnt->flags &= ~NFS_MOUNT_TCP;
1075 mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP; 1078 mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
@@ -1281,12 +1284,10 @@ static int nfs_parse_mount_options(char *raw,
1281 mnt->flags |= NFS_MOUNT_VER3; 1284 mnt->flags |= NFS_MOUNT_VER3;
1282 mnt->version = 3; 1285 mnt->version = 3;
1283 break; 1286 break;
1284#ifdef CONFIG_NFS_V4
1285 case NFS4_VERSION: 1287 case NFS4_VERSION:
1286 mnt->flags &= ~NFS_MOUNT_VER3; 1288 mnt->flags &= ~NFS_MOUNT_VER3;
1287 mnt->version = 4; 1289 mnt->version = 4;
1288 break; 1290 break;
1289#endif
1290 default: 1291 default:
1291 goto out_invalid_value; 1292 goto out_invalid_value;
1292 } 1293 }
@@ -2277,7 +2278,7 @@ static int nfs_get_sb(struct file_system_type *fs_type,
2277 }; 2278 };
2278 int error = -ENOMEM; 2279 int error = -ENOMEM;
2279 2280
2280 data = nfs_alloc_parsed_mount_data(3); 2281 data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
2281 mntfh = nfs_alloc_fhandle(); 2282 mntfh = nfs_alloc_fhandle();
2282 if (data == NULL || mntfh == NULL) 2283 if (data == NULL || mntfh == NULL)
2283 goto out_free_fh; 2284 goto out_free_fh;
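
nfs_get_sb() now asks for NFS_DEFAULT_VERSION rather than a literal 3, and the new #ifdef block resolves that to 3 only when NFSv3 support is compiled in, falling back to 2 otherwise, so a v2-only kernel no longer defaults to a protocol it cannot speak. The compile-time selection on its own:

    #include <stdio.h>

    /* toggled at build time, e.g.: cc -DHAVE_V3 demo.c */
    #ifdef HAVE_V3
    #define DEFAULT_VERSION 3
    #else
    #define DEFAULT_VERSION 2
    #endif

    int main(void)
    {
        printf("defaulting to protocol version %d\n", DEFAULT_VERSION);
        return 0;
    }
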
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 4c14c17a527..10d648ea128 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -390,6 +390,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
390 if (nfs_have_delegation(inode, FMODE_WRITE)) 390 if (nfs_have_delegation(inode, FMODE_WRITE))
391 nfsi->change_attr++; 391 nfsi->change_attr++;
392 } 392 }
393 set_bit(PG_MAPPED, &req->wb_flags);
393 SetPagePrivate(req->wb_page); 394 SetPagePrivate(req->wb_page);
394 set_page_private(req->wb_page, (unsigned long)req); 395 set_page_private(req->wb_page, (unsigned long)req);
395 nfsi->npages++; 396 nfsi->npages++;
@@ -415,6 +416,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
415 spin_lock(&inode->i_lock); 416 spin_lock(&inode->i_lock);
416 set_page_private(req->wb_page, 0); 417 set_page_private(req->wb_page, 0);
417 ClearPagePrivate(req->wb_page); 418 ClearPagePrivate(req->wb_page);
419 clear_bit(PG_MAPPED, &req->wb_flags);
418 radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index); 420 radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
419 nfsi->npages--; 421 nfsi->npages--;
420 if (!nfsi->npages) { 422 if (!nfsi->npages) {
@@ -422,7 +424,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
422 iput(inode); 424 iput(inode);
423 } else 425 } else
424 spin_unlock(&inode->i_lock); 426 spin_unlock(&inode->i_lock);
425 nfs_clear_request(req);
426 nfs_release_request(req); 427 nfs_release_request(req);
427} 428}
428 429
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 2a533a0af2a..7e84a852cda 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -260,9 +260,11 @@ void fill_post_wcc(struct svc_fh *fhp)
260 err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry, 260 err = vfs_getattr(fhp->fh_export->ex_path.mnt, fhp->fh_dentry,
261 &fhp->fh_post_attr); 261 &fhp->fh_post_attr);
262 fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version; 262 fhp->fh_post_change = fhp->fh_dentry->d_inode->i_version;
263 if (err) 263 if (err) {
264 fhp->fh_post_saved = 0; 264 fhp->fh_post_saved = 0;
265 else 265 /* Grab the ctime anyway - set_change_info might use it */
266 fhp->fh_post_attr.ctime = fhp->fh_dentry->d_inode->i_ctime;
267 } else
266 fhp->fh_post_saved = 1; 268 fhp->fh_post_saved = 1;
267} 269}
268 270
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index ad2bfa68d53..116cab970e0 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -2262,7 +2262,7 @@ nfs4_file_downgrade(struct nfs4_file *fp, unsigned int share_access)
2262 * Spawn a thread to perform a recall on the delegation represented 2262 * Spawn a thread to perform a recall on the delegation represented
2263 * by the lease (file_lock) 2263 * by the lease (file_lock)
2264 * 2264 *
2265 * Called from break_lease() with lock_kernel() held. 2265 * Called from break_lease() with lock_flocks() held.
2266 * Note: we assume break_lease will only call this *once* for any given 2266 * Note: we assume break_lease will only call this *once* for any given
2267 * lease. 2267 * lease.
2268 */ 2268 */
@@ -2286,7 +2286,7 @@ void nfsd_break_deleg_cb(struct file_lock *fl)
2286 list_add_tail(&dp->dl_recall_lru, &del_recall_lru); 2286 list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
2287 spin_unlock(&recall_lock); 2287 spin_unlock(&recall_lock);
2288 2288
2289 /* only place dl_time is set. protected by lock_kernel*/ 2289 /* only place dl_time is set. protected by lock_flocks*/
2290 dp->dl_time = get_seconds(); 2290 dp->dl_time = get_seconds();
2291 2291
2292 /* 2292 /*
@@ -2303,7 +2303,7 @@ void nfsd_break_deleg_cb(struct file_lock *fl)
2303/* 2303/*
2304 * The file_lock is being reaped.	2304 * The file_lock is being reaped.
2305 * 2305 *
2306 * Called by locks_free_lock() with lock_kernel() held. 2306 * Called by locks_free_lock() with lock_flocks() held.
2307 */ 2307 */
2308static 2308static
2309void nfsd_release_deleg_cb(struct file_lock *fl) 2309void nfsd_release_deleg_cb(struct file_lock *fl)
@@ -2318,7 +2318,7 @@ void nfsd_release_deleg_cb(struct file_lock *fl)
2318} 2318}
2319 2319
2320/* 2320/*
2321 * Called from setlease() with lock_kernel() held 2321 * Called from setlease() with lock_flocks() held
2322 */ 2322 */
2323static 2323static
2324int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try) 2324int nfsd_same_client_deleg_cb(struct file_lock *onlist, struct file_lock *try)
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 4d476ff08ae..60fce3dc5cb 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -484,18 +484,17 @@ static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
484static inline void 484static inline void
485set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp) 485set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
486{ 486{
487 BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved); 487 BUG_ON(!fhp->fh_pre_saved);
488 cinfo->atomic = 1; 488 cinfo->atomic = fhp->fh_post_saved;
489 cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode); 489 cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode);
490 if (cinfo->change_supported) { 490
491 cinfo->before_change = fhp->fh_pre_change; 491 cinfo->before_change = fhp->fh_pre_change;
492 cinfo->after_change = fhp->fh_post_change; 492 cinfo->after_change = fhp->fh_post_change;
493 } else { 493 cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
494 cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec; 494 cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
495 cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec; 495 cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
496 cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec; 496 cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
497 cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec; 497
498 }
499} 498}
500 499
501int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *); 500int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
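
The xdr4.h rewrite stops asserting that post-operation attributes were captured and instead reports it: cinfo->atomic becomes fh_post_saved, and both the i_version-based change values and the ctime fallback are filled unconditionally, which works because fill_post_wcc() (earlier in this diff) now records a ctime even when vfs_getattr() fails. A condensed model of what the client ends up seeing; field names are shortened and purely illustrative:

    #include <stdio.h>
    #include <time.h>

    struct change_info {
        int atomic;          /* were both before and after states captured? */
        unsigned long long before, after;
        time_t before_ctime, after_ctime;
    };

    struct fh_state {
        int pre_saved, post_saved;
        unsigned long long pre_change, post_change;
        time_t pre_ctime, post_ctime;
    };

    static void set_change_info(struct change_info *ci, const struct fh_state *fh)
    {
        /* honest atomicity: only claim it when post-op state was captured */
        ci->atomic = fh->post_saved;
        ci->before = fh->pre_change;
        ci->after = fh->post_change;
        ci->before_ctime = fh->pre_ctime;
        ci->after_ctime = fh->post_ctime;
    }

    int main(void)
    {
        struct fh_state fh = { .pre_saved = 1, .post_saved = 0,
                               .pre_change = 41, .post_change = 42 };
        struct change_info ci;

        set_change_info(&ci, &fh);
        printf("atomic=%d before=%llu after=%llu\n",
               ci.atomic, ci.before, ci.after);
        return 0;
    }
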
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 49c844dab33..59e5fe742f7 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -335,7 +335,7 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
335 * the device at this point. 335 * the device at this point.
336 * 336 *
337 * To prevent nilfs_dat_translate() from returning the 337 * To prevent nilfs_dat_translate() from returning the
338 * uncommited block number, this makes a copy of the entry 338 * uncommitted block number, this makes a copy of the entry
339 * buffer and redirects nilfs_dat_translate() to the copy. 339 * buffer and redirects nilfs_dat_translate() to the copy.
340 */ 340 */
341 if (!buffer_nilfs_redirected(entry_bh)) { 341 if (!buffer_nilfs_redirected(entry_bh)) {
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 33ad25ddd5c..caf9a6a3fb5 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -176,7 +176,6 @@ int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
176int nilfs_init_gcinode(struct inode *inode) 176int nilfs_init_gcinode(struct inode *inode)
177{ 177{
178 struct nilfs_inode_info *ii = NILFS_I(inode); 178 struct nilfs_inode_info *ii = NILFS_I(inode);
179 struct the_nilfs *nilfs = NILFS_SB(inode->i_sb)->s_nilfs;
180 179
181 inode->i_mode = S_IFREG; 180 inode->i_mode = S_IFREG;
182 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); 181 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
@@ -186,14 +185,6 @@ int nilfs_init_gcinode(struct inode *inode)
186 ii->i_flags = 0; 185 ii->i_flags = 0;
187 nilfs_bmap_init_gc(ii->i_bmap); 186 nilfs_bmap_init_gc(ii->i_bmap);
188 187
189 /*
190 * Add the inode to GC inode list. Garbage Collection
191 * is serialized and no two processes manipulate the
192 * list simultaneously.
193 */
194 igrab(inode);
195 list_add(&NILFS_I(inode)->i_dirty, &nilfs->ns_gc_inodes);
196
197 return 0; 188 return 0;
198} 189}
199 190
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 3e90f86d5bf..b185e937a33 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -337,6 +337,7 @@ static int nilfs_ioctl_move_blocks(struct super_block *sb,
337 struct nilfs_argv *argv, void *buf) 337 struct nilfs_argv *argv, void *buf)
338{ 338{
339 size_t nmembs = argv->v_nmembs; 339 size_t nmembs = argv->v_nmembs;
340 struct the_nilfs *nilfs = NILFS_SB(sb)->s_nilfs;
340 struct inode *inode; 341 struct inode *inode;
341 struct nilfs_vdesc *vdesc; 342 struct nilfs_vdesc *vdesc;
342 struct buffer_head *bh, *n; 343 struct buffer_head *bh, *n;
@@ -349,10 +350,21 @@ static int nilfs_ioctl_move_blocks(struct super_block *sb,
349 ino = vdesc->vd_ino; 350 ino = vdesc->vd_ino;
350 cno = vdesc->vd_cno; 351 cno = vdesc->vd_cno;
351 inode = nilfs_iget_for_gc(sb, ino, cno); 352 inode = nilfs_iget_for_gc(sb, ino, cno);
352 if (unlikely(inode == NULL)) { 353 if (IS_ERR(inode)) {
353 ret = -ENOMEM; 354 ret = PTR_ERR(inode);
354 goto failed; 355 goto failed;
355 } 356 }
357 if (list_empty(&NILFS_I(inode)->i_dirty)) {
358 /*
359 * Add the inode to GC inode list. Garbage Collection
360 * is serialized and no two processes manipulate the
361 * list simultaneously.
362 */
363 igrab(inode);
364 list_add(&NILFS_I(inode)->i_dirty,
365 &nilfs->ns_gc_inodes);
366 }
367
356 do { 368 do {
357 ret = nilfs_ioctl_move_inode_block(inode, vdesc, 369 ret = nilfs_ioctl_move_inode_block(inode, vdesc,
358 &buffers); 370 &buffers);
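
Moving the igrab()/list_add() pair from nilfs_init_gcinode() into nilfs_ioctl_move_blocks() lets the caller guard the insertion with list_empty(&NILFS_I(inode)->i_dirty), so an inode that is already on the GC list is not grabbed and linked a second time. The guard is just "add only if not already on a list"; a toy version with a self-linked node, mirroring the kernel's list_head convention:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static int list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add(struct list_head *entry, struct list_head *head)
    {
        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
    }

    struct inode_stub {
        int refcount;
        struct list_head i_dirty;   /* self-linked while on no list */
    };

    static struct list_head gc_inodes = LIST_HEAD_INIT(gc_inodes);

    static void maybe_add_to_gc_list(struct inode_stub *inode)
    {
        if (list_empty(&inode->i_dirty)) {
            inode->refcount++;                    /* stand-in for igrab() */
            list_add(&inode->i_dirty, &gc_inodes);
        }
    }

    int main(void)
    {
        struct inode_stub inode = { .refcount = 1,
                                    .i_dirty = LIST_HEAD_INIT(inode.i_dirty) };

        maybe_add_to_gc_list(&inode);
        maybe_add_to_gc_list(&inode);   /* second call is a no-op */
        printf("refcount = %d\n", inode.refcount);   /* 2, not 3 */
        return 0;
    }
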
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index b04f88eed09..f35794b97e8 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -92,7 +92,11 @@ static int fanotify_get_response_from_access(struct fsnotify_group *group,
92 92
93 pr_debug("%s: group=%p event=%p\n", __func__, group, event); 93 pr_debug("%s: group=%p event=%p\n", __func__, group, event);
94 94
95 wait_event(group->fanotify_data.access_waitq, event->response); 95 wait_event(group->fanotify_data.access_waitq, event->response ||
96 atomic_read(&group->fanotify_data.bypass_perm));
97
98 if (!event->response) /* bypass_perm set */
99 return 0;
96 100
97 /* userspace responded, convert to something usable */ 101 /* userspace responded, convert to something usable */
98 spin_lock(&event->lock); 102 spin_lock(&event->lock);
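
The fanotify change makes the permission-event wait abortable: the waiter wakes when either a response arrives or bypass_perm has been raised, which fanotify_release() does (in fanotify_user.c below) once the field becomes an atomic_t, and a missing response is then treated as allow-by-bypass. A user-space analog of that wait condition with pthreads (build with -pthread); the names are illustrative, not the kernel's:

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int response;       /* 0 means no answer yet */
    static int bypass_perm;    /* raised when the listener goes away */

    static void *listener_exits(void *arg)
    {
        (void)arg;
        sleep(1);
        pthread_mutex_lock(&lock);
        bypass_perm = 1;                /* nobody will ever answer */
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, listener_exits, NULL);

        pthread_mutex_lock(&lock);
        /* wait_event(wq, response || bypass_perm) */
        while (!response && !bypass_perm)
            pthread_cond_wait(&cond, &lock);
        pthread_mutex_unlock(&lock);

        if (!response)
            printf("no listener response: allowing by bypass\n");
        else
            printf("got response %d\n", response);

        pthread_join(t, NULL);
        return 0;
    }
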
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 063224812b7..8b61220cffc 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -106,20 +106,29 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
106 return client_fd; 106 return client_fd;
107} 107}
108 108
109static ssize_t fill_event_metadata(struct fsnotify_group *group, 109static int fill_event_metadata(struct fsnotify_group *group,
110 struct fanotify_event_metadata *metadata, 110 struct fanotify_event_metadata *metadata,
111 struct fsnotify_event *event) 111 struct fsnotify_event *event)
112{ 112{
113 int ret = 0;
114
113 pr_debug("%s: group=%p metadata=%p event=%p\n", __func__, 115 pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
114 group, metadata, event); 116 group, metadata, event);
115 117
116 metadata->event_len = FAN_EVENT_METADATA_LEN; 118 metadata->event_len = FAN_EVENT_METADATA_LEN;
119 metadata->metadata_len = FAN_EVENT_METADATA_LEN;
117 metadata->vers = FANOTIFY_METADATA_VERSION; 120 metadata->vers = FANOTIFY_METADATA_VERSION;
118 metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS; 121 metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
119 metadata->pid = pid_vnr(event->tgid); 122 metadata->pid = pid_vnr(event->tgid);
120 metadata->fd = create_fd(group, event); 123 if (unlikely(event->mask & FAN_Q_OVERFLOW))
124 metadata->fd = FAN_NOFD;
125 else {
126 metadata->fd = create_fd(group, event);
127 if (metadata->fd < 0)
128 ret = metadata->fd;
129 }
121 130
122 return metadata->fd; 131 return ret;
123} 132}
124 133
125#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS 134#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
@@ -200,7 +209,7 @@ static int prepare_for_access_response(struct fsnotify_group *group,
200 209
201 mutex_lock(&group->fanotify_data.access_mutex); 210 mutex_lock(&group->fanotify_data.access_mutex);
202 211
203 if (group->fanotify_data.bypass_perm) { 212 if (atomic_read(&group->fanotify_data.bypass_perm)) {
204 mutex_unlock(&group->fanotify_data.access_mutex); 213 mutex_unlock(&group->fanotify_data.access_mutex);
205 kmem_cache_free(fanotify_response_event_cache, re); 214 kmem_cache_free(fanotify_response_event_cache, re);
206 event->response = FAN_ALLOW; 215 event->response = FAN_ALLOW;
@@ -257,24 +266,34 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
257 266
258 pr_debug("%s: group=%p event=%p\n", __func__, group, event); 267 pr_debug("%s: group=%p event=%p\n", __func__, group, event);
259 268
260 fd = fill_event_metadata(group, &fanotify_event_metadata, event); 269 ret = fill_event_metadata(group, &fanotify_event_metadata, event);
261 if (fd < 0) 270 if (ret < 0)
262 return fd; 271 goto out;
263 272
273 fd = fanotify_event_metadata.fd;
264 ret = prepare_for_access_response(group, event, fd); 274 ret = prepare_for_access_response(group, event, fd);
265 if (ret) 275 if (ret)
266 goto out_close_fd; 276 goto out_close_fd;
267 277
268 ret = -EFAULT; 278 ret = -EFAULT;
269 if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN)) 279 if (copy_to_user(buf, &fanotify_event_metadata,
280 fanotify_event_metadata.event_len))
270 goto out_kill_access_response; 281 goto out_kill_access_response;
271 282
272 return FAN_EVENT_METADATA_LEN; 283 return fanotify_event_metadata.event_len;
273 284
274out_kill_access_response: 285out_kill_access_response:
275 remove_access_response(group, event, fd); 286 remove_access_response(group, event, fd);
276out_close_fd: 287out_close_fd:
277 sys_close(fd); 288 if (fd != FAN_NOFD)
289 sys_close(fd);
290out:
291#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
292 if (event->mask & FAN_ALL_PERM_EVENTS) {
293 event->response = FAN_DENY;
294 wake_up(&group->fanotify_data.access_waitq);
295 }
296#endif
278 return ret; 297 return ret;
279} 298}
280 299
@@ -382,7 +401,7 @@ static int fanotify_release(struct inode *ignored, struct file *file)
382 401
383 mutex_lock(&group->fanotify_data.access_mutex); 402 mutex_lock(&group->fanotify_data.access_mutex);
384 403
385 group->fanotify_data.bypass_perm = true; 404 atomic_inc(&group->fanotify_data.bypass_perm);
386 405
387 list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) { 406 list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
388 pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group, 407 pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
@@ -586,11 +605,10 @@ static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
586{ 605{
587 struct fsnotify_mark *fsn_mark; 606 struct fsnotify_mark *fsn_mark;
588 __u32 added; 607 __u32 added;
608 int ret = 0;
589 609
590 fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); 610 fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
591 if (!fsn_mark) { 611 if (!fsn_mark) {
592 int ret;
593
594 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) 612 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
595 return -ENOSPC; 613 return -ENOSPC;
596 614
@@ -600,17 +618,16 @@ static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
600 618
601 fsnotify_init_mark(fsn_mark, fanotify_free_mark); 619 fsnotify_init_mark(fsn_mark, fanotify_free_mark);
602 ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0); 620 ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
603 if (ret) { 621 if (ret)
604 fanotify_free_mark(fsn_mark); 622 goto err;
605 return ret;
606 }
607 } 623 }
608 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); 624 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
609 fsnotify_put_mark(fsn_mark); 625
610 if (added & ~mnt->mnt_fsnotify_mask) 626 if (added & ~mnt->mnt_fsnotify_mask)
611 fsnotify_recalc_vfsmount_mask(mnt); 627 fsnotify_recalc_vfsmount_mask(mnt);
612 628err:
613 return 0; 629 fsnotify_put_mark(fsn_mark);
630 return ret;
614} 631}
615 632
616static int fanotify_add_inode_mark(struct fsnotify_group *group, 633static int fanotify_add_inode_mark(struct fsnotify_group *group,
@@ -619,6 +636,7 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
619{ 636{
620 struct fsnotify_mark *fsn_mark; 637 struct fsnotify_mark *fsn_mark;
621 __u32 added; 638 __u32 added;
639 int ret = 0;
622 640
623 pr_debug("%s: group=%p inode=%p\n", __func__, group, inode); 641 pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
624 642
@@ -634,8 +652,6 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
634 652
635 fsn_mark = fsnotify_find_inode_mark(group, inode); 653 fsn_mark = fsnotify_find_inode_mark(group, inode);
636 if (!fsn_mark) { 654 if (!fsn_mark) {
637 int ret;
638
639 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) 655 if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
640 return -ENOSPC; 656 return -ENOSPC;
641 657
@@ -645,16 +661,16 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
645 661
646 fsnotify_init_mark(fsn_mark, fanotify_free_mark); 662 fsnotify_init_mark(fsn_mark, fanotify_free_mark);
647 ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0); 663 ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
648 if (ret) { 664 if (ret)
649 fanotify_free_mark(fsn_mark); 665 goto err;
650 return ret;
651 }
652 } 666 }
653 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); 667 added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
654 fsnotify_put_mark(fsn_mark); 668
655 if (added & ~inode->i_fsnotify_mask) 669 if (added & ~inode->i_fsnotify_mask)
656 fsnotify_recalc_inode_mask(inode); 670 fsnotify_recalc_inode_mask(inode);
657 return 0; 671err:
672 fsnotify_put_mark(fsn_mark);
673 return ret;
658} 674}
659 675
660/* fanotify syscalls */ 676/* fanotify syscalls */
@@ -687,8 +703,10 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
687 703
688 /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */ 704 /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
689 group = fsnotify_alloc_group(&fanotify_fsnotify_ops); 705 group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
690 if (IS_ERR(group)) 706 if (IS_ERR(group)) {
707 free_uid(user);
691 return PTR_ERR(group); 708 return PTR_ERR(group);
709 }
692 710
693 group->fanotify_data.user = user; 711 group->fanotify_data.user = user;
694 atomic_inc(&user->fanotify_listeners); 712 atomic_inc(&user->fanotify_listeners);
@@ -698,6 +716,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
698 mutex_init(&group->fanotify_data.access_mutex); 716 mutex_init(&group->fanotify_data.access_mutex);
699 init_waitqueue_head(&group->fanotify_data.access_waitq); 717 init_waitqueue_head(&group->fanotify_data.access_waitq);
700 INIT_LIST_HEAD(&group->fanotify_data.access_list); 718 INIT_LIST_HEAD(&group->fanotify_data.access_list);
719 atomic_set(&group->fanotify_data.bypass_perm, 0);
701#endif 720#endif
702 switch (flags & FAN_ALL_CLASS_BITS) { 721 switch (flags & FAN_ALL_CLASS_BITS) {
703 case FAN_CLASS_NOTIF: 722 case FAN_CLASS_NOTIF:
@@ -764,8 +783,10 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
764 if (flags & ~FAN_ALL_MARK_FLAGS) 783 if (flags & ~FAN_ALL_MARK_FLAGS)
765 return -EINVAL; 784 return -EINVAL;
766 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { 785 switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
767 case FAN_MARK_ADD: 786 case FAN_MARK_ADD: /* fallthrough */
768 case FAN_MARK_REMOVE: 787 case FAN_MARK_REMOVE:
788 if (!mask)
789 return -EINVAL;
769 case FAN_MARK_FLUSH: 790 case FAN_MARK_FLUSH:
770 break; 791 break;
771 default: 792 default:
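
Taken together, the fanotify_user.c hunks change fill_event_metadata() to return 0 or a negative errno and to report the descriptor through the metadata itself, with FAN_NOFD standing in for overflow events so the error paths never close a descriptor that was never created. A rough, self-contained sketch of that calling convention (the struct layout and the create_fd stub are stand-ins, not the real fanotify code):

    #include <errno.h>

    #define FAN_NOFD (-1)   /* "no descriptor", as used for overflow events */

    struct metadata {
            unsigned int event_len;
            int fd;
    };

    /* stand-in for create_fd(); a real implementation may fail */
    static int create_fd_stub(void)
    {
            return -EMFILE;
    }

    /* returns 0 or a negative errno; the descriptor travels inside *m */
    static int fill_event_metadata(struct metadata *m, int overflow)
    {
            int ret = 0;

            m->event_len = sizeof(*m);
            if (overflow) {
                    m->fd = FAN_NOFD;          /* nothing for anyone to close */
            } else {
                    m->fd = create_fd_stub();
                    if (m->fd < 0)
                            ret = m->fd;
            }
            return ret;
    }

    static long copy_event(struct metadata *m, int overflow)
    {
            int ret = fill_event_metadata(m, overflow);

            if (ret < 0)
                    return ret;                /* no descriptor was created */
            /* ... copy *m to the caller; if that fails and m->fd != FAN_NOFD,
             *     the descriptor must be closed before returning ... */
            return m->event_len;
    }
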
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 444c305a468..4cd5d5d78f9 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -752,6 +752,7 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)
752 if (ret >= 0) 752 if (ret >= 0)
753 return ret; 753 return ret;
754 754
755 fsnotify_put_group(group);
755 atomic_dec(&user->inotify_devs); 756 atomic_dec(&user->inotify_devs);
756out_free_uid: 757out_free_uid:
757 free_uid(user); 758 free_uid(user);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index f1e962cb3b7..0d7c5540ad6 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -573,11 +573,14 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
573 /* this io's submitter should not have unlocked this before we could */ 573 /* this io's submitter should not have unlocked this before we could */
574 BUG_ON(!ocfs2_iocb_is_rw_locked(iocb)); 574 BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
575 575
576 if (ocfs2_iocb_is_sem_locked(iocb)) {
577 up_read(&inode->i_alloc_sem);
578 ocfs2_iocb_clear_sem_locked(iocb);
579 }
580
576 ocfs2_iocb_clear_rw_locked(iocb); 581 ocfs2_iocb_clear_rw_locked(iocb);
577 582
578 level = ocfs2_iocb_rw_locked_level(iocb); 583 level = ocfs2_iocb_rw_locked_level(iocb);
579 if (!level)
580 up_read(&inode->i_alloc_sem);
581 ocfs2_rw_unlock(inode, level); 584 ocfs2_rw_unlock(inode, level);
582 585
583 if (is_async) 586 if (is_async)
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h
index 76bfdfda691..eceb456037c 100644
--- a/fs/ocfs2/aops.h
+++ b/fs/ocfs2/aops.h
@@ -68,8 +68,27 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
68 else 68 else
69 clear_bit(1, (unsigned long *)&iocb->private); 69 clear_bit(1, (unsigned long *)&iocb->private);
70} 70}
71
72/*
 73 * Using a named enum to represent the lock-type bits stored in
 74 * iocb->private, which is going to be used for communication between
75 * ocfs2_dio_end_io() and ocfs2_file_aio_write/read().
76 */
77enum ocfs2_iocb_lock_bits {
78 OCFS2_IOCB_RW_LOCK = 0,
79 OCFS2_IOCB_RW_LOCK_LEVEL,
80 OCFS2_IOCB_SEM,
81 OCFS2_IOCB_NUM_LOCKS
82};
83
71#define ocfs2_iocb_clear_rw_locked(iocb) \ 84#define ocfs2_iocb_clear_rw_locked(iocb) \
72 clear_bit(0, (unsigned long *)&iocb->private) 85 clear_bit(OCFS2_IOCB_RW_LOCK, (unsigned long *)&iocb->private)
73#define ocfs2_iocb_rw_locked_level(iocb) \ 86#define ocfs2_iocb_rw_locked_level(iocb) \
74 test_bit(1, (unsigned long *)&iocb->private) 87 test_bit(OCFS2_IOCB_RW_LOCK_LEVEL, (unsigned long *)&iocb->private)
88#define ocfs2_iocb_set_sem_locked(iocb) \
89 set_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
90#define ocfs2_iocb_clear_sem_locked(iocb) \
91 clear_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
92#define ocfs2_iocb_is_sem_locked(iocb) \
93 test_bit(OCFS2_IOCB_SEM, (unsigned long *)&iocb->private)
75#endif /* OCFS2_FILE_H */ 94#endif /* OCFS2_FILE_H */
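
The new enum simply gives names to the bit positions ocfs2 packs into iocb->private so that the aio read/write paths and ocfs2_dio_end_io() can hand lock state to each other. A small self-contained illustration of the same store-flags-in-a-pointer-sized-word trick, using plain C instead of the kernel's set_bit/test_bit (types and names are made up for the example):

    #include <assert.h>

    enum iocb_lock_bits {
            IOCB_RW_LOCK = 0,
            IOCB_RW_LOCK_LEVEL,
            IOCB_SEM,
    };

    struct kiocb_like { void *private; };

    #define iocb_set_bit(iocb, b)   ((iocb)->private = \
            (void *)((unsigned long)(iocb)->private |  (1UL << (b))))
    #define iocb_clear_bit(iocb, b) ((iocb)->private = \
            (void *)((unsigned long)(iocb)->private & ~(1UL << (b))))
    #define iocb_test_bit(iocb, b)  \
            (!!((unsigned long)(iocb)->private & (1UL << (b))))

    int main(void)
    {
            struct kiocb_like iocb = { 0 };

            iocb_set_bit(&iocb, IOCB_SEM);          /* submitter took the semaphore */
            assert(iocb_test_bit(&iocb, IOCB_SEM)); /* completion sees it... */
            iocb_clear_bit(&iocb, IOCB_SEM);        /* ...and drops it */
            assert(!iocb_test_bit(&iocb, IOCB_SEM));
            return 0;
    }
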
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 52c7557f3e2..9f26ac9be2a 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1964,8 +1964,10 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
1964 if (reg == NULL) 1964 if (reg == NULL)
1965 return ERR_PTR(-ENOMEM); 1965 return ERR_PTR(-ENOMEM);
1966 1966
1967 if (strlen(name) > O2HB_MAX_REGION_NAME_LEN) 1967 if (strlen(name) > O2HB_MAX_REGION_NAME_LEN) {
1968 return ERR_PTR(-ENAMETOOLONG); 1968 ret = -ENAMETOOLONG;
1969 goto free;
1970 }
1969 1971
1970 spin_lock(&o2hb_live_lock); 1972 spin_lock(&o2hb_live_lock);
1971 reg->hr_region_num = 0; 1973 reg->hr_region_num = 0;
@@ -1974,7 +1976,8 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
1974 O2NM_MAX_REGIONS); 1976 O2NM_MAX_REGIONS);
1975 if (reg->hr_region_num >= O2NM_MAX_REGIONS) { 1977 if (reg->hr_region_num >= O2NM_MAX_REGIONS) {
1976 spin_unlock(&o2hb_live_lock); 1978 spin_unlock(&o2hb_live_lock);
1977 return ERR_PTR(-EFBIG); 1979 ret = -EFBIG;
1980 goto free;
1978 } 1981 }
1979 set_bit(reg->hr_region_num, o2hb_region_bitmap); 1982 set_bit(reg->hr_region_num, o2hb_region_bitmap);
1980 } 1983 }
@@ -1986,10 +1989,13 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
1986 ret = o2hb_debug_region_init(reg, o2hb_debug_dir); 1989 ret = o2hb_debug_region_init(reg, o2hb_debug_dir);
1987 if (ret) { 1990 if (ret) {
1988 config_item_put(&reg->hr_item); 1991 config_item_put(&reg->hr_item);
1989 return ERR_PTR(ret); 1992 goto free;
1990 } 1993 }
1991 1994
1992 return &reg->hr_item; 1995 return &reg->hr_item;
1996free:
1997 kfree(reg);
1998 return ERR_PTR(ret);
1993} 1999}
1994 2000
1995static void o2hb_heartbeat_group_drop_item(struct config_group *group, 2001static void o2hb_heartbeat_group_drop_item(struct config_group *group,
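
The heartbeat fix routes every failure after the allocation through a single label so the region is actually freed; previously the -ENAMETOOLONG and -EFBIG returns leaked it. A generic userspace sketch of that single-exit error-path shape (the function and field names are hypothetical):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct region {
            char name[32];
    };

    static struct region *make_region(const char *name, int *err)
    {
            struct region *reg;
            int ret;

            reg = calloc(1, sizeof(*reg));
            if (!reg) {
                    *err = -ENOMEM;
                    return NULL;
            }

            if (strlen(name) >= sizeof(reg->name)) {
                    ret = -ENAMETOOLONG;
                    goto free;        /* every later failure funnels through here */
            }
            /* ... further setup; on failure: ret = -E...; goto free; ... */
            strcpy(reg->name, name);
            return reg;

    free:
            free(reg);                /* the allocation is no longer leaked */
            *err = ret;
            return NULL;
    }
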
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index c7fba396392..6c61771469a 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -113,10 +113,11 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
113 define_mask(QUOTA), 113 define_mask(QUOTA),
114 define_mask(REFCOUNT), 114 define_mask(REFCOUNT),
115 define_mask(BASTS), 115 define_mask(BASTS),
116 define_mask(RESERVATIONS),
117 define_mask(CLUSTER),
116 define_mask(ERROR), 118 define_mask(ERROR),
117 define_mask(NOTICE), 119 define_mask(NOTICE),
118 define_mask(KTHREAD), 120 define_mask(KTHREAD),
119 define_mask(RESERVATIONS),
120}; 121};
121 122
122static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, }; 123static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h
index ea2ed9f56c9..34d6544357d 100644
--- a/fs/ocfs2/cluster/masklog.h
+++ b/fs/ocfs2/cluster/masklog.h
@@ -81,7 +81,7 @@
81#include <linux/sched.h> 81#include <linux/sched.h>
82 82
83/* bits that are frequently given and infrequently matched in the low word */ 83/* bits that are frequently given and infrequently matched in the low word */
84/* NOTE: If you add a flag, you need to also update mlog.c! */ 84/* NOTE: If you add a flag, you need to also update masklog.c! */
85#define ML_ENTRY 0x0000000000000001ULL /* func call entry */ 85#define ML_ENTRY 0x0000000000000001ULL /* func call entry */
86#define ML_EXIT 0x0000000000000002ULL /* func call exit */ 86#define ML_EXIT 0x0000000000000002ULL /* func call exit */
87#define ML_TCP 0x0000000000000004ULL /* net cluster/tcp.c */ 87#define ML_TCP 0x0000000000000004ULL /* net cluster/tcp.c */
@@ -114,13 +114,14 @@
114#define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */ 114#define ML_XATTR 0x0000000020000000ULL /* ocfs2 extended attributes */
115#define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */ 115#define ML_QUOTA 0x0000000040000000ULL /* ocfs2 quota operations */
116#define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */ 116#define ML_REFCOUNT 0x0000000080000000ULL /* refcount tree operations */
117#define ML_BASTS 0x0000001000000000ULL /* dlmglue asts and basts */ 117#define ML_BASTS 0x0000000100000000ULL /* dlmglue asts and basts */
118#define ML_RESERVATIONS 0x0000000200000000ULL /* ocfs2 alloc reservations */
119#define ML_CLUSTER 0x0000000400000000ULL /* cluster stack */
120
118/* bits that are infrequently given and frequently matched in the high word */ 121/* bits that are infrequently given and frequently matched in the high word */
119#define ML_ERROR 0x0000000100000000ULL /* sent to KERN_ERR */ 122#define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */
 120#define ML_NOTICE 0x0000000200000000ULL /* sent to KERN_NOTICE */ 123#define ML_NOTICE 0x2000000000000000ULL /* sent to KERN_NOTICE */
121#define ML_KTHREAD 0x0000000400000000ULL /* kernel thread activity */ 124#define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */
122#define ML_RESERVATIONS 0x0000000800000000ULL /* ocfs2 alloc reservations */
123#define ML_CLUSTER 0x0000001000000000ULL /* cluster stack */
124 125
125#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE) 126#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
126#define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT) 127#define MLOG_INITIAL_NOT_MASK (ML_ENTRY|ML_EXIT)
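
Worth noting: in the old table ML_BASTS and ML_CLUSTER both carried the value 0x0000001000000000ULL, so the renumbering removes a collision as well as moving the frequently matched bits into the true high word. A compile-time check of the kind one might add to guard against such collisions, using the patched values (a sketch, not part of masklog.h):

    /* the renumbered values from the patch; the old table had ML_BASTS and
     * ML_CLUSTER both at 0x0000001000000000ULL */
    #define ML_BASTS        0x0000000100000000ULL
    #define ML_RESERVATIONS 0x0000000200000000ULL
    #define ML_CLUSTER      0x0000000400000000ULL
    #define ML_ERROR        0x1000000000000000ULL
    #define ML_NOTICE       0x2000000000000000ULL
    #define ML_KTHREAD      0x4000000000000000ULL

    /* every mask must be a distinct single bit and the two groups must not
     * overlap; a collision like the old one fails this check at compile time */
    _Static_assert((ML_BASTS & ML_CLUSTER) == 0 &&
                   ((ML_BASTS | ML_RESERVATIONS | ML_CLUSTER) &
                    (ML_ERROR | ML_NOTICE | ML_KTHREAD)) == 0,
                   "masklog bits collide");
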
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index edaded48e7e..895532ac4d9 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -476,7 +476,6 @@ static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode)
476 476
477out: 477out:
478 iput(inode); 478 iput(inode);
479 ocfs2_dentry_attach_gen(dentry);
480} 479}
481 480
482/* 481/*
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index c49f6de0e7a..d417b3f9b0c 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2461,8 +2461,10 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
2461 2461
2462 di->i_dx_root = cpu_to_le64(dr_blkno); 2462 di->i_dx_root = cpu_to_le64(dr_blkno);
2463 2463
2464 spin_lock(&OCFS2_I(dir)->ip_lock);
2464 OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL; 2465 OCFS2_I(dir)->ip_dyn_features |= OCFS2_INDEXED_DIR_FL;
2465 di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features); 2466 di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
2467 spin_unlock(&OCFS2_I(dir)->ip_lock);
2466 2468
2467 ocfs2_journal_dirty(handle, di_bh); 2469 ocfs2_journal_dirty(handle, di_bh);
2468 2470
@@ -4466,8 +4468,10 @@ static int ocfs2_dx_dir_remove_index(struct inode *dir,
4466 goto out_commit; 4468 goto out_commit;
4467 } 4469 }
4468 4470
4471 spin_lock(&OCFS2_I(dir)->ip_lock);
4469 OCFS2_I(dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL; 4472 OCFS2_I(dir)->ip_dyn_features &= ~OCFS2_INDEXED_DIR_FL;
4470 di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features); 4473 di->i_dyn_features = cpu_to_le16(OCFS2_I(dir)->ip_dyn_features);
4474 spin_unlock(&OCFS2_I(dir)->ip_lock);
4471 di->i_dx_root = cpu_to_le64(0ULL); 4475 di->i_dx_root = cpu_to_le64(0ULL);
4472 4476
4473 ocfs2_journal_dirty(handle, di_bh); 4477 ocfs2_journal_dirty(handle, di_bh);
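
Both dir.c hunks take ip_lock around the ip_dyn_features update so the flag flip and the copy into the on-disk inode happen atomically with respect to other updaters. A tiny pthread-based illustration of that update-and-snapshot-under-one-lock idea (a mutex stands in for the spinlock; all names are illustrative, and ip_lock is assumed to be initialised by the caller):

    #include <pthread.h>
    #include <stdint.h>

    #define INDEXED_DIR_FL 0x0008    /* illustrative flag value */

    struct inode_info {
            pthread_mutex_t ip_lock;       /* stands in for the ip_lock spinlock */
            uint16_t ip_dyn_features;      /* in-memory feature flags */
            uint16_t di_dyn_features;      /* stand-in for the on-disk copy */
    };

    static void attach_index(struct inode_info *ip)
    {
            pthread_mutex_lock(&ip->ip_lock);
            ip->ip_dyn_features |= INDEXED_DIR_FL;
            ip->di_dyn_features = ip->ip_dyn_features;   /* consistent snapshot */
            pthread_mutex_unlock(&ip->ip_lock);
    }

    static void remove_index(struct inode_info *ip)
    {
            pthread_mutex_lock(&ip->ip_lock);
            ip->ip_dyn_features &= ~INDEXED_DIR_FL;
            ip->di_dyn_features = ip->ip_dyn_features;
            pthread_mutex_unlock(&ip->ip_lock);
    }
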
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 58a93b95373..cc2aaa96cfe 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -959,7 +959,7 @@ static int dlm_match_regions(struct dlm_ctxt *dlm,
959 r += O2HB_MAX_REGION_NAME_LEN; 959 r += O2HB_MAX_REGION_NAME_LEN;
960 } 960 }
961 961
962 local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL); 962 local = kmalloc(sizeof(qr->qr_regions), GFP_ATOMIC);
963 if (!local) { 963 if (!local) {
964 status = -ENOMEM; 964 status = -ENOMEM;
965 goto bail; 965 goto bail;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index f564b0e5f80..59f0f6bdfc6 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2346,7 +2346,8 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2346 */ 2346 */
2347static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm, 2347static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2348 struct dlm_lock_resource *res, 2348 struct dlm_lock_resource *res,
2349 int *numlocks) 2349 int *numlocks,
2350 int *hasrefs)
2350{ 2351{
2351 int ret; 2352 int ret;
2352 int i; 2353 int i;
@@ -2356,6 +2357,9 @@ static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2356 2357
2357 assert_spin_locked(&res->spinlock); 2358 assert_spin_locked(&res->spinlock);
2358 2359
2360 *numlocks = 0;
2361 *hasrefs = 0;
2362
2359 ret = -EINVAL; 2363 ret = -EINVAL;
2360 if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 2364 if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2361 mlog(0, "cannot migrate lockres with unknown owner!\n"); 2365 mlog(0, "cannot migrate lockres with unknown owner!\n");
@@ -2386,7 +2390,13 @@ static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2386 } 2390 }
2387 2391
2388 *numlocks = count; 2392 *numlocks = count;
2389 mlog(0, "migrateable lockres having %d locks\n", *numlocks); 2393
2394 count = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
2395 if (count < O2NM_MAX_NODES)
2396 *hasrefs = 1;
2397
2398 mlog(0, "%s: res %.*s, Migrateable, locks %d, refs %d\n", dlm->name,
2399 res->lockname.len, res->lockname.name, *numlocks, *hasrefs);
2390 2400
2391leave: 2401leave:
2392 return ret; 2402 return ret;
@@ -2408,7 +2418,7 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2408 const char *name; 2418 const char *name;
2409 unsigned int namelen; 2419 unsigned int namelen;
2410 int mle_added = 0; 2420 int mle_added = 0;
2411 int numlocks; 2421 int numlocks, hasrefs;
2412 int wake = 0; 2422 int wake = 0;
2413 2423
2414 if (!dlm_grab(dlm)) 2424 if (!dlm_grab(dlm))
@@ -2417,13 +2427,13 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2417 name = res->lockname.name; 2427 name = res->lockname.name;
2418 namelen = res->lockname.len; 2428 namelen = res->lockname.len;
2419 2429
2420 mlog(0, "migrating %.*s to %u\n", namelen, name, target); 2430 mlog(0, "%s: Migrating %.*s to %u\n", dlm->name, namelen, name, target);
2421 2431
2422 /* 2432 /*
2423 * ensure this lockres is a proper candidate for migration 2433 * ensure this lockres is a proper candidate for migration
2424 */ 2434 */
2425 spin_lock(&res->spinlock); 2435 spin_lock(&res->spinlock);
2426 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks); 2436 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs);
2427 if (ret < 0) { 2437 if (ret < 0) {
2428 spin_unlock(&res->spinlock); 2438 spin_unlock(&res->spinlock);
2429 goto leave; 2439 goto leave;
@@ -2431,10 +2441,8 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2431 spin_unlock(&res->spinlock); 2441 spin_unlock(&res->spinlock);
2432 2442
2433 /* no work to do */ 2443 /* no work to do */
2434 if (numlocks == 0) { 2444 if (numlocks == 0 && !hasrefs)
2435 mlog(0, "no locks were found on this lockres! done!\n");
2436 goto leave; 2445 goto leave;
2437 }
2438 2446
2439 /* 2447 /*
2440 * preallocate up front 2448 * preallocate up front
@@ -2459,14 +2467,14 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2459 * find a node to migrate the lockres to 2467 * find a node to migrate the lockres to
2460 */ 2468 */
2461 2469
2462 mlog(0, "picking a migration node\n");
2463 spin_lock(&dlm->spinlock); 2470 spin_lock(&dlm->spinlock);
2464 /* pick a new node */ 2471 /* pick a new node */
2465 if (!test_bit(target, dlm->domain_map) || 2472 if (!test_bit(target, dlm->domain_map) ||
2466 target >= O2NM_MAX_NODES) { 2473 target >= O2NM_MAX_NODES) {
2467 target = dlm_pick_migration_target(dlm, res); 2474 target = dlm_pick_migration_target(dlm, res);
2468 } 2475 }
2469 mlog(0, "node %u chosen for migration\n", target); 2476 mlog(0, "%s: res %.*s, Node %u chosen for migration\n", dlm->name,
2477 namelen, name, target);
2470 2478
2471 if (target >= O2NM_MAX_NODES || 2479 if (target >= O2NM_MAX_NODES ||
2472 !test_bit(target, dlm->domain_map)) { 2480 !test_bit(target, dlm->domain_map)) {
@@ -2667,7 +2675,7 @@ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2667{ 2675{
2668 int ret; 2676 int ret;
2669 int lock_dropped = 0; 2677 int lock_dropped = 0;
2670 int numlocks; 2678 int numlocks, hasrefs;
2671 2679
2672 spin_lock(&res->spinlock); 2680 spin_lock(&res->spinlock);
2673 if (res->owner != dlm->node_num) { 2681 if (res->owner != dlm->node_num) {
@@ -2681,8 +2689,8 @@ int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2681 } 2689 }
2682 2690
2683 /* No need to migrate a lockres having no locks */ 2691 /* No need to migrate a lockres having no locks */
2684 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks); 2692 ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs);
2685 if (ret >= 0 && numlocks == 0) { 2693 if (ret >= 0 && numlocks == 0 && !hasrefs) {
2686 spin_unlock(&res->spinlock); 2694 spin_unlock(&res->spinlock);
2687 goto leave; 2695 goto leave;
2688 } 2696 }
@@ -2915,6 +2923,12 @@ static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2915 } 2923 }
2916 queue++; 2924 queue++;
2917 } 2925 }
2926
2927 nodenum = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
2928 if (nodenum < O2NM_MAX_NODES) {
2929 spin_unlock(&res->spinlock);
2930 return nodenum;
2931 }
2918 spin_unlock(&res->spinlock); 2932 spin_unlock(&res->spinlock);
2919 mlog(0, "have not found a suitable target yet! checking domain map\n"); 2933 mlog(0, "have not found a suitable target yet! checking domain map\n");
2920 2934
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 77b4c04a280..f6cba566429 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2241,11 +2241,15 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
2241 2241
2242 mutex_lock(&inode->i_mutex); 2242 mutex_lock(&inode->i_mutex);
2243 2243
2244 ocfs2_iocb_clear_sem_locked(iocb);
2245
2244relock: 2246relock:
2245 /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */ 2247 /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
2246 if (direct_io) { 2248 if (direct_io) {
2247 down_read(&inode->i_alloc_sem); 2249 down_read(&inode->i_alloc_sem);
2248 have_alloc_sem = 1; 2250 have_alloc_sem = 1;
2251 /* communicate with ocfs2_dio_end_io */
2252 ocfs2_iocb_set_sem_locked(iocb);
2249 } 2253 }
2250 2254
2251 /* 2255 /*
@@ -2382,8 +2386,10 @@ out:
2382 ocfs2_rw_unlock(inode, rw_level); 2386 ocfs2_rw_unlock(inode, rw_level);
2383 2387
2384out_sems: 2388out_sems:
2385 if (have_alloc_sem) 2389 if (have_alloc_sem) {
2386 up_read(&inode->i_alloc_sem); 2390 up_read(&inode->i_alloc_sem);
2391 ocfs2_iocb_clear_sem_locked(iocb);
2392 }
2387 2393
2388 mutex_unlock(&inode->i_mutex); 2394 mutex_unlock(&inode->i_mutex);
2389 2395
@@ -2527,6 +2533,8 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2527 goto bail; 2533 goto bail;
2528 } 2534 }
2529 2535
2536 ocfs2_iocb_clear_sem_locked(iocb);
2537
2530 /* 2538 /*
2531 * buffered reads protect themselves in ->readpage(). O_DIRECT reads 2539 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
2532 * need locks to protect pending reads from racing with truncate. 2540 * need locks to protect pending reads from racing with truncate.
@@ -2534,6 +2542,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2534 if (filp->f_flags & O_DIRECT) { 2542 if (filp->f_flags & O_DIRECT) {
2535 down_read(&inode->i_alloc_sem); 2543 down_read(&inode->i_alloc_sem);
2536 have_alloc_sem = 1; 2544 have_alloc_sem = 1;
2545 ocfs2_iocb_set_sem_locked(iocb);
2537 2546
2538 ret = ocfs2_rw_lock(inode, 0); 2547 ret = ocfs2_rw_lock(inode, 0);
2539 if (ret < 0) { 2548 if (ret < 0) {
@@ -2575,8 +2584,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2575 } 2584 }
2576 2585
2577bail: 2586bail:
2578 if (have_alloc_sem) 2587 if (have_alloc_sem) {
2579 up_read(&inode->i_alloc_sem); 2588 up_read(&inode->i_alloc_sem);
2589 ocfs2_iocb_clear_sem_locked(iocb);
2590 }
2580 if (rw_level != -1) 2591 if (rw_level != -1)
2581 ocfs2_rw_unlock(inode, rw_level); 2592 ocfs2_rw_unlock(inode, rw_level);
2582 mlog_exit(ret); 2593 mlog_exit(ret);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index d8408217e3b..70dd3b1798f 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -159,7 +159,9 @@ struct ocfs2_lock_res {
159 char l_name[OCFS2_LOCK_ID_MAX_LEN]; 159 char l_name[OCFS2_LOCK_ID_MAX_LEN];
160 unsigned int l_ro_holders; 160 unsigned int l_ro_holders;
161 unsigned int l_ex_holders; 161 unsigned int l_ex_holders;
162 unsigned char l_level; 162 signed char l_level;
163 signed char l_requested;
164 signed char l_blocking;
163 165
164 /* Data packed - type enum ocfs2_lock_type */ 166 /* Data packed - type enum ocfs2_lock_type */
165 unsigned char l_type; 167 unsigned char l_type;
@@ -169,8 +171,6 @@ struct ocfs2_lock_res {
169 unsigned char l_action; 171 unsigned char l_action;
170 /* Data packed - enum type ocfs2_unlock_action */ 172 /* Data packed - enum type ocfs2_unlock_action */
171 unsigned char l_unlock_action; 173 unsigned char l_unlock_action;
172 unsigned char l_requested;
173 unsigned char l_blocking;
174 unsigned int l_pending_gen; 174 unsigned int l_pending_gen;
175 175
176 spinlock_t l_lock; 176 spinlock_t l_lock;
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index c2e4f8222e2..bf2e7764920 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -350,7 +350,7 @@ enum {
350#define OCFS2_LAST_LOCAL_SYSTEM_INODE LOCAL_GROUP_QUOTA_SYSTEM_INODE 350#define OCFS2_LAST_LOCAL_SYSTEM_INODE LOCAL_GROUP_QUOTA_SYSTEM_INODE
351 NUM_SYSTEM_INODES 351 NUM_SYSTEM_INODES
352}; 352};
353#define NUM_GLOBAL_SYSTEM_INODES OCFS2_LAST_GLOBAL_SYSTEM_INODE 353#define NUM_GLOBAL_SYSTEM_INODES OCFS2_FIRST_LOCAL_SYSTEM_INODE
354#define NUM_LOCAL_SYSTEM_INODES \ 354#define NUM_LOCAL_SYSTEM_INODES \
355 (NUM_SYSTEM_INODES - OCFS2_FIRST_LOCAL_SYSTEM_INODE) 355 (NUM_SYSTEM_INODES - OCFS2_FIRST_LOCAL_SYSTEM_INODE)
356 356
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index 252e7c82f92..a5ebe421195 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -190,7 +190,7 @@ static struct ocfs2_live_connection *ocfs2_connection_find(const char *name)
190 return c; 190 return c;
191 } 191 }
192 192
193 return c; 193 return NULL;
194} 194}
195 195
196/* 196/*
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index f02c0ef3157..cfeab7ce369 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -41,7 +41,6 @@
41#include <linux/mount.h> 41#include <linux/mount.h>
42#include <linux/seq_file.h> 42#include <linux/seq_file.h>
43#include <linux/quotaops.h> 43#include <linux/quotaops.h>
44#include <linux/smp_lock.h>
45 44
46#define MLOG_MASK_PREFIX ML_SUPER 45#define MLOG_MASK_PREFIX ML_SUPER
47#include <cluster/masklog.h> 46#include <cluster/masklog.h>
diff --git a/fs/pipe.c b/fs/pipe.c
index a8012a95572..04629f36e39 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1199,12 +1199,24 @@ int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
1199 return ret; 1199 return ret;
1200} 1200}
1201 1201
1202/*
1203 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1204 * location, so checking ->i_pipe is not enough to verify that this is a
1205 * pipe.
1206 */
1207struct pipe_inode_info *get_pipe_info(struct file *file)
1208{
1209 struct inode *i = file->f_path.dentry->d_inode;
1210
1211 return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL;
1212}
1213
1202long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) 1214long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
1203{ 1215{
1204 struct pipe_inode_info *pipe; 1216 struct pipe_inode_info *pipe;
1205 long ret; 1217 long ret;
1206 1218
1207 pipe = file->f_path.dentry->d_inode->i_pipe; 1219 pipe = get_pipe_info(file);
1208 if (!pipe) 1220 if (!pipe)
1209 return -EBADF; 1221 return -EBADF;
1210 1222
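
Because i_pipe shares its storage with i_bdev and i_cdev, the S_ISFIFO() check is what actually proves the inode is a pipe before the pointer is trusted. A userspace analogue that answers the same question for a file descriptor via fstat() (standard POSIX, not kernel code):

    #include <sys/stat.h>
    #include <unistd.h>
    #include <stdio.h>

    /* return 1 if fd refers to a FIFO/pipe, 0 otherwise, -1 on error */
    static int fd_is_pipe(int fd)
    {
            struct stat st;

            if (fstat(fd, &st) < 0)
                    return -1;
            return S_ISFIFO(st.st_mode) ? 1 : 0;
    }

    int main(void)
    {
            int fds[2];

            if (pipe(fds) == 0)
                    printf("read end is a pipe: %d\n", fd_is_pipe(fds[0]));
            return 0;
    }
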
diff --git a/fs/proc/base.c b/fs/proc/base.c
index f3d02ca461e..182845147fe 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1574,7 +1574,7 @@ static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
1574 if (!tmp) 1574 if (!tmp)
1575 return -ENOMEM; 1575 return -ENOMEM;
1576 1576
1577 pathname = d_path_with_unreachable(path, tmp, PAGE_SIZE); 1577 pathname = d_path(path, tmp, PAGE_SIZE);
1578 len = PTR_ERR(pathname); 1578 len = PTR_ERR(pathname);
1579 if (IS_ERR(pathname)) 1579 if (IS_ERR(pathname))
1580 goto out; 1580 goto out;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 9c2b5f48487..3ddb6068177 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -16,7 +16,6 @@
16#include <linux/limits.h> 16#include <linux/limits.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/smp_lock.h>
20#include <linux/sysctl.h> 19#include <linux/sysctl.h>
21#include <linux/slab.h> 20#include <linux/slab.h>
22 21
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index da6b01d70f0..c126c83b9a4 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -706,6 +706,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
706 * skip over unmapped regions. 706 * skip over unmapped regions.
707 */ 707 */
708#define PAGEMAP_WALK_SIZE (PMD_SIZE) 708#define PAGEMAP_WALK_SIZE (PMD_SIZE)
709#define PAGEMAP_WALK_MASK (PMD_MASK)
709static ssize_t pagemap_read(struct file *file, char __user *buf, 710static ssize_t pagemap_read(struct file *file, char __user *buf,
710 size_t count, loff_t *ppos) 711 size_t count, loff_t *ppos)
711{ 712{
@@ -776,7 +777,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
776 unsigned long end; 777 unsigned long end;
777 778
778 pm.pos = 0; 779 pm.pos = 0;
779 end = start_vaddr + PAGEMAP_WALK_SIZE; 780 end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
780 /* overflow ? */ 781 /* overflow ? */
781 if (end < start_vaddr || end > end_vaddr) 782 if (end < start_vaddr || end > end_vaddr)
782 end = end_vaddr; 783 end = end_vaddr;
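
With PAGEMAP_WALK_MASK, the end of each chunk is rounded down to a PMD boundary instead of always being start + PMD_SIZE, so a read that starts mid-PMD cannot step past the huge-page boundary it is about to walk. The arithmetic, demonstrated with an illustrative 2 MiB PMD:

    #include <stdio.h>

    #define WALK_SIZE (2UL << 20)          /* 2 MiB, analogous to PMD_SIZE */
    #define WALK_MASK (~(WALK_SIZE - 1))   /* analogous to PMD_MASK */

    int main(void)
    {
            unsigned long start = 0x00301000;   /* not PMD aligned */
            unsigned long end   = (start + WALK_SIZE) & WALK_MASK;

            /* end lands on the next 2 MiB boundary (0x00400000),
             * not at start + 2 MiB */
            printf("start=%#lx end=%#lx\n", start, end);
            return 0;
    }
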
diff --git a/fs/read_write.c b/fs/read_write.c
index 431a0ed610c..5d431bacbea 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -9,7 +9,6 @@
9#include <linux/fcntl.h> 9#include <linux/fcntl.h>
10#include <linux/file.h> 10#include <linux/file.h>
11#include <linux/uio.h> 11#include <linux/uio.h>
12#include <linux/smp_lock.h>
13#include <linux/fsnotify.h> 12#include <linux/fsnotify.h>
14#include <linux/security.h> 13#include <linux/security.h>
15#include <linux/module.h> 14#include <linux/module.h>
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 41656d40dc5..0bae036831e 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -8,7 +8,6 @@
8#include <linux/reiserfs_acl.h> 8#include <linux/reiserfs_acl.h>
9#include <linux/reiserfs_xattr.h> 9#include <linux/reiserfs_xattr.h>
10#include <linux/exportfs.h> 10#include <linux/exportfs.h>
11#include <linux/smp_lock.h>
12#include <linux/pagemap.h> 11#include <linux/pagemap.h>
13#include <linux/highmem.h> 12#include <linux/highmem.h>
14#include <linux/slab.h> 13#include <linux/slab.h>
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index adf22b485ce..79265fdc317 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -9,7 +9,6 @@
9#include <linux/time.h> 9#include <linux/time.h>
10#include <asm/uaccess.h> 10#include <asm/uaccess.h>
11#include <linux/pagemap.h> 11#include <linux/pagemap.h>
12#include <linux/smp_lock.h>
13#include <linux/compat.h> 12#include <linux/compat.h>
14 13
15/* 14/*
@@ -184,12 +183,11 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
184 return 0; 183 return 0;
185 } 184 }
186 185
187 /* we need to make sure nobody is changing the file size beneath
188 ** us
189 */
190 reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
191 depth = reiserfs_write_lock_once(inode->i_sb); 186 depth = reiserfs_write_lock_once(inode->i_sb);
192 187
188 /* we need to make sure nobody is changing the file size beneath us */
189 reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
190
193 write_from = inode->i_size & (blocksize - 1); 191 write_from = inode->i_size & (blocksize - 1);
194 /* if we are on a block boundary, we are already unpacked. */ 192 /* if we are on a block boundary, we are already unpacked. */
195 if (write_from == 0) { 193 if (write_from == 0) {
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 076c8b19468..d31bce1a9f9 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -43,7 +43,6 @@
43#include <linux/fcntl.h> 43#include <linux/fcntl.h>
44#include <linux/stat.h> 44#include <linux/stat.h>
45#include <linux/string.h> 45#include <linux/string.h>
46#include <linux/smp_lock.h>
47#include <linux/buffer_head.h> 46#include <linux/buffer_head.h>
48#include <linux/workqueue.h> 47#include <linux/workqueue.h>
49#include <linux/writeback.h> 48#include <linux/writeback.h>
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 3bf7a6457f4..b243117b875 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -28,7 +28,6 @@
28#include <linux/mount.h> 28#include <linux/mount.h>
29#include <linux/namei.h> 29#include <linux/namei.h>
30#include <linux/crc32.h> 30#include <linux/crc32.h>
31#include <linux/smp_lock.h>
32 31
33struct file_system_type reiserfs_fs_type; 32struct file_system_type reiserfs_fs_type;
34 33
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 536d697a8a2..90d2fcb67a3 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -472,7 +472,9 @@ int reiserfs_acl_chmod(struct inode *inode)
472 struct reiserfs_transaction_handle th; 472 struct reiserfs_transaction_handle th;
473 size_t size = reiserfs_xattr_nblocks(inode, 473 size_t size = reiserfs_xattr_nblocks(inode,
474 reiserfs_acl_size(clone->a_count)); 474 reiserfs_acl_size(clone->a_count));
475 reiserfs_write_lock(inode->i_sb); 475 int depth;
476
477 depth = reiserfs_write_lock_once(inode->i_sb);
476 error = journal_begin(&th, inode->i_sb, size * 2); 478 error = journal_begin(&th, inode->i_sb, size * 2);
477 if (!error) { 479 if (!error) {
478 int error2; 480 int error2;
@@ -482,7 +484,7 @@ int reiserfs_acl_chmod(struct inode *inode)
482 if (error2) 484 if (error2)
483 error = error2; 485 error = error2;
484 } 486 }
485 reiserfs_write_unlock(inode->i_sb); 487 reiserfs_write_unlock_once(inode->i_sb, depth);
486 } 488 }
487 posix_acl_release(clone); 489 posix_acl_release(clone);
488 return error; 490 return error;
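
reiserfs_write_lock_once()/reiserfs_write_unlock_once() let a path take the per-superblock write lock whether or not it is already held and then restore exactly the state it found, which is why the ACL chmod path switches to the pair here. A much-simplified recursion-counter sketch of that lock-once idea (single-threaded illustration only; the real helpers track per-task depth and are not reproduced here):

    static int lock_depth;   /* stand-in for the per-superblock lock state */

    /* take the lock only if it is not held yet; report what we found */
    static int write_lock_once(void)
    {
            int depth = lock_depth;

            if (depth == 0)
                    lock_depth = 1;         /* really acquire here */
            return depth;
    }

    /* drop the lock only if this call path was the one that took it */
    static void write_unlock_once(int depth)
    {
            if (depth == 0)
                    lock_depth = 0;         /* really release here */
    }
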
diff --git a/fs/splice.c b/fs/splice.c
index 8f1dfaecc8f..ce2f02579e3 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1311,18 +1311,6 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
1311static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, 1311static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
1312 struct pipe_inode_info *opipe, 1312 struct pipe_inode_info *opipe,
1313 size_t len, unsigned int flags); 1313 size_t len, unsigned int flags);
1314/*
1315 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1316 * location, so checking ->i_pipe is not enough to verify that this is a
1317 * pipe.
1318 */
1319static inline struct pipe_inode_info *pipe_info(struct inode *inode)
1320{
1321 if (S_ISFIFO(inode->i_mode))
1322 return inode->i_pipe;
1323
1324 return NULL;
1325}
1326 1314
1327/* 1315/*
1328 * Determine where to splice to/from. 1316 * Determine where to splice to/from.
@@ -1336,8 +1324,8 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1336 loff_t offset, *off; 1324 loff_t offset, *off;
1337 long ret; 1325 long ret;
1338 1326
1339 ipipe = pipe_info(in->f_path.dentry->d_inode); 1327 ipipe = get_pipe_info(in);
1340 opipe = pipe_info(out->f_path.dentry->d_inode); 1328 opipe = get_pipe_info(out);
1341 1329
1342 if (ipipe && opipe) { 1330 if (ipipe && opipe) {
1343 if (off_in || off_out) 1331 if (off_in || off_out)
@@ -1555,7 +1543,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
1555 int error; 1543 int error;
1556 long ret; 1544 long ret;
1557 1545
1558 pipe = pipe_info(file->f_path.dentry->d_inode); 1546 pipe = get_pipe_info(file);
1559 if (!pipe) 1547 if (!pipe)
1560 return -EBADF; 1548 return -EBADF;
1561 1549
@@ -1642,7 +1630,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
1642 }; 1630 };
1643 long ret; 1631 long ret;
1644 1632
1645 pipe = pipe_info(file->f_path.dentry->d_inode); 1633 pipe = get_pipe_info(file);
1646 if (!pipe) 1634 if (!pipe)
1647 return -EBADF; 1635 return -EBADF;
1648 1636
@@ -2022,8 +2010,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
2022static long do_tee(struct file *in, struct file *out, size_t len, 2010static long do_tee(struct file *in, struct file *out, size_t len,
2023 unsigned int flags) 2011 unsigned int flags)
2024{ 2012{
2025 struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode); 2013 struct pipe_inode_info *ipipe = get_pipe_info(in);
2026 struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode); 2014 struct pipe_inode_info *opipe = get_pipe_info(out);
2027 int ret = -EINVAL; 2015 int ret = -EINVAL;
2028 2016
2029 /* 2017 /*
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 7d287afccde..691f61223ed 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -934,7 +934,6 @@ xfs_aops_discard_page(
934 struct xfs_inode *ip = XFS_I(inode); 934 struct xfs_inode *ip = XFS_I(inode);
935 struct buffer_head *bh, *head; 935 struct buffer_head *bh, *head;
936 loff_t offset = page_offset(page); 936 loff_t offset = page_offset(page);
937 ssize_t len = 1 << inode->i_blkbits;
938 937
939 if (!xfs_is_delayed_page(page, IO_DELAY)) 938 if (!xfs_is_delayed_page(page, IO_DELAY))
940 goto out_invalidate; 939 goto out_invalidate;
@@ -949,58 +948,14 @@ xfs_aops_discard_page(
949 xfs_ilock(ip, XFS_ILOCK_EXCL); 948 xfs_ilock(ip, XFS_ILOCK_EXCL);
950 bh = head = page_buffers(page); 949 bh = head = page_buffers(page);
951 do { 950 do {
952 int done;
953 xfs_fileoff_t offset_fsb;
954 xfs_bmbt_irec_t imap;
955 int nimaps = 1;
956 int error; 951 int error;
957 xfs_fsblock_t firstblock; 952 xfs_fileoff_t start_fsb;
958 xfs_bmap_free_t flist;
959 953
960 if (!buffer_delay(bh)) 954 if (!buffer_delay(bh))
961 goto next_buffer; 955 goto next_buffer;
962 956
963 offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset); 957 start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
964 958 error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
965 /*
966 * Map the range first and check that it is a delalloc extent
967 * before trying to unmap the range. Otherwise we will be
968 * trying to remove a real extent (which requires a
969 * transaction) or a hole, which is probably a bad idea...
970 */
971 error = xfs_bmapi(NULL, ip, offset_fsb, 1,
972 XFS_BMAPI_ENTIRE, NULL, 0, &imap,
973 &nimaps, NULL);
974
975 if (error) {
976 /* something screwed, just bail */
977 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
978 xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
979 "page discard failed delalloc mapping lookup.");
980 }
981 break;
982 }
983 if (!nimaps) {
984 /* nothing there */
985 goto next_buffer;
986 }
987 if (imap.br_startblock != DELAYSTARTBLOCK) {
988 /* been converted, ignore */
989 goto next_buffer;
990 }
991 WARN_ON(imap.br_blockcount == 0);
992
993 /*
994 * Note: while we initialise the firstblock/flist pair, they
995 * should never be used because blocks should never be
996 * allocated or freed for a delalloc extent and hence we need
997 * don't cancel or finish them after the xfs_bunmapi() call.
998 */
999 xfs_bmap_init(&flist, &firstblock);
1000 error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
1001 &flist, &done);
1002
1003 ASSERT(!flist.xbf_count && !flist.xbf_first);
1004 if (error) { 959 if (error) {
1005 /* something screwed, just bail */ 960 /* something screwed, just bail */
1006 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 961 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
@@ -1010,7 +965,7 @@ xfs_aops_discard_page(
1010 break; 965 break;
1011 } 966 }
1012next_buffer: 967next_buffer:
1013 offset += len; 968 offset += 1 << inode->i_blkbits;
1014 969
1015 } while ((bh = bh->b_this_page) != head); 970 } while ((bh = bh->b_this_page) != head);
1016 971
@@ -1505,11 +1460,42 @@ xfs_vm_write_failed(
1505 struct inode *inode = mapping->host; 1460 struct inode *inode = mapping->host;
1506 1461
1507 if (to > inode->i_size) { 1462 if (to > inode->i_size) {
1508 struct iattr ia = { 1463 /*
1509 .ia_valid = ATTR_SIZE | ATTR_FORCE, 1464 * punch out the delalloc blocks we have already allocated. We
1510 .ia_size = inode->i_size, 1465 * don't call xfs_setattr() to do this as we may be in the
1511 }; 1466 * middle of a multi-iovec write and so the vfs inode->i_size
1512 xfs_setattr(XFS_I(inode), &ia, XFS_ATTR_NOLOCK); 1467 * will not match the xfs ip->i_size and so it will zero too
 1468 * much. Hence we just truncate the page cache to zero what is
1469 * necessary and punch the delalloc blocks directly.
1470 */
1471 struct xfs_inode *ip = XFS_I(inode);
1472 xfs_fileoff_t start_fsb;
1473 xfs_fileoff_t end_fsb;
1474 int error;
1475
1476 truncate_pagecache(inode, to, inode->i_size);
1477
1478 /*
1479 * Check if there are any blocks that are outside of i_size
1480 * that need to be trimmed back.
1481 */
1482 start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1;
1483 end_fsb = XFS_B_TO_FSB(ip->i_mount, to);
1484 if (end_fsb <= start_fsb)
1485 return;
1486
1487 xfs_ilock(ip, XFS_ILOCK_EXCL);
1488 error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
1489 end_fsb - start_fsb);
1490 if (error) {
1491 /* something screwed, just bail */
1492 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1493 xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
1494 "xfs_vm_write_failed: unable to clean up ino %lld",
1495 ip->i_ino);
1496 }
1497 }
1498 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1513 } 1499 }
1514} 1500}
1515 1501
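
On a failed write, the new code truncates the page cache back to i_size and then punches only the delalloc blocks that lie wholly beyond it, converting byte offsets to filesystem blocks and returning early when nothing extends past EOF. A small demonstration of that byte-to-block range computation, assuming 4 KiB blocks purely for illustration:

    #include <stdio.h>

    #define BLOCK_SHIFT 12   /* 4 KiB blocks, illustrative */
    /* round a byte count up to whole blocks, like XFS_B_TO_FSB() */
    #define B_TO_FSB(b) (((b) + (1ULL << BLOCK_SHIFT) - 1) >> BLOCK_SHIFT)

    int main(void)
    {
            unsigned long long isize = 10000;   /* current on-disk size */
            unsigned long long to    = 250000;  /* failed write extended to here */

            unsigned long long start_fsb = B_TO_FSB(isize) + 1;
            unsigned long long end_fsb   = B_TO_FSB(to);

            if (end_fsb <= start_fsb) {
                    printf("nothing beyond EOF to punch\n");
                    return 0;
            }
            printf("punch blocks [%llu, %llu)\n", start_fsb, end_fsb);
            return 0;
    }
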
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index aa1d353def2..4c5deb6e9e3 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -488,29 +488,16 @@ found:
488 spin_unlock(&pag->pag_buf_lock); 488 spin_unlock(&pag->pag_buf_lock);
489 xfs_perag_put(pag); 489 xfs_perag_put(pag);
490 490
491 /* Attempt to get the semaphore without sleeping, 491 if (xfs_buf_cond_lock(bp)) {
492 * if this does not work then we need to drop the 492 /* failed, so wait for the lock if requested. */
493 * spinlock and do a hard attempt on the semaphore.
494 */
495 if (down_trylock(&bp->b_sema)) {
496 if (!(flags & XBF_TRYLOCK)) { 493 if (!(flags & XBF_TRYLOCK)) {
497 /* wait for buffer ownership */
498 xfs_buf_lock(bp); 494 xfs_buf_lock(bp);
499 XFS_STATS_INC(xb_get_locked_waited); 495 XFS_STATS_INC(xb_get_locked_waited);
500 } else { 496 } else {
501 /* We asked for a trylock and failed, no need
502 * to look at file offset and length here, we
503 * know that this buffer at least overlaps our
504 * buffer and is locked, therefore our buffer
505 * either does not exist, or is this buffer.
506 */
507 xfs_buf_rele(bp); 497 xfs_buf_rele(bp);
508 XFS_STATS_INC(xb_busy_locked); 498 XFS_STATS_INC(xb_busy_locked);
509 return NULL; 499 return NULL;
510 } 500 }
511 } else {
512 /* trylock worked */
513 XB_SET_OWNER(bp);
514 } 501 }
515 502
516 if (bp->b_flags & XBF_STALE) { 503 if (bp->b_flags & XBF_STALE) {
@@ -876,10 +863,18 @@ xfs_buf_rele(
876 */ 863 */
877 864
878/* 865/*
879 * Locks a buffer object, if it is not already locked. 866 * Locks a buffer object, if it is not already locked. Note that this in
880 * Note that this in no way locks the underlying pages, so it is only 867 * no way locks the underlying pages, so it is only useful for
881 * useful for synchronizing concurrent use of buffer objects, not for 868 * synchronizing concurrent use of buffer objects, not for synchronizing
882 * synchronizing independent access to the underlying pages. 869 * independent access to the underlying pages.
870 *
871 * If we come across a stale, pinned, locked buffer, we know that we are
872 * being asked to lock a buffer that has been reallocated. Because it is
873 * pinned, we know that the log has not been pushed to disk and hence it
874 * will still be locked. Rather than continuing to have trylock attempts
875 * fail until someone else pushes the log, push it ourselves before
876 * returning. This means that the xfsaild will not get stuck trying
877 * to push on stale inode buffers.
883 */ 878 */
884int 879int
885xfs_buf_cond_lock( 880xfs_buf_cond_lock(
@@ -890,6 +885,8 @@ xfs_buf_cond_lock(
890 locked = down_trylock(&bp->b_sema) == 0; 885 locked = down_trylock(&bp->b_sema) == 0;
891 if (locked) 886 if (locked)
892 XB_SET_OWNER(bp); 887 XB_SET_OWNER(bp);
888 else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
889 xfs_log_force(bp->b_target->bt_mount, 0);
893 890
894 trace_xfs_buf_cond_lock(bp, _RET_IP_); 891 trace_xfs_buf_cond_lock(bp, _RET_IP_);
895 return locked ? 0 : -EBUSY; 892 return locked ? 0 : -EBUSY;
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 8abd12e32e1..4111cd3966c 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -5471,8 +5471,13 @@ xfs_getbmap(
5471 if (error) 5471 if (error)
5472 goto out_unlock_iolock; 5472 goto out_unlock_iolock;
5473 } 5473 }
5474 5474 /*
5475 ASSERT(ip->i_delayed_blks == 0); 5475 * even after flushing the inode, there can still be delalloc
5476 * blocks on the inode beyond EOF due to speculative
5477 * preallocation. These are not removed until the release
5478 * function is called or the inode is inactivated. Hence we
5479 * cannot assert here that ip->i_delayed_blks == 0.
5480 */
5476 } 5481 }
5477 5482
5478 lock = xfs_ilock_map_shared(ip); 5483 lock = xfs_ilock_map_shared(ip);
@@ -6070,3 +6075,79 @@ xfs_bmap_disk_count_leaves(
6070 *count += xfs_bmbt_disk_get_blockcount(frp); 6075 *count += xfs_bmbt_disk_get_blockcount(frp);
6071 } 6076 }
6072} 6077}
6078
6079/*
 6080 * dead simple method of punching delayed allocation blocks from a range in
6081 * the inode. Walks a block at a time so will be slow, but is only executed in
 6082 * rare error cases so the overhead is not critical. This will always punch out
6083 * both the start and end blocks, even if the ranges only partially overlap
6084 * them, so it is up to the caller to ensure that partial blocks are not
6085 * passed in.
6086 */
6087int
6088xfs_bmap_punch_delalloc_range(
6089 struct xfs_inode *ip,
6090 xfs_fileoff_t start_fsb,
6091 xfs_fileoff_t length)
6092{
6093 xfs_fileoff_t remaining = length;
6094 int error = 0;
6095
6096 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6097
6098 do {
6099 int done;
6100 xfs_bmbt_irec_t imap;
6101 int nimaps = 1;
6102 xfs_fsblock_t firstblock;
6103 xfs_bmap_free_t flist;
6104
6105 /*
6106 * Map the range first and check that it is a delalloc extent
6107 * before trying to unmap the range. Otherwise we will be
6108 * trying to remove a real extent (which requires a
6109 * transaction) or a hole, which is probably a bad idea...
6110 */
6111 error = xfs_bmapi(NULL, ip, start_fsb, 1,
6112 XFS_BMAPI_ENTIRE, NULL, 0, &imap,
6113 &nimaps, NULL);
6114
6115 if (error) {
6116 /* something screwed, just bail */
6117 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
6118 xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
6119 "Failed delalloc mapping lookup ino %lld fsb %lld.",
6120 ip->i_ino, start_fsb);
6121 }
6122 break;
6123 }
6124 if (!nimaps) {
6125 /* nothing there */
6126 goto next_block;
6127 }
6128 if (imap.br_startblock != DELAYSTARTBLOCK) {
6129 /* been converted, ignore */
6130 goto next_block;
6131 }
6132 WARN_ON(imap.br_blockcount == 0);
6133
6134 /*
6135 * Note: while we initialise the firstblock/flist pair, they
6136 * should never be used because blocks should never be
 6137 * allocated or freed for a delalloc extent and hence we don't
 6138 * need to cancel or finish them after the xfs_bunmapi() call.
6139 */
6140 xfs_bmap_init(&flist, &firstblock);
6141 error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
6142 &flist, &done);
6143 if (error)
6144 break;
6145
6146 ASSERT(!flist.xbf_count && !flist.xbf_first);
6147next_block:
6148 start_fsb++;
6149 remaining--;
6150 } while(remaining > 0);
6151
6152 return error;
6153}
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 71ec9b6ecdf..3651191daea 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -394,6 +394,11 @@ xfs_bmap_count_blocks(
394 int whichfork, 394 int whichfork,
395 int *count); 395 int *count);
396 396
397int
398xfs_bmap_punch_delalloc_range(
399 struct xfs_inode *ip,
400 xfs_fileoff_t start_fsb,
401 xfs_fileoff_t length);
397#endif /* __KERNEL__ */ 402#endif /* __KERNEL__ */
398 403
399#endif /* __XFS_BMAP_H__ */ 404#endif /* __XFS_BMAP_H__ */
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 3b9582c60a2..e60490bc00a 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -377,6 +377,19 @@ xfs_swap_extents(
377 ip->i_d.di_format = tip->i_d.di_format; 377 ip->i_d.di_format = tip->i_d.di_format;
378 tip->i_d.di_format = tmp; 378 tip->i_d.di_format = tmp;
379 379
380 /*
381 * The extents in the source inode could still contain speculative
382 * preallocation beyond EOF (e.g. the file is open but not modified
383 * while defrag is in progress). In that case, we need to copy over the
384 * number of delalloc blocks the data fork in the source inode is
385 * tracking beyond EOF so that when the fork is truncated away when the
386 * temporary inode is unlinked we don't underrun the i_delayed_blks
387 * counter on that inode.
388 */
389 ASSERT(tip->i_delayed_blks == 0);
390 tip->i_delayed_blks = ip->i_delayed_blks;
391 ip->i_delayed_blks = 0;
392
380 ilf_fields = XFS_ILOG_CORE; 393 ilf_fields = XFS_ILOG_CORE;
381 394
382 switch(ip->i_d.di_format) { 395 switch(ip->i_d.di_format) {
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index ed999026766..c78cc6a3d87 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -58,6 +58,7 @@ xfs_error_trap(int e)
58int xfs_etest[XFS_NUM_INJECT_ERROR]; 58int xfs_etest[XFS_NUM_INJECT_ERROR];
59int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR]; 59int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR];
60char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR]; 60char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR];
61int xfs_error_test_active;
61 62
62int 63int
63xfs_error_test(int error_tag, int *fsidp, char *expression, 64xfs_error_test(int error_tag, int *fsidp, char *expression,
@@ -108,6 +109,7 @@ xfs_errortag_add(int error_tag, xfs_mount_t *mp)
108 len = strlen(mp->m_fsname); 109 len = strlen(mp->m_fsname);
109 xfs_etest_fsname[i] = kmem_alloc(len + 1, KM_SLEEP); 110 xfs_etest_fsname[i] = kmem_alloc(len + 1, KM_SLEEP);
110 strcpy(xfs_etest_fsname[i], mp->m_fsname); 111 strcpy(xfs_etest_fsname[i], mp->m_fsname);
112 xfs_error_test_active++;
111 return 0; 113 return 0;
112 } 114 }
113 } 115 }
@@ -137,6 +139,7 @@ xfs_errortag_clearall(xfs_mount_t *mp, int loud)
137 xfs_etest_fsid[i] = 0LL; 139 xfs_etest_fsid[i] = 0LL;
138 kmem_free(xfs_etest_fsname[i]); 140 kmem_free(xfs_etest_fsname[i]);
139 xfs_etest_fsname[i] = NULL; 141 xfs_etest_fsname[i] = NULL;
142 xfs_error_test_active--;
140 } 143 }
141 } 144 }
142 145
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index c2c1a072bb8..f338847f80b 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -127,13 +127,14 @@ extern void xfs_corruption_error(const char *tag, int level,
127#define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT 127#define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT
128 128
129#ifdef DEBUG 129#ifdef DEBUG
130extern int xfs_error_test_active;
130extern int xfs_error_test(int, int *, char *, int, char *, unsigned long); 131extern int xfs_error_test(int, int *, char *, int, char *, unsigned long);
131 132
132#define XFS_NUM_INJECT_ERROR 10 133#define XFS_NUM_INJECT_ERROR 10
133#define XFS_TEST_ERROR(expr, mp, tag, rf) \ 134#define XFS_TEST_ERROR(expr, mp, tag, rf) \
134 ((expr) || \ 135 ((expr) || (xfs_error_test_active && \
135 xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \ 136 xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \
136 (rf))) 137 (rf))))
137 138
138extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp); 139extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp);
139extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud); 140extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud);
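
xfs_error_test_active counts the armed error tags, so the common case with no injection configured never calls xfs_error_test() at all; the macro only reaches the function once the counter is non-zero. A compact userspace rendering of that short-circuit (the names mirror the kernel macro but the bodies are stubs):

    #include <stdio.h>

    static int error_test_active;            /* bumped when a tag is armed */

    static int error_test(int tag)
    {
            /* an expensive table lookup in the real code; a stub here */
            printf("checking injected error tag %d\n", tag);
            return 0;
    }

    /* expr short-circuits first, then the cheap counter, then the lookup */
    #define TEST_ERROR(expr, tag) \
            ((expr) || (error_test_active && error_test(tag)))

    int main(void)
    {
            if (TEST_ERROR(0, 42))            /* counter is 0: error_test() never runs */
                    return 1;
            error_test_active++;              /* arming a tag enables the slow path */
            return TEST_ERROR(0, 42);
    }
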
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index c7ac020705d..7c8d30c453c 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -657,18 +657,37 @@ xfs_inode_item_unlock(
657} 657}
658 658
659/* 659/*
660 * This is called to find out where the oldest active copy of the 660 * This is called to find out where the oldest active copy of the inode log
661 * inode log item in the on disk log resides now that the last log 661 * item in the on disk log resides now that the last log write of it completed
662 * write of it completed at the given lsn. Since we always re-log 662 * at the given lsn. Since we always re-log all dirty data in an inode, the
663 * all dirty data in an inode, the latest copy in the on disk log 663 * latest copy in the on disk log is the only one that matters. Therefore,
664 * is the only one that matters. Therefore, simply return the 664 * simply return the given lsn.
665 * given lsn. 665 *
666 * If the inode has been marked stale because the cluster is being freed, we
667 * don't want to (re-)insert this inode into the AIL. There is a race condition
668 * where the cluster buffer may be unpinned before the inode is inserted into
669 * the AIL during transaction committed processing. If the buffer is unpinned
670 * before the inode item has been committed and inserted, then it is possible
 671 * for the buffer to be written and its IO to complete before the inode is inserted
672 * into the AIL. In that case, we'd be inserting a clean, stale inode into the
673 * AIL which will never get removed. It will, however, get reclaimed which
 674 * triggers an assert in xfs_inode_free() complaining about freeing an inode
675 * still in the AIL.
676 *
677 * To avoid this, return a lower LSN than the one passed in so that the
678 * transaction committed code will not move the inode forward in the AIL but
679 * will still unpin it properly.
666 */ 680 */
667STATIC xfs_lsn_t 681STATIC xfs_lsn_t
668xfs_inode_item_committed( 682xfs_inode_item_committed(
669 struct xfs_log_item *lip, 683 struct xfs_log_item *lip,
670 xfs_lsn_t lsn) 684 xfs_lsn_t lsn)
671{ 685{
686 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
687 struct xfs_inode *ip = iip->ili_inode;
688
689 if (xfs_iflags_test(ip, XFS_ISTALE))
690 return lsn - 1;
672 return lsn; 691 return lsn;
673} 692}
674 693
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index d2af0a8381a..77a59891734 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -297,6 +297,7 @@ xfs_rename(
297 * it and some incremental backup programs won't work without it. 297 * it and some incremental backup programs won't work without it.
298 */ 298 */
299 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG); 299 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
300 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
300 301
301 /* 302 /*
302 * Adjust the link count on src_dp. This is necessary when 303 * Adjust the link count on src_dp. This is necessary when