author     Steve French <sfrench@us.ibm.com>   2010-06-16 09:19:36 -0400
committer  Steve French <sfrench@us.ibm.com>   2010-06-16 09:19:36 -0400
commit     0933a95dfdb1ae5c93e1ede5899f35acc2bb244d (patch)
tree       55ac47b819a2a2084f82f9d823d9152ac2a7f2b3 /fs
parent     12420ac341533f3715b3deb788637568f22b78ff (diff)
parent     7e27d6e778cd87b6f2415515d7127eba53fe5d02 (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'fs')
-rw-r--r--  fs/block_dev.c               |  72
-rw-r--r--  fs/btrfs/acl.c               |   8
-rw-r--r--  fs/btrfs/disk-io.c           |  11
-rw-r--r--  fs/btrfs/extent-tree.c       |   3
-rw-r--r--  fs/btrfs/file.c              |  12
-rw-r--r--  fs/btrfs/inode.c             |   4
-rw-r--r--  fs/btrfs/ioctl.c             |   4
-rw-r--r--  fs/btrfs/relocation.c        |   7
-rw-r--r--  fs/btrfs/root-tree.c         |   3
-rw-r--r--  fs/btrfs/super.c             |   6
-rw-r--r--  fs/ceph/caps.c               |  93
-rw-r--r--  fs/ceph/inode.c              |   2
-rw-r--r--  fs/ceph/mds_client.c         |  28
-rw-r--r--  fs/ceph/mds_client.h         |   6
-rw-r--r--  fs/ceph/mon_client.c         |   2
-rw-r--r--  fs/ceph/super.c              |   4
-rw-r--r--  fs/ext4/inode.c              |  40
-rw-r--r--  fs/ext4/move_extent.c        |   3
-rw-r--r--  fs/jffs2/acl.c               |   3
-rw-r--r--  fs/jffs2/dir.c               | 127
-rw-r--r--  fs/jffs2/fs.c                |   7
-rw-r--r--  fs/nfsd/nfs4state.c          |   2
-rw-r--r--  fs/nfsd/vfs.c                |   3
-rw-r--r--  fs/pipe.c                    |  20
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c  |   8
25 files changed, 289 insertions, 189 deletions
diff --git a/fs/block_dev.c b/fs/block_dev.c index 7346c96308a5..99d6af811747 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -706,8 +706,13 @@ retry: | |||
706 | * @bdev is about to be opened exclusively. Check @bdev can be opened | 706 | * @bdev is about to be opened exclusively. Check @bdev can be opened |
707 | * exclusively and mark that an exclusive open is in progress. Each | 707 | * exclusively and mark that an exclusive open is in progress. Each |
708 | * successful call to this function must be matched with a call to | 708 | * successful call to this function must be matched with a call to |
709 | * either bd_claim() or bd_abort_claiming(). If this function | 709 | * either bd_finish_claiming() or bd_abort_claiming() (which do not |
710 | * succeeds, the matching bd_claim() is guaranteed to succeed. | 710 | * fail). |
711 | * | ||
712 | * This function is used to gain exclusive access to the block device | ||
713 | * without actually causing other exclusive open attempts to fail. It | ||
714 | * should be used when the open sequence itself requires exclusive | ||
715 | * access but may subsequently fail. | ||
711 | * | 716 | * |
712 | * CONTEXT: | 717 | * CONTEXT: |
713 | * Might sleep. | 718 | * Might sleep. |
@@ -734,6 +739,7 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, | |||
734 | return ERR_PTR(-ENXIO); | 739 | return ERR_PTR(-ENXIO); |
735 | 740 | ||
736 | whole = bdget_disk(disk, 0); | 741 | whole = bdget_disk(disk, 0); |
742 | module_put(disk->fops->owner); | ||
737 | put_disk(disk); | 743 | put_disk(disk); |
738 | if (!whole) | 744 | if (!whole) |
739 | return ERR_PTR(-ENOMEM); | 745 | return ERR_PTR(-ENOMEM); |
@@ -782,15 +788,46 @@ static void bd_abort_claiming(struct block_device *whole, void *holder) | |||
782 | __bd_abort_claiming(whole, holder); /* releases bdev_lock */ | 788 | __bd_abort_claiming(whole, holder); /* releases bdev_lock */ |
783 | } | 789 | } |
784 | 790 | ||
791 | /* increment holders when we have a legitimate claim. requires bdev_lock */ | ||
792 | static void __bd_claim(struct block_device *bdev, struct block_device *whole, | ||
793 | void *holder) | ||
794 | { | ||
795 | /* note that for a whole device bd_holders | ||
796 | * will be incremented twice, and bd_holder will | ||
797 | * be set to bd_claim before being set to holder | ||
798 | */ | ||
799 | whole->bd_holders++; | ||
800 | whole->bd_holder = bd_claim; | ||
801 | bdev->bd_holders++; | ||
802 | bdev->bd_holder = holder; | ||
803 | } | ||
804 | |||
805 | /** | ||
806 | * bd_finish_claiming - finish claiming a block device | ||
807 | * @bdev: block device of interest (passed to bd_start_claiming()) | ||
808 | * @whole: whole block device returned by bd_start_claiming() | ||
809 | * @holder: holder trying to claim @bdev | ||
810 | * | ||
811 | * Finish a claiming block started by bd_start_claiming(). | ||
812 | * | ||
813 | * CONTEXT: | ||
814 | * Grabs and releases bdev_lock. | ||
815 | */ | ||
816 | static void bd_finish_claiming(struct block_device *bdev, | ||
817 | struct block_device *whole, void *holder) | ||
818 | { | ||
819 | spin_lock(&bdev_lock); | ||
820 | BUG_ON(!bd_may_claim(bdev, whole, holder)); | ||
821 | __bd_claim(bdev, whole, holder); | ||
822 | __bd_abort_claiming(whole, holder); /* not actually an abort */ | ||
823 | } | ||
824 | |||
785 | /** | 825 | /** |
786 | * bd_claim - claim a block device | 826 | * bd_claim - claim a block device |
787 | * @bdev: block device to claim | 827 | * @bdev: block device to claim |
788 | * @holder: holder trying to claim @bdev | 828 | * @holder: holder trying to claim @bdev |
789 | * | 829 | * |
790 | * Try to claim @bdev which must have been opened successfully. This | 830 | * Try to claim @bdev which must have been opened successfully. |
791 | * function may be called with or without preceding | ||
792 | * blk_start_claiming(). In the former case, this function is always | ||
793 | * successful and terminates the claiming block. | ||
794 | * | 831 | * |
795 | * CONTEXT: | 832 | * CONTEXT: |
796 | * Might sleep. | 833 | * Might sleep. |
@@ -806,23 +843,10 @@ int bd_claim(struct block_device *bdev, void *holder) | |||
806 | might_sleep(); | 843 | might_sleep(); |
807 | 844 | ||
808 | spin_lock(&bdev_lock); | 845 | spin_lock(&bdev_lock); |
809 | |||
810 | res = bd_prepare_to_claim(bdev, whole, holder); | 846 | res = bd_prepare_to_claim(bdev, whole, holder); |
811 | if (res == 0) { | 847 | if (res == 0) |
812 | /* note that for a whole device bd_holders | 848 | __bd_claim(bdev, whole, holder); |
813 | * will be incremented twice, and bd_holder will | 849 | spin_unlock(&bdev_lock); |
814 | * be set to bd_claim before being set to holder | ||
815 | */ | ||
816 | whole->bd_holders++; | ||
817 | whole->bd_holder = bd_claim; | ||
818 | bdev->bd_holders++; | ||
819 | bdev->bd_holder = holder; | ||
820 | } | ||
821 | |||
822 | if (whole->bd_claiming) | ||
823 | __bd_abort_claiming(whole, holder); /* releases bdev_lock */ | ||
824 | else | ||
825 | spin_unlock(&bdev_lock); | ||
826 | 850 | ||
827 | return res; | 851 | return res; |
828 | } | 852 | } |
@@ -1476,7 +1500,7 @@ static int blkdev_open(struct inode * inode, struct file * filp) | |||
1476 | 1500 | ||
1477 | if (whole) { | 1501 | if (whole) { |
1478 | if (res == 0) | 1502 | if (res == 0) |
1479 | BUG_ON(bd_claim(bdev, filp) != 0); | 1503 | bd_finish_claiming(bdev, whole, filp); |
1480 | else | 1504 | else |
1481 | bd_abort_claiming(whole, filp); | 1505 | bd_abort_claiming(whole, filp); |
1482 | } | 1506 | } |
@@ -1712,7 +1736,7 @@ struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *h | |||
1712 | if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) | 1736 | if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) |
1713 | goto out_blkdev_put; | 1737 | goto out_blkdev_put; |
1714 | 1738 | ||
1715 | BUG_ON(bd_claim(bdev, holder) != 0); | 1739 | bd_finish_claiming(bdev, whole, holder); |
1716 | return bdev; | 1740 | return bdev; |
1717 | 1741 | ||
1718 | out_blkdev_put: | 1742 | out_blkdev_put: |
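The block_dev.c changes above split exclusive opens into a start/finish/abort sequence: bd_start_claiming() reserves the claim, the caller then does its possibly-failing open work, and the claim is either completed with bd_finish_claiming() or rolled back with bd_abort_claiming(), neither of which can fail. A minimal sketch of a caller under that model (illustrative only; example_open_exclusive() and do_open() are hypothetical stand-ins, not part of this patch):

/* Illustrative sketch: a hypothetical exclusive-open path built on the
 * claiming helpers above. do_open() stands in for driver-specific open
 * work that may still fail after the claim has been started. */
static int example_open_exclusive(struct block_device *bdev, fmode_t mode,
                                  void *holder)
{
        struct block_device *whole;
        int err;

        whole = bd_start_claiming(bdev, holder);        /* may sleep, may fail */
        if (IS_ERR(whole))
                return PTR_ERR(whole);

        err = do_open(bdev, mode);                      /* hypothetical, may fail */
        if (err) {
                bd_abort_claiming(whole, holder);       /* cannot fail */
                return err;
        }

        bd_finish_claiming(bdev, whole, holder);        /* cannot fail */
        return 0;
}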
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 8d432cd9d580..2222d161c7b6 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c | |||
@@ -60,6 +60,8 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type) | |||
60 | size = __btrfs_getxattr(inode, name, value, size); | 60 | size = __btrfs_getxattr(inode, name, value, size); |
61 | if (size > 0) { | 61 | if (size > 0) { |
62 | acl = posix_acl_from_xattr(value, size); | 62 | acl = posix_acl_from_xattr(value, size); |
63 | if (IS_ERR(acl)) | ||
64 | return acl; | ||
63 | set_cached_acl(inode, type, acl); | 65 | set_cached_acl(inode, type, acl); |
64 | } | 66 | } |
65 | kfree(value); | 67 | kfree(value); |
@@ -160,6 +162,12 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, | |||
160 | int ret; | 162 | int ret; |
161 | struct posix_acl *acl = NULL; | 163 | struct posix_acl *acl = NULL; |
162 | 164 | ||
165 | if (!is_owner_or_cap(dentry->d_inode)) | ||
166 | return -EPERM; | ||
167 | |||
168 | if (!IS_POSIXACL(dentry->d_inode)) | ||
169 | return -EOPNOTSUPP; | ||
170 | |||
163 | if (value) { | 171 | if (value) { |
164 | acl = posix_acl_from_xattr(value, size); | 172 | acl = posix_acl_from_xattr(value, size); |
165 | if (acl == NULL) { | 173 | if (acl == NULL) { |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index f3b287c22caf..34f7c375567e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -1941,8 +1941,11 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1941 | btrfs_level_size(tree_root, | 1941 | btrfs_level_size(tree_root, |
1942 | btrfs_super_log_root_level(disk_super)); | 1942 | btrfs_super_log_root_level(disk_super)); |
1943 | 1943 | ||
1944 | log_tree_root = kzalloc(sizeof(struct btrfs_root), | 1944 | log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS); |
1945 | GFP_NOFS); | 1945 | if (!log_tree_root) { |
1946 | err = -ENOMEM; | ||
1947 | goto fail_trans_kthread; | ||
1948 | } | ||
1946 | 1949 | ||
1947 | __setup_root(nodesize, leafsize, sectorsize, stripesize, | 1950 | __setup_root(nodesize, leafsize, sectorsize, stripesize, |
1948 | log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID); | 1951 | log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID); |
@@ -1982,6 +1985,10 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1982 | fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); | 1985 | fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location); |
1983 | if (!fs_info->fs_root) | 1986 | if (!fs_info->fs_root) |
1984 | goto fail_trans_kthread; | 1987 | goto fail_trans_kthread; |
1988 | if (IS_ERR(fs_info->fs_root)) { | ||
1989 | err = PTR_ERR(fs_info->fs_root); | ||
1990 | goto fail_trans_kthread; | ||
1991 | } | ||
1985 | 1992 | ||
1986 | if (!(sb->s_flags & MS_RDONLY)) { | 1993 | if (!(sb->s_flags & MS_RDONLY)) { |
1987 | down_read(&fs_info->cleanup_work_sem); | 1994 | down_read(&fs_info->cleanup_work_sem); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index b9080d71991a..32d094002a57 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -4360,7 +4360,8 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, | |||
4360 | 4360 | ||
4361 | block_rsv = get_block_rsv(trans, root); | 4361 | block_rsv = get_block_rsv(trans, root); |
4362 | cache = btrfs_lookup_block_group(root->fs_info, buf->start); | 4362 | cache = btrfs_lookup_block_group(root->fs_info, buf->start); |
4363 | BUG_ON(block_rsv->space_info != cache->space_info); | 4363 | if (block_rsv->space_info != cache->space_info) |
4364 | goto out; | ||
4364 | 4365 | ||
4365 | if (btrfs_header_generation(buf) == trans->transid) { | 4366 | if (btrfs_header_generation(buf) == trans->transid) { |
4366 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { | 4367 | if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 787b50a16a14..e354c33df082 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -1140,7 +1140,7 @@ int btrfs_sync_file(struct file *file, int datasync) | |||
1140 | /* | 1140 | /* |
1141 | * ok we haven't committed the transaction yet, lets do a commit | 1141 | * ok we haven't committed the transaction yet, lets do a commit |
1142 | */ | 1142 | */ |
1143 | if (file && file->private_data) | 1143 | if (file->private_data) |
1144 | btrfs_ioctl_trans_end(file); | 1144 | btrfs_ioctl_trans_end(file); |
1145 | 1145 | ||
1146 | trans = btrfs_start_transaction(root, 0); | 1146 | trans = btrfs_start_transaction(root, 0); |
@@ -1190,14 +1190,22 @@ static const struct vm_operations_struct btrfs_file_vm_ops = { | |||
1190 | 1190 | ||
1191 | static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) | 1191 | static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) |
1192 | { | 1192 | { |
1193 | vma->vm_ops = &btrfs_file_vm_ops; | 1193 | struct address_space *mapping = filp->f_mapping; |
1194 | |||
1195 | if (!mapping->a_ops->readpage) | ||
1196 | return -ENOEXEC; | ||
1197 | |||
1194 | file_accessed(filp); | 1198 | file_accessed(filp); |
1199 | vma->vm_ops = &btrfs_file_vm_ops; | ||
1200 | vma->vm_flags |= VM_CAN_NONLINEAR; | ||
1201 | |||
1195 | return 0; | 1202 | return 0; |
1196 | } | 1203 | } |
1197 | 1204 | ||
1198 | const struct file_operations btrfs_file_operations = { | 1205 | const struct file_operations btrfs_file_operations = { |
1199 | .llseek = generic_file_llseek, | 1206 | .llseek = generic_file_llseek, |
1200 | .read = do_sync_read, | 1207 | .read = do_sync_read, |
1208 | .write = do_sync_write, | ||
1201 | .aio_read = generic_file_aio_read, | 1209 | .aio_read = generic_file_aio_read, |
1202 | .splice_read = generic_file_splice_read, | 1210 | .splice_read = generic_file_splice_read, |
1203 | .aio_write = btrfs_file_aio_write, | 1211 | .aio_write = btrfs_file_aio_write, |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index fa6ccc1bfe2a..1bff92ad4744 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -2673,7 +2673,7 @@ static int check_path_shared(struct btrfs_root *root, | |||
2673 | struct extent_buffer *eb; | 2673 | struct extent_buffer *eb; |
2674 | int level; | 2674 | int level; |
2675 | int ret; | 2675 | int ret; |
2676 | u64 refs; | 2676 | u64 refs = 1; |
2677 | 2677 | ||
2678 | for (level = 0; level < BTRFS_MAX_LEVEL; level++) { | 2678 | for (level = 0; level < BTRFS_MAX_LEVEL; level++) { |
2679 | if (!path->nodes[level]) | 2679 | if (!path->nodes[level]) |
@@ -6884,7 +6884,7 @@ static long btrfs_fallocate(struct inode *inode, int mode, | |||
6884 | if (em->block_start == EXTENT_MAP_HOLE || | 6884 | if (em->block_start == EXTENT_MAP_HOLE || |
6885 | (cur_offset >= inode->i_size && | 6885 | (cur_offset >= inode->i_size && |
6886 | !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { | 6886 | !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { |
6887 | ret = btrfs_prealloc_file_range(inode, 0, cur_offset, | 6887 | ret = btrfs_prealloc_file_range(inode, mode, cur_offset, |
6888 | last_byte - cur_offset, | 6888 | last_byte - cur_offset, |
6889 | 1 << inode->i_blkbits, | 6889 | 1 << inode->i_blkbits, |
6890 | offset + len, | 6890 | offset + len, |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 4cdb98cf26de..4dbaf89b1337 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -1280,7 +1280,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, | |||
1280 | trans = btrfs_start_transaction(root, 0); | 1280 | trans = btrfs_start_transaction(root, 0); |
1281 | if (IS_ERR(trans)) { | 1281 | if (IS_ERR(trans)) { |
1282 | err = PTR_ERR(trans); | 1282 | err = PTR_ERR(trans); |
1283 | goto out; | 1283 | goto out_up_write; |
1284 | } | 1284 | } |
1285 | trans->block_rsv = &root->fs_info->global_block_rsv; | 1285 | trans->block_rsv = &root->fs_info->global_block_rsv; |
1286 | 1286 | ||
@@ -1845,7 +1845,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) | |||
1845 | dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); | 1845 | dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); |
1846 | di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path, | 1846 | di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path, |
1847 | dir_id, "default", 7, 1); | 1847 | dir_id, "default", 7, 1); |
1848 | if (!di) { | 1848 | if (IS_ERR_OR_NULL(di)) { |
1849 | btrfs_free_path(path); | 1849 | btrfs_free_path(path); |
1850 | btrfs_end_transaction(trans, root); | 1850 | btrfs_end_transaction(trans, root); |
1851 | printk(KERN_ERR "Umm, you don't have the default dir item, " | 1851 | printk(KERN_ERR "Umm, you don't have the default dir item, " |
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 05d41e569236..b37d723b9d4a 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -784,16 +784,17 @@ again: | |||
784 | struct btrfs_extent_ref_v0 *ref0; | 784 | struct btrfs_extent_ref_v0 *ref0; |
785 | ref0 = btrfs_item_ptr(eb, path1->slots[0], | 785 | ref0 = btrfs_item_ptr(eb, path1->slots[0], |
786 | struct btrfs_extent_ref_v0); | 786 | struct btrfs_extent_ref_v0); |
787 | root = find_tree_root(rc, eb, ref0); | ||
788 | if (!root->ref_cows) | ||
789 | cur->cowonly = 1; | ||
790 | if (key.objectid == key.offset) { | 787 | if (key.objectid == key.offset) { |
788 | root = find_tree_root(rc, eb, ref0); | ||
791 | if (root && !should_ignore_root(root)) | 789 | if (root && !should_ignore_root(root)) |
792 | cur->root = root; | 790 | cur->root = root; |
793 | else | 791 | else |
794 | list_add(&cur->list, &useless); | 792 | list_add(&cur->list, &useless); |
795 | break; | 793 | break; |
796 | } | 794 | } |
795 | if (is_cowonly_root(btrfs_ref_root_v0(eb, | ||
796 | ref0))) | ||
797 | cur->cowonly = 1; | ||
797 | } | 798 | } |
798 | #else | 799 | #else |
799 | BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY); | 800 | BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY); |
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index b91ccd972644..2d958be761c8 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c | |||
@@ -330,7 +330,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, | |||
330 | { | 330 | { |
331 | struct btrfs_path *path; | 331 | struct btrfs_path *path; |
332 | int ret; | 332 | int ret; |
333 | u32 refs; | ||
334 | struct btrfs_root_item *ri; | 333 | struct btrfs_root_item *ri; |
335 | struct extent_buffer *leaf; | 334 | struct extent_buffer *leaf; |
336 | 335 | ||
@@ -344,8 +343,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, | |||
344 | leaf = path->nodes[0]; | 343 | leaf = path->nodes[0]; |
345 | ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item); | 344 | ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item); |
346 | 345 | ||
347 | refs = btrfs_disk_root_refs(leaf, ri); | ||
348 | BUG_ON(refs != 0); | ||
349 | ret = btrfs_del_item(trans, root, path); | 346 | ret = btrfs_del_item(trans, root, path); |
350 | out: | 347 | out: |
351 | btrfs_free_path(path); | 348 | btrfs_free_path(path); |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index d34b2dfc9628..f2393b390318 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -360,6 +360,8 @@ static struct dentry *get_default_root(struct super_block *sb, | |||
360 | */ | 360 | */ |
361 | dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); | 361 | dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); |
362 | di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); | 362 | di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); |
363 | if (IS_ERR(di)) | ||
364 | return ERR_CAST(di); | ||
363 | if (!di) { | 365 | if (!di) { |
364 | /* | 366 | /* |
365 | * Ok the default dir item isn't there. This is weird since | 367 | * Ok the default dir item isn't there. This is weird since |
@@ -390,8 +392,8 @@ setup_root: | |||
390 | location.offset = 0; | 392 | location.offset = 0; |
391 | 393 | ||
392 | inode = btrfs_iget(sb, &location, new_root, &new); | 394 | inode = btrfs_iget(sb, &location, new_root, &new); |
393 | if (!inode) | 395 | if (IS_ERR(inode)) |
394 | return ERR_PTR(-ENOMEM); | 396 | return ERR_CAST(inode); |
395 | 397 | ||
396 | /* | 398 | /* |
397 | * If we're just mounting the root most subvol put the inode and return | 399 | * If we're just mounting the root most subvol put the inode and return |
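Several of the btrfs hunks above replace plain NULL checks with IS_ERR(), IS_ERR_OR_NULL() and ERR_CAST() because the callees return error pointers rather than NULL on failure. A generic sketch of that idiom (assumed example; "struct thing" and "lookup" are illustrative names, not kernel interfaces):

/* Generic sketch of the error-pointer idiom the fixes above rely on: a
 * lookup can return a valid pointer, NULL ("not found"), or ERR_PTR(-errno),
 * and the caller must distinguish all three. */
#include <linux/err.h>
#include <linux/errno.h>

struct thing;

static int example_use_lookup(struct thing *(*lookup)(int key), int key)
{
        struct thing *t = lookup(key);

        if (IS_ERR(t))          /* callee encoded an errno in the pointer */
                return PTR_ERR(t);
        if (!t)                 /* genuinely not found */
                return -ENOENT;

        /* IS_ERR_OR_NULL(t) covers both tests at once when, as in the
         * btrfs_ioctl_default_subvol() hunk above, both cases should take
         * the same error path. */
        return 0;
}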
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index ae3e3a306445..619b61655ee5 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -981,6 +981,46 @@ static int send_cap_msg(struct ceph_mds_session *session, | |||
981 | return 0; | 981 | return 0; |
982 | } | 982 | } |
983 | 983 | ||
984 | static void __queue_cap_release(struct ceph_mds_session *session, | ||
985 | u64 ino, u64 cap_id, u32 migrate_seq, | ||
986 | u32 issue_seq) | ||
987 | { | ||
988 | struct ceph_msg *msg; | ||
989 | struct ceph_mds_cap_release *head; | ||
990 | struct ceph_mds_cap_item *item; | ||
991 | |||
992 | spin_lock(&session->s_cap_lock); | ||
993 | BUG_ON(!session->s_num_cap_releases); | ||
994 | msg = list_first_entry(&session->s_cap_releases, | ||
995 | struct ceph_msg, list_head); | ||
996 | |||
997 | dout(" adding %llx release to mds%d msg %p (%d left)\n", | ||
998 | ino, session->s_mds, msg, session->s_num_cap_releases); | ||
999 | |||
1000 | BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE); | ||
1001 | head = msg->front.iov_base; | ||
1002 | head->num = cpu_to_le32(le32_to_cpu(head->num) + 1); | ||
1003 | item = msg->front.iov_base + msg->front.iov_len; | ||
1004 | item->ino = cpu_to_le64(ino); | ||
1005 | item->cap_id = cpu_to_le64(cap_id); | ||
1006 | item->migrate_seq = cpu_to_le32(migrate_seq); | ||
1007 | item->seq = cpu_to_le32(issue_seq); | ||
1008 | |||
1009 | session->s_num_cap_releases--; | ||
1010 | |||
1011 | msg->front.iov_len += sizeof(*item); | ||
1012 | if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { | ||
1013 | dout(" release msg %p full\n", msg); | ||
1014 | list_move_tail(&msg->list_head, &session->s_cap_releases_done); | ||
1015 | } else { | ||
1016 | dout(" release msg %p at %d/%d (%d)\n", msg, | ||
1017 | (int)le32_to_cpu(head->num), | ||
1018 | (int)CEPH_CAPS_PER_RELEASE, | ||
1019 | (int)msg->front.iov_len); | ||
1020 | } | ||
1021 | spin_unlock(&session->s_cap_lock); | ||
1022 | } | ||
1023 | |||
984 | /* | 1024 | /* |
985 | * Queue cap releases when an inode is dropped from our cache. Since | 1025 | * Queue cap releases when an inode is dropped from our cache. Since |
986 | * inode is about to be destroyed, there is no need for i_lock. | 1026 | * inode is about to be destroyed, there is no need for i_lock. |
@@ -994,41 +1034,9 @@ void ceph_queue_caps_release(struct inode *inode) | |||
994 | while (p) { | 1034 | while (p) { |
995 | struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node); | 1035 | struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node); |
996 | struct ceph_mds_session *session = cap->session; | 1036 | struct ceph_mds_session *session = cap->session; |
997 | struct ceph_msg *msg; | ||
998 | struct ceph_mds_cap_release *head; | ||
999 | struct ceph_mds_cap_item *item; | ||
1000 | 1037 | ||
1001 | spin_lock(&session->s_cap_lock); | 1038 | __queue_cap_release(session, ceph_ino(inode), cap->cap_id, |
1002 | BUG_ON(!session->s_num_cap_releases); | 1039 | cap->mseq, cap->issue_seq); |
1003 | msg = list_first_entry(&session->s_cap_releases, | ||
1004 | struct ceph_msg, list_head); | ||
1005 | |||
1006 | dout(" adding %p release to mds%d msg %p (%d left)\n", | ||
1007 | inode, session->s_mds, msg, session->s_num_cap_releases); | ||
1008 | |||
1009 | BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE); | ||
1010 | head = msg->front.iov_base; | ||
1011 | head->num = cpu_to_le32(le32_to_cpu(head->num) + 1); | ||
1012 | item = msg->front.iov_base + msg->front.iov_len; | ||
1013 | item->ino = cpu_to_le64(ceph_ino(inode)); | ||
1014 | item->cap_id = cpu_to_le64(cap->cap_id); | ||
1015 | item->migrate_seq = cpu_to_le32(cap->mseq); | ||
1016 | item->seq = cpu_to_le32(cap->issue_seq); | ||
1017 | |||
1018 | session->s_num_cap_releases--; | ||
1019 | |||
1020 | msg->front.iov_len += sizeof(*item); | ||
1021 | if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { | ||
1022 | dout(" release msg %p full\n", msg); | ||
1023 | list_move_tail(&msg->list_head, | ||
1024 | &session->s_cap_releases_done); | ||
1025 | } else { | ||
1026 | dout(" release msg %p at %d/%d (%d)\n", msg, | ||
1027 | (int)le32_to_cpu(head->num), | ||
1028 | (int)CEPH_CAPS_PER_RELEASE, | ||
1029 | (int)msg->front.iov_len); | ||
1030 | } | ||
1031 | spin_unlock(&session->s_cap_lock); | ||
1032 | p = rb_next(p); | 1040 | p = rb_next(p); |
1033 | __ceph_remove_cap(cap); | 1041 | __ceph_remove_cap(cap); |
1034 | } | 1042 | } |
@@ -2655,7 +2663,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, | |||
2655 | struct ceph_mds_caps *h; | 2663 | struct ceph_mds_caps *h; |
2656 | int mds = session->s_mds; | 2664 | int mds = session->s_mds; |
2657 | int op; | 2665 | int op; |
2658 | u32 seq; | 2666 | u32 seq, mseq; |
2659 | struct ceph_vino vino; | 2667 | struct ceph_vino vino; |
2660 | u64 cap_id; | 2668 | u64 cap_id; |
2661 | u64 size, max_size; | 2669 | u64 size, max_size; |
@@ -2675,6 +2683,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, | |||
2675 | vino.snap = CEPH_NOSNAP; | 2683 | vino.snap = CEPH_NOSNAP; |
2676 | cap_id = le64_to_cpu(h->cap_id); | 2684 | cap_id = le64_to_cpu(h->cap_id); |
2677 | seq = le32_to_cpu(h->seq); | 2685 | seq = le32_to_cpu(h->seq); |
2686 | mseq = le32_to_cpu(h->migrate_seq); | ||
2678 | size = le64_to_cpu(h->size); | 2687 | size = le64_to_cpu(h->size); |
2679 | max_size = le64_to_cpu(h->max_size); | 2688 | max_size = le64_to_cpu(h->max_size); |
2680 | 2689 | ||
@@ -2689,6 +2698,18 @@ void ceph_handle_caps(struct ceph_mds_session *session, | |||
2689 | vino.snap, inode); | 2698 | vino.snap, inode); |
2690 | if (!inode) { | 2699 | if (!inode) { |
2691 | dout(" i don't have ino %llx\n", vino.ino); | 2700 | dout(" i don't have ino %llx\n", vino.ino); |
2701 | |||
2702 | if (op == CEPH_CAP_OP_IMPORT) | ||
2703 | __queue_cap_release(session, vino.ino, cap_id, | ||
2704 | mseq, seq); | ||
2705 | |||
2706 | /* | ||
2707 | * send any full release message to try to move things | ||
2708 | * along for the mds (who clearly thinks we still have this | ||
2709 | * cap). | ||
2710 | */ | ||
2711 | ceph_add_cap_releases(mdsc, session, -1); | ||
2712 | ceph_send_cap_releases(mdsc, session); | ||
2692 | goto done; | 2713 | goto done; |
2693 | } | 2714 | } |
2694 | 2715 | ||
@@ -2714,7 +2735,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, | |||
2714 | spin_lock(&inode->i_lock); | 2735 | spin_lock(&inode->i_lock); |
2715 | cap = __get_cap_for_mds(ceph_inode(inode), mds); | 2736 | cap = __get_cap_for_mds(ceph_inode(inode), mds); |
2716 | if (!cap) { | 2737 | if (!cap) { |
2717 | dout("no cap on %p ino %llx.%llx from mds%d, releasing\n", | 2738 | dout(" no cap on %p ino %llx.%llx from mds%d\n", |
2718 | inode, ceph_ino(inode), ceph_snap(inode), mds); | 2739 | inode, ceph_ino(inode), ceph_snap(inode), mds); |
2719 | spin_unlock(&inode->i_lock); | 2740 | spin_unlock(&inode->i_lock); |
2720 | goto done; | 2741 | goto done; |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 226f5a50d362..ab47f46ca282 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -827,7 +827,7 @@ static void ceph_set_dentry_offset(struct dentry *dn) | |||
827 | 827 | ||
828 | spin_lock(&dcache_lock); | 828 | spin_lock(&dcache_lock); |
829 | spin_lock(&dn->d_lock); | 829 | spin_lock(&dn->d_lock); |
830 | list_move_tail(&dir->d_subdirs, &dn->d_u.d_child); | 830 | list_move(&dn->d_u.d_child, &dir->d_subdirs); |
831 | dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset, | 831 | dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset, |
832 | dn->d_u.d_child.prev, dn->d_u.d_child.next); | 832 | dn->d_u.d_child.prev, dn->d_u.d_child.next); |
833 | spin_unlock(&dn->d_lock); | 833 | spin_unlock(&dn->d_lock); |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index b49f12822cbc..1766947fc07a 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
@@ -1066,9 +1066,9 @@ static int trim_caps(struct ceph_mds_client *mdsc, | |||
1066 | * | 1066 | * |
1067 | * Called under s_mutex. | 1067 | * Called under s_mutex. |
1068 | */ | 1068 | */ |
1069 | static int add_cap_releases(struct ceph_mds_client *mdsc, | 1069 | int ceph_add_cap_releases(struct ceph_mds_client *mdsc, |
1070 | struct ceph_mds_session *session, | 1070 | struct ceph_mds_session *session, |
1071 | int extra) | 1071 | int extra) |
1072 | { | 1072 | { |
1073 | struct ceph_msg *msg; | 1073 | struct ceph_msg *msg; |
1074 | struct ceph_mds_cap_release *head; | 1074 | struct ceph_mds_cap_release *head; |
@@ -1176,8 +1176,8 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) | |||
1176 | /* | 1176 | /* |
1177 | * called under s_mutex | 1177 | * called under s_mutex |
1178 | */ | 1178 | */ |
1179 | static void send_cap_releases(struct ceph_mds_client *mdsc, | 1179 | void ceph_send_cap_releases(struct ceph_mds_client *mdsc, |
1180 | struct ceph_mds_session *session) | 1180 | struct ceph_mds_session *session) |
1181 | { | 1181 | { |
1182 | struct ceph_msg *msg; | 1182 | struct ceph_msg *msg; |
1183 | 1183 | ||
@@ -1980,7 +1980,7 @@ out_err: | |||
1980 | } | 1980 | } |
1981 | mutex_unlock(&mdsc->mutex); | 1981 | mutex_unlock(&mdsc->mutex); |
1982 | 1982 | ||
1983 | add_cap_releases(mdsc, req->r_session, -1); | 1983 | ceph_add_cap_releases(mdsc, req->r_session, -1); |
1984 | mutex_unlock(&session->s_mutex); | 1984 | mutex_unlock(&session->s_mutex); |
1985 | 1985 | ||
1986 | /* kick calling process */ | 1986 | /* kick calling process */ |
@@ -2433,6 +2433,7 @@ static void handle_lease(struct ceph_mds_client *mdsc, | |||
2433 | struct ceph_dentry_info *di; | 2433 | struct ceph_dentry_info *di; |
2434 | int mds = session->s_mds; | 2434 | int mds = session->s_mds; |
2435 | struct ceph_mds_lease *h = msg->front.iov_base; | 2435 | struct ceph_mds_lease *h = msg->front.iov_base; |
2436 | u32 seq; | ||
2436 | struct ceph_vino vino; | 2437 | struct ceph_vino vino; |
2437 | int mask; | 2438 | int mask; |
2438 | struct qstr dname; | 2439 | struct qstr dname; |
@@ -2446,6 +2447,7 @@ static void handle_lease(struct ceph_mds_client *mdsc, | |||
2446 | vino.ino = le64_to_cpu(h->ino); | 2447 | vino.ino = le64_to_cpu(h->ino); |
2447 | vino.snap = CEPH_NOSNAP; | 2448 | vino.snap = CEPH_NOSNAP; |
2448 | mask = le16_to_cpu(h->mask); | 2449 | mask = le16_to_cpu(h->mask); |
2450 | seq = le32_to_cpu(h->seq); | ||
2449 | dname.name = (void *)h + sizeof(*h) + sizeof(u32); | 2451 | dname.name = (void *)h + sizeof(*h) + sizeof(u32); |
2450 | dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32); | 2452 | dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32); |
2451 | if (dname.len != get_unaligned_le32(h+1)) | 2453 | if (dname.len != get_unaligned_le32(h+1)) |
@@ -2456,8 +2458,9 @@ static void handle_lease(struct ceph_mds_client *mdsc, | |||
2456 | 2458 | ||
2457 | /* lookup inode */ | 2459 | /* lookup inode */ |
2458 | inode = ceph_find_inode(sb, vino); | 2460 | inode = ceph_find_inode(sb, vino); |
2459 | dout("handle_lease '%s', mask %d, ino %llx %p\n", | 2461 | dout("handle_lease %s, mask %d, ino %llx %p %.*s\n", |
2460 | ceph_lease_op_name(h->action), mask, vino.ino, inode); | 2462 | ceph_lease_op_name(h->action), mask, vino.ino, inode, |
2463 | dname.len, dname.name); | ||
2461 | if (inode == NULL) { | 2464 | if (inode == NULL) { |
2462 | dout("handle_lease no inode %llx\n", vino.ino); | 2465 | dout("handle_lease no inode %llx\n", vino.ino); |
2463 | goto release; | 2466 | goto release; |
@@ -2482,7 +2485,8 @@ static void handle_lease(struct ceph_mds_client *mdsc, | |||
2482 | switch (h->action) { | 2485 | switch (h->action) { |
2483 | case CEPH_MDS_LEASE_REVOKE: | 2486 | case CEPH_MDS_LEASE_REVOKE: |
2484 | if (di && di->lease_session == session) { | 2487 | if (di && di->lease_session == session) { |
2485 | h->seq = cpu_to_le32(di->lease_seq); | 2488 | if (ceph_seq_cmp(di->lease_seq, seq) > 0) |
2489 | h->seq = cpu_to_le32(di->lease_seq); | ||
2486 | __ceph_mdsc_drop_dentry_lease(dentry); | 2490 | __ceph_mdsc_drop_dentry_lease(dentry); |
2487 | } | 2491 | } |
2488 | release = 1; | 2492 | release = 1; |
@@ -2496,7 +2500,7 @@ static void handle_lease(struct ceph_mds_client *mdsc, | |||
2496 | unsigned long duration = | 2500 | unsigned long duration = |
2497 | le32_to_cpu(h->duration_ms) * HZ / 1000; | 2501 | le32_to_cpu(h->duration_ms) * HZ / 1000; |
2498 | 2502 | ||
2499 | di->lease_seq = le32_to_cpu(h->seq); | 2503 | di->lease_seq = seq; |
2500 | dentry->d_time = di->lease_renew_from + duration; | 2504 | dentry->d_time = di->lease_renew_from + duration; |
2501 | di->lease_renew_after = di->lease_renew_from + | 2505 | di->lease_renew_after = di->lease_renew_from + |
2502 | (duration >> 1); | 2506 | (duration >> 1); |
@@ -2686,10 +2690,10 @@ static void delayed_work(struct work_struct *work) | |||
2686 | send_renew_caps(mdsc, s); | 2690 | send_renew_caps(mdsc, s); |
2687 | else | 2691 | else |
2688 | ceph_con_keepalive(&s->s_con); | 2692 | ceph_con_keepalive(&s->s_con); |
2689 | add_cap_releases(mdsc, s, -1); | 2693 | ceph_add_cap_releases(mdsc, s, -1); |
2690 | if (s->s_state == CEPH_MDS_SESSION_OPEN || | 2694 | if (s->s_state == CEPH_MDS_SESSION_OPEN || |
2691 | s->s_state == CEPH_MDS_SESSION_HUNG) | 2695 | s->s_state == CEPH_MDS_SESSION_HUNG) |
2692 | send_cap_releases(mdsc, s); | 2696 | ceph_send_cap_releases(mdsc, s); |
2693 | mutex_unlock(&s->s_mutex); | 2697 | mutex_unlock(&s->s_mutex); |
2694 | ceph_put_mds_session(s); | 2698 | ceph_put_mds_session(s); |
2695 | 2699 | ||
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index d9936c4f1212..b292fa42a66d 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h | |||
@@ -322,6 +322,12 @@ static inline void ceph_mdsc_put_request(struct ceph_mds_request *req) | |||
322 | kref_put(&req->r_kref, ceph_mdsc_release_request); | 322 | kref_put(&req->r_kref, ceph_mdsc_release_request); |
323 | } | 323 | } |
324 | 324 | ||
325 | extern int ceph_add_cap_releases(struct ceph_mds_client *mdsc, | ||
326 | struct ceph_mds_session *session, | ||
327 | int extra); | ||
328 | extern void ceph_send_cap_releases(struct ceph_mds_client *mdsc, | ||
329 | struct ceph_mds_session *session); | ||
330 | |||
325 | extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc); | 331 | extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc); |
326 | 332 | ||
327 | extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, | 333 | extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, |
diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c index 21c62e9b7d1d..07a539906e67 100644 --- a/fs/ceph/mon_client.c +++ b/fs/ceph/mon_client.c | |||
@@ -400,6 +400,8 @@ static void release_generic_request(struct kref *kref) | |||
400 | ceph_msg_put(req->reply); | 400 | ceph_msg_put(req->reply); |
401 | if (req->request) | 401 | if (req->request) |
402 | ceph_msg_put(req->request); | 402 | ceph_msg_put(req->request); |
403 | |||
404 | kfree(req); | ||
403 | } | 405 | } |
404 | 406 | ||
405 | static void put_generic_request(struct ceph_mon_generic_request *req) | 407 | static void put_generic_request(struct ceph_mon_generic_request *req) |
diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 4e0bee240b9d..fa87f51e38e1 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c | |||
@@ -89,7 +89,7 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
89 | 89 | ||
90 | buf->f_files = le64_to_cpu(st.num_objects); | 90 | buf->f_files = le64_to_cpu(st.num_objects); |
91 | buf->f_ffree = -1; | 91 | buf->f_ffree = -1; |
92 | buf->f_namelen = PATH_MAX; | 92 | buf->f_namelen = NAME_MAX; |
93 | buf->f_frsize = PAGE_CACHE_SIZE; | 93 | buf->f_frsize = PAGE_CACHE_SIZE; |
94 | 94 | ||
95 | /* leave fsid little-endian, regardless of host endianness */ | 95 | /* leave fsid little-endian, regardless of host endianness */ |
@@ -926,7 +926,7 @@ static int ceph_compare_super(struct super_block *sb, void *data) | |||
926 | /* | 926 | /* |
927 | * construct our own bdi so we can control readahead, etc. | 927 | * construct our own bdi so we can control readahead, etc. |
928 | */ | 928 | */ |
929 | static atomic_long_t bdi_seq = ATOMIC_INIT(0); | 929 | static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); |
930 | 930 | ||
931 | static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client) | 931 | static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client) |
932 | { | 932 | { |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 19df61c321fd..42272d67955a 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -4942,20 +4942,26 @@ void ext4_set_inode_flags(struct inode *inode) | |||
4942 | /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ | 4942 | /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ |
4943 | void ext4_get_inode_flags(struct ext4_inode_info *ei) | 4943 | void ext4_get_inode_flags(struct ext4_inode_info *ei) |
4944 | { | 4944 | { |
4945 | unsigned int flags = ei->vfs_inode.i_flags; | 4945 | unsigned int vfs_fl; |
4946 | 4946 | unsigned long old_fl, new_fl; | |
4947 | ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL| | 4947 | |
4948 | EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL); | 4948 | do { |
4949 | if (flags & S_SYNC) | 4949 | vfs_fl = ei->vfs_inode.i_flags; |
4950 | ei->i_flags |= EXT4_SYNC_FL; | 4950 | old_fl = ei->i_flags; |
4951 | if (flags & S_APPEND) | 4951 | new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL| |
4952 | ei->i_flags |= EXT4_APPEND_FL; | 4952 | EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL| |
4953 | if (flags & S_IMMUTABLE) | 4953 | EXT4_DIRSYNC_FL); |
4954 | ei->i_flags |= EXT4_IMMUTABLE_FL; | 4954 | if (vfs_fl & S_SYNC) |
4955 | if (flags & S_NOATIME) | 4955 | new_fl |= EXT4_SYNC_FL; |
4956 | ei->i_flags |= EXT4_NOATIME_FL; | 4956 | if (vfs_fl & S_APPEND) |
4957 | if (flags & S_DIRSYNC) | 4957 | new_fl |= EXT4_APPEND_FL; |
4958 | ei->i_flags |= EXT4_DIRSYNC_FL; | 4958 | if (vfs_fl & S_IMMUTABLE) |
4959 | new_fl |= EXT4_IMMUTABLE_FL; | ||
4960 | if (vfs_fl & S_NOATIME) | ||
4961 | new_fl |= EXT4_NOATIME_FL; | ||
4962 | if (vfs_fl & S_DIRSYNC) | ||
4963 | new_fl |= EXT4_DIRSYNC_FL; | ||
4964 | } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl); | ||
4959 | } | 4965 | } |
4960 | 4966 | ||
4961 | static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, | 4967 | static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode, |
@@ -5191,7 +5197,7 @@ static int ext4_inode_blocks_set(handle_t *handle, | |||
5191 | */ | 5197 | */ |
5192 | raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); | 5198 | raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); |
5193 | raw_inode->i_blocks_high = 0; | 5199 | raw_inode->i_blocks_high = 0; |
5194 | ei->i_flags &= ~EXT4_HUGE_FILE_FL; | 5200 | ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); |
5195 | return 0; | 5201 | return 0; |
5196 | } | 5202 | } |
5197 | if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) | 5203 | if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) |
@@ -5204,9 +5210,9 @@ static int ext4_inode_blocks_set(handle_t *handle, | |||
5204 | */ | 5210 | */ |
5205 | raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); | 5211 | raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); |
5206 | raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); | 5212 | raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32); |
5207 | ei->i_flags &= ~EXT4_HUGE_FILE_FL; | 5213 | ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE); |
5208 | } else { | 5214 | } else { |
5209 | ei->i_flags |= EXT4_HUGE_FILE_FL; | 5215 | ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE); |
5210 | /* i_block is stored in file system block size */ | 5216 | /* i_block is stored in file system block size */ |
5211 | i_blocks = i_blocks >> (inode->i_blkbits - 9); | 5217 | i_blocks = i_blocks >> (inode->i_blkbits - 9); |
5212 | raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); | 5218 | raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); |
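The rewritten ext4_get_inode_flags() folds the flag update into a cmpxchg() retry loop so that a racing update to i_flags on another CPU is not silently overwritten. The same pattern in isolation (simplified sketch; the function and parameter names are illustrative, not part of this patch):

/* Simplified sketch of the lock-free read-modify-write pattern used by the
 * new ext4_get_inode_flags(): recompute new_fl from a snapshot and retry if
 * another CPU changed the flags word in the meantime. */
static void update_flags_atomically(unsigned long *flags_word,
                                    unsigned long set_bits,
                                    unsigned long clear_mask)
{
        unsigned long old_fl, new_fl;

        do {
                old_fl = *flags_word;                   /* snapshot */
                new_fl = (old_fl & ~clear_mask) | set_bits;
        } while (cmpxchg(flags_word, old_fl, new_fl) != old_fl);
}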
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 3a6c92ac131c..52abfa12762a 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c | |||
@@ -960,6 +960,9 @@ mext_check_arguments(struct inode *orig_inode, | |||
960 | return -EINVAL; | 960 | return -EINVAL; |
961 | } | 961 | } |
962 | 962 | ||
963 | if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode)) | ||
964 | return -EPERM; | ||
965 | |||
963 | /* Ext4 move extent does not support swapfile */ | 966 | /* Ext4 move extent does not support swapfile */ |
964 | if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) { | 967 | if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) { |
965 | ext4_debug("ext4 move extent: The argument files should " | 968 | ext4_debug("ext4 move extent: The argument files should " |
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c index a33aab6b5e68..54a92fd02bbd 100644 --- a/fs/jffs2/acl.c +++ b/fs/jffs2/acl.c | |||
@@ -234,8 +234,9 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl) | |||
234 | if (inode->i_mode != mode) { | 234 | if (inode->i_mode != mode) { |
235 | struct iattr attr; | 235 | struct iattr attr; |
236 | 236 | ||
237 | attr.ia_valid = ATTR_MODE; | 237 | attr.ia_valid = ATTR_MODE | ATTR_CTIME; |
238 | attr.ia_mode = mode; | 238 | attr.ia_mode = mode; |
239 | attr.ia_ctime = CURRENT_TIME_SEC; | ||
239 | rc = jffs2_do_setattr(inode, &attr); | 240 | rc = jffs2_do_setattr(inode, &attr); |
240 | if (rc < 0) | 241 | if (rc < 0) |
241 | return rc; | 242 | return rc; |
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 7aa4417e085f..166062a68230 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c | |||
@@ -222,15 +222,18 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode, | |||
222 | dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime)); | 222 | dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime)); |
223 | 223 | ||
224 | jffs2_free_raw_inode(ri); | 224 | jffs2_free_raw_inode(ri); |
225 | d_instantiate(dentry, inode); | ||
226 | 225 | ||
227 | D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n", | 226 | D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n", |
228 | inode->i_ino, inode->i_mode, inode->i_nlink, | 227 | inode->i_ino, inode->i_mode, inode->i_nlink, |
229 | f->inocache->pino_nlink, inode->i_mapping->nrpages)); | 228 | f->inocache->pino_nlink, inode->i_mapping->nrpages)); |
229 | |||
230 | d_instantiate(dentry, inode); | ||
231 | unlock_new_inode(inode); | ||
230 | return 0; | 232 | return 0; |
231 | 233 | ||
232 | fail: | 234 | fail: |
233 | make_bad_inode(inode); | 235 | make_bad_inode(inode); |
236 | unlock_new_inode(inode); | ||
234 | iput(inode); | 237 | iput(inode); |
235 | jffs2_free_raw_inode(ri); | 238 | jffs2_free_raw_inode(ri); |
236 | return ret; | 239 | return ret; |
@@ -360,8 +363,8 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
360 | /* Eeek. Wave bye bye */ | 363 | /* Eeek. Wave bye bye */ |
361 | mutex_unlock(&f->sem); | 364 | mutex_unlock(&f->sem); |
362 | jffs2_complete_reservation(c); | 365 | jffs2_complete_reservation(c); |
363 | jffs2_clear_inode(inode); | 366 | ret = PTR_ERR(fn); |
364 | return PTR_ERR(fn); | 367 | goto fail; |
365 | } | 368 | } |
366 | 369 | ||
367 | /* We use f->target field to store the target path. */ | 370 | /* We use f->target field to store the target path. */ |
@@ -370,8 +373,8 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
370 | printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1); | 373 | printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1); |
371 | mutex_unlock(&f->sem); | 374 | mutex_unlock(&f->sem); |
372 | jffs2_complete_reservation(c); | 375 | jffs2_complete_reservation(c); |
373 | jffs2_clear_inode(inode); | 376 | ret = -ENOMEM; |
374 | return -ENOMEM; | 377 | goto fail; |
375 | } | 378 | } |
376 | 379 | ||
377 | memcpy(f->target, target, targetlen + 1); | 380 | memcpy(f->target, target, targetlen + 1); |
@@ -386,30 +389,24 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
386 | jffs2_complete_reservation(c); | 389 | jffs2_complete_reservation(c); |
387 | 390 | ||
388 | ret = jffs2_init_security(inode, dir_i); | 391 | ret = jffs2_init_security(inode, dir_i); |
389 | if (ret) { | 392 | if (ret) |
390 | jffs2_clear_inode(inode); | 393 | goto fail; |
391 | return ret; | 394 | |
392 | } | ||
393 | ret = jffs2_init_acl_post(inode); | 395 | ret = jffs2_init_acl_post(inode); |
394 | if (ret) { | 396 | if (ret) |
395 | jffs2_clear_inode(inode); | 397 | goto fail; |
396 | return ret; | ||
397 | } | ||
398 | 398 | ||
399 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, | 399 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, |
400 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | 400 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); |
401 | if (ret) { | 401 | if (ret) |
402 | /* Eep. */ | 402 | goto fail; |
403 | jffs2_clear_inode(inode); | ||
404 | return ret; | ||
405 | } | ||
406 | 403 | ||
407 | rd = jffs2_alloc_raw_dirent(); | 404 | rd = jffs2_alloc_raw_dirent(); |
408 | if (!rd) { | 405 | if (!rd) { |
409 | /* Argh. Now we treat it like a normal delete */ | 406 | /* Argh. Now we treat it like a normal delete */ |
410 | jffs2_complete_reservation(c); | 407 | jffs2_complete_reservation(c); |
411 | jffs2_clear_inode(inode); | 408 | ret = -ENOMEM; |
412 | return -ENOMEM; | 409 | goto fail; |
413 | } | 410 | } |
414 | 411 | ||
415 | dir_f = JFFS2_INODE_INFO(dir_i); | 412 | dir_f = JFFS2_INODE_INFO(dir_i); |
@@ -437,8 +434,8 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
437 | jffs2_complete_reservation(c); | 434 | jffs2_complete_reservation(c); |
438 | jffs2_free_raw_dirent(rd); | 435 | jffs2_free_raw_dirent(rd); |
439 | mutex_unlock(&dir_f->sem); | 436 | mutex_unlock(&dir_f->sem); |
440 | jffs2_clear_inode(inode); | 437 | ret = PTR_ERR(fd); |
441 | return PTR_ERR(fd); | 438 | goto fail; |
442 | } | 439 | } |
443 | 440 | ||
444 | dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime)); | 441 | dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime)); |
@@ -453,7 +450,14 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
453 | jffs2_complete_reservation(c); | 450 | jffs2_complete_reservation(c); |
454 | 451 | ||
455 | d_instantiate(dentry, inode); | 452 | d_instantiate(dentry, inode); |
453 | unlock_new_inode(inode); | ||
456 | return 0; | 454 | return 0; |
455 | |||
456 | fail: | ||
457 | make_bad_inode(inode); | ||
458 | unlock_new_inode(inode); | ||
459 | iput(inode); | ||
460 | return ret; | ||
457 | } | 461 | } |
458 | 462 | ||
459 | 463 | ||
@@ -519,8 +523,8 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
519 | /* Eeek. Wave bye bye */ | 523 | /* Eeek. Wave bye bye */ |
520 | mutex_unlock(&f->sem); | 524 | mutex_unlock(&f->sem); |
521 | jffs2_complete_reservation(c); | 525 | jffs2_complete_reservation(c); |
522 | jffs2_clear_inode(inode); | 526 | ret = PTR_ERR(fn); |
523 | return PTR_ERR(fn); | 527 | goto fail; |
524 | } | 528 | } |
525 | /* No data here. Only a metadata node, which will be | 529 | /* No data here. Only a metadata node, which will be |
526 | obsoleted by the first data write | 530 | obsoleted by the first data write |
@@ -531,30 +535,24 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
531 | jffs2_complete_reservation(c); | 535 | jffs2_complete_reservation(c); |
532 | 536 | ||
533 | ret = jffs2_init_security(inode, dir_i); | 537 | ret = jffs2_init_security(inode, dir_i); |
534 | if (ret) { | 538 | if (ret) |
535 | jffs2_clear_inode(inode); | 539 | goto fail; |
536 | return ret; | 540 | |
537 | } | ||
538 | ret = jffs2_init_acl_post(inode); | 541 | ret = jffs2_init_acl_post(inode); |
539 | if (ret) { | 542 | if (ret) |
540 | jffs2_clear_inode(inode); | 543 | goto fail; |
541 | return ret; | ||
542 | } | ||
543 | 544 | ||
544 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, | 545 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, |
545 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | 546 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); |
546 | if (ret) { | 547 | if (ret) |
547 | /* Eep. */ | 548 | goto fail; |
548 | jffs2_clear_inode(inode); | ||
549 | return ret; | ||
550 | } | ||
551 | 549 | ||
552 | rd = jffs2_alloc_raw_dirent(); | 550 | rd = jffs2_alloc_raw_dirent(); |
553 | if (!rd) { | 551 | if (!rd) { |
554 | /* Argh. Now we treat it like a normal delete */ | 552 | /* Argh. Now we treat it like a normal delete */ |
555 | jffs2_complete_reservation(c); | 553 | jffs2_complete_reservation(c); |
556 | jffs2_clear_inode(inode); | 554 | ret = -ENOMEM; |
557 | return -ENOMEM; | 555 | goto fail; |
558 | } | 556 | } |
559 | 557 | ||
560 | dir_f = JFFS2_INODE_INFO(dir_i); | 558 | dir_f = JFFS2_INODE_INFO(dir_i); |
@@ -582,8 +580,8 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
582 | jffs2_complete_reservation(c); | 580 | jffs2_complete_reservation(c); |
583 | jffs2_free_raw_dirent(rd); | 581 | jffs2_free_raw_dirent(rd); |
584 | mutex_unlock(&dir_f->sem); | 582 | mutex_unlock(&dir_f->sem); |
585 | jffs2_clear_inode(inode); | 583 | ret = PTR_ERR(fd); |
586 | return PTR_ERR(fd); | 584 | goto fail; |
587 | } | 585 | } |
588 | 586 | ||
589 | dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime)); | 587 | dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime)); |
@@ -599,7 +597,14 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode) | |||
599 | jffs2_complete_reservation(c); | 597 | jffs2_complete_reservation(c); |
600 | 598 | ||
601 | d_instantiate(dentry, inode); | 599 | d_instantiate(dentry, inode); |
600 | unlock_new_inode(inode); | ||
602 | return 0; | 601 | return 0; |
602 | |||
603 | fail: | ||
604 | make_bad_inode(inode); | ||
605 | unlock_new_inode(inode); | ||
606 | iput(inode); | ||
607 | return ret; | ||
603 | } | 608 | } |
604 | 609 | ||
605 | static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) | 610 | static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) |
@@ -693,8 +698,8 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
693 | /* Eeek. Wave bye bye */ | 698 | /* Eeek. Wave bye bye */ |
694 | mutex_unlock(&f->sem); | 699 | mutex_unlock(&f->sem); |
695 | jffs2_complete_reservation(c); | 700 | jffs2_complete_reservation(c); |
696 | jffs2_clear_inode(inode); | 701 | ret = PTR_ERR(fn); |
697 | return PTR_ERR(fn); | 702 | goto fail; |
698 | } | 703 | } |
699 | /* No data here. Only a metadata node, which will be | 704 | /* No data here. Only a metadata node, which will be |
700 | obsoleted by the first data write | 705 | obsoleted by the first data write |
@@ -705,30 +710,24 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
705 | jffs2_complete_reservation(c); | 710 | jffs2_complete_reservation(c); |
706 | 711 | ||
707 | ret = jffs2_init_security(inode, dir_i); | 712 | ret = jffs2_init_security(inode, dir_i); |
708 | if (ret) { | 713 | if (ret) |
709 | jffs2_clear_inode(inode); | 714 | goto fail; |
710 | return ret; | 715 | |
711 | } | ||
712 | ret = jffs2_init_acl_post(inode); | 716 | ret = jffs2_init_acl_post(inode); |
713 | if (ret) { | 717 | if (ret) |
714 | jffs2_clear_inode(inode); | 718 | goto fail; |
715 | return ret; | ||
716 | } | ||
717 | 719 | ||
718 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, | 720 | ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen, |
719 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); | 721 | ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); |
720 | if (ret) { | 722 | if (ret) |
721 | /* Eep. */ | 723 | goto fail; |
722 | jffs2_clear_inode(inode); | ||
723 | return ret; | ||
724 | } | ||
725 | 724 | ||
726 | rd = jffs2_alloc_raw_dirent(); | 725 | rd = jffs2_alloc_raw_dirent(); |
727 | if (!rd) { | 726 | if (!rd) { |
728 | /* Argh. Now we treat it like a normal delete */ | 727 | /* Argh. Now we treat it like a normal delete */ |
729 | jffs2_complete_reservation(c); | 728 | jffs2_complete_reservation(c); |
730 | jffs2_clear_inode(inode); | 729 | ret = -ENOMEM; |
731 | return -ENOMEM; | 730 | goto fail; |
732 | } | 731 | } |
733 | 732 | ||
734 | dir_f = JFFS2_INODE_INFO(dir_i); | 733 | dir_f = JFFS2_INODE_INFO(dir_i); |
@@ -759,8 +758,8 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
759 | jffs2_complete_reservation(c); | 758 | jffs2_complete_reservation(c); |
760 | jffs2_free_raw_dirent(rd); | 759 | jffs2_free_raw_dirent(rd); |
761 | mutex_unlock(&dir_f->sem); | 760 | mutex_unlock(&dir_f->sem); |
762 | jffs2_clear_inode(inode); | 761 | ret = PTR_ERR(fd); |
763 | return PTR_ERR(fd); | 762 | goto fail; |
764 | } | 763 | } |
765 | 764 | ||
766 | dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime)); | 765 | dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(rd->mctime)); |
@@ -775,8 +774,14 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de | |||
775 | jffs2_complete_reservation(c); | 774 | jffs2_complete_reservation(c); |
776 | 775 | ||
777 | d_instantiate(dentry, inode); | 776 | d_instantiate(dentry, inode); |
778 | 777 | unlock_new_inode(inode); | |
779 | return 0; | 778 | return 0; |
779 | |||
780 | fail: | ||
781 | make_bad_inode(inode); | ||
782 | unlock_new_inode(inode); | ||
783 | iput(inode); | ||
784 | return ret; | ||
780 | } | 785 | } |
781 | 786 | ||
782 | static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, | 787 | static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, |
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index 8bc2c80ab159..459d39d1ea0b 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c | |||
@@ -465,7 +465,12 @@ struct inode *jffs2_new_inode (struct inode *dir_i, int mode, struct jffs2_raw_i | |||
465 | inode->i_blocks = 0; | 465 | inode->i_blocks = 0; |
466 | inode->i_size = 0; | 466 | inode->i_size = 0; |
467 | 467 | ||
468 | insert_inode_hash(inode); | 468 | if (insert_inode_locked(inode) < 0) { |
469 | make_bad_inode(inode); | ||
470 | unlock_new_inode(inode); | ||
471 | iput(inode); | ||
472 | return ERR_PTR(-EINVAL); | ||
473 | } | ||
469 | 474 | ||
470 | return inode; | 475 | return inode; |
471 | } | 476 | } |
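With jffs2_new_inode() now hashing the new inode via insert_inode_locked(), every creation path has to pair it with unlock_new_inode() on both success and failure, which is why the dir.c paths above funnel their errors through a common fail: label. A condensed sketch of that shape (illustrative; example_create() and do_the_actual_work() are hypothetical stand-ins for the write-out and dirent steps):

/* Condensed, illustrative shape of the jffs2 creation paths after this change. */
static int example_create(struct inode *dir_i, struct dentry *dentry, int mode,
                          struct jffs2_raw_inode *ri)
{
        struct inode *inode = jffs2_new_inode(dir_i, mode, ri); /* locked + hashed */
        int ret;

        if (IS_ERR(inode))
                return PTR_ERR(inode);

        ret = do_the_actual_work(inode);        /* hypothetical, may fail */
        if (ret)
                goto fail;

        d_instantiate(dentry, inode);
        unlock_new_inode(inode);                /* pairs with insert_inode_locked() */
        return 0;

 fail:
        make_bad_inode(inode);
        unlock_new_inode(inode);
        iput(inode);
        return ret;
}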
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 12f7109720c2..4a2734758778 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -4122,8 +4122,8 @@ nfs4_state_shutdown(void) | |||
4122 | nfs4_lock_state(); | 4122 | nfs4_lock_state(); |
4123 | nfs4_release_reclaim(); | 4123 | nfs4_release_reclaim(); |
4124 | __nfs4_state_shutdown(); | 4124 | __nfs4_state_shutdown(); |
4125 | nfsd4_destroy_callback_queue(); | ||
4126 | nfs4_unlock_state(); | 4125 | nfs4_unlock_state(); |
4126 | nfsd4_destroy_callback_queue(); | ||
4127 | } | 4127 | } |
4128 | 4128 | ||
4129 | /* | 4129 | /* |
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index ebbf3b6b2457..3c111120b619 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -443,8 +443,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, | |||
443 | if (size_change) | 443 | if (size_change) |
444 | put_write_access(inode); | 444 | put_write_access(inode); |
445 | if (!err) | 445 | if (!err) |
446 | if (EX_ISSYNC(fhp->fh_export)) | 446 | commit_metadata(fhp); |
447 | write_inode_now(inode, 1); | ||
448 | out: | 447 | out: |
449 | return err; | 448 | return err; |
450 | 449 | ||
diff --git a/fs/pipe.c b/fs/pipe.c | |||
@@ -1145,13 +1145,20 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages) | |||
1145 | * and adjust the indexes. | 1145 | * and adjust the indexes. |
1146 | */ | 1146 | */ |
1147 | if (pipe->nrbufs) { | 1147 | if (pipe->nrbufs) { |
1148 | const unsigned int tail = pipe->nrbufs & (pipe->buffers - 1); | 1148 | unsigned int tail; |
1149 | const unsigned int head = pipe->nrbufs - tail; | 1149 | unsigned int head; |
1150 | 1150 | ||
1151 | tail = pipe->curbuf + pipe->nrbufs; | ||
1152 | if (tail < pipe->buffers) | ||
1153 | tail = 0; | ||
1154 | else | ||
1155 | tail &= (pipe->buffers - 1); | ||
1156 | |||
1157 | head = pipe->nrbufs - tail; | ||
1151 | if (head) | 1158 | if (head) |
1152 | memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer)); | 1159 | memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer)); |
1153 | if (tail) | 1160 | if (tail) |
1154 | memcpy(bufs + head, pipe->bufs + pipe->curbuf, tail * sizeof(struct pipe_buffer)); | 1161 | memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer)); |
1155 | } | 1162 | } |
1156 | 1163 | ||
1157 | pipe->curbuf = 0; | 1164 | pipe->curbuf = 0; |
@@ -1208,12 +1215,13 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) | |||
1208 | size = round_pipe_size(arg); | 1215 | size = round_pipe_size(arg); |
1209 | nr_pages = size >> PAGE_SHIFT; | 1216 | nr_pages = size >> PAGE_SHIFT; |
1210 | 1217 | ||
1218 | ret = -EINVAL; | ||
1219 | if (!nr_pages) | ||
1220 | goto out; | ||
1221 | |||
1211 | if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) { | 1222 | if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) { |
1212 | ret = -EPERM; | 1223 | ret = -EPERM; |
1213 | goto out; | 1224 | goto out; |
1214 | } else if (nr_pages < PAGE_SIZE) { | ||
1215 | ret = -EINVAL; | ||
1216 | goto out; | ||
1217 | } | 1225 | } |
1218 | ret = pipe_set_size(pipe, nr_pages); | 1226 | ret = pipe_set_size(pipe, nr_pages); |
1219 | break; | 1227 | break; |
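The pipe_set_size() fix recomputes the wrap point of the circular pipe->bufs[] array from curbuf + nrbufs instead of nrbufs alone, then copies the occupied region into the new array in up to two chunks. A standalone sketch of that computation (illustrative plain C, not the kernel code; copy_ring() is a hypothetical helper):

/* Illustrative sketch: copy the occupied entries of a power-of-two sized
 * circular buffer, which may wrap past the end of src, into a linear dst. */
#include <string.h>

static void copy_ring(void *dst, const void *src, size_t elem_size,
                      unsigned int curbuf, unsigned int nrbufs,
                      unsigned int buffers /* power of two */)
{
        unsigned int tail = curbuf + nrbufs;    /* index one past the last entry */
        unsigned int head;

        if (tail < buffers)
                tail = 0;                       /* no wrap-around */
        else
                tail &= buffers - 1;            /* entries wrapped to the front */
        head = nrbufs - tail;                   /* entries from curbuf to array end */

        if (head)
                memcpy(dst, (const char *)src + (size_t)curbuf * elem_size,
                       (size_t)head * elem_size);
        if (tail)
                memcpy((char *)dst + (size_t)head * elem_size, src,
                       (size_t)tail * elem_size);
}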
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index a0fa3bf0d1bb..34640d6dbdcb 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c | |||
@@ -1381,14 +1381,6 @@ xfs_vm_writepage( | |||
1381 | if (!page_has_buffers(page)) | 1381 | if (!page_has_buffers(page)) |
1382 | create_empty_buffers(page, 1 << inode->i_blkbits, 0); | 1382 | create_empty_buffers(page, 1 << inode->i_blkbits, 0); |
1383 | 1383 | ||
1384 | |||
1385 | /* | ||
1386 | * VM calculation for nr_to_write seems off. Bump it way | ||
1387 | * up, this gets simple streaming writes zippy again. | ||
1388 | * To be reviewed again after Jens' writeback changes. | ||
1389 | */ | ||
1390 | wbc->nr_to_write *= 4; | ||
1391 | |||
1392 | /* | 1384 | /* |
1393 | * Convert delayed allocate, unwritten or unmapped space | 1385 | * Convert delayed allocate, unwritten or unmapped space |
1394 | * to real space and flush out to disk. | 1386 | * to real space and flush out to disk. |