author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-01-25 15:34:27 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-01-25 15:34:27 -0500
commit	74790147fb4e9b71083a62a525ab283a275d63b7 (patch)
tree	2805293e7f1806ce2fd804255f868ab94edeae88 /fs
parent	f6dcf8e747a0723ace5275334bacfcd88ab39333 (diff)
parent	949db153b6466c6f7cad5a427ecea94985927311 (diff)

Merge 3.8-rc5 into char-misc-next

This pulls in all of the 3.8-rc5 fixes into this branch so we can test easier.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'fs')
-rw-r--r--	fs/Kconfig	10
-rw-r--r--	fs/btrfs/extent-tree.c	6
-rw-r--r--	fs/btrfs/extent_map.c	13
-rw-r--r--	fs/btrfs/extent_map.h	1
-rw-r--r--	fs/btrfs/file-item.c	4
-rw-r--r--	fs/btrfs/file.c	10
-rw-r--r--	fs/btrfs/free-space-cache.c	20
-rw-r--r--	fs/btrfs/inode.c	137
-rw-r--r--	fs/btrfs/ioctl.c	129
-rw-r--r--	fs/btrfs/qgroup.c	20
-rw-r--r--	fs/btrfs/send.c	4
-rw-r--r--	fs/btrfs/super.c	2
-rw-r--r--	fs/btrfs/transaction.c	19
-rw-r--r--	fs/btrfs/tree-log.c	10
-rw-r--r--	fs/btrfs/volumes.c	23
-rw-r--r--	fs/cifs/cifs_dfs_ref.c	2
-rw-r--r--	fs/cifs/connect.c	2
-rw-r--r--	fs/f2fs/acl.c	13
-rw-r--r--	fs/f2fs/checkpoint.c	3
-rw-r--r--	fs/f2fs/data.c	17
-rw-r--r--	fs/f2fs/debug.c	50
-rw-r--r--	fs/f2fs/dir.c	2
-rw-r--r--	fs/f2fs/f2fs.h	18
-rw-r--r--	fs/f2fs/file.c	16
-rw-r--r--	fs/f2fs/gc.c	68
-rw-r--r--	fs/f2fs/inode.c	3
-rw-r--r--	fs/f2fs/node.c	19
-rw-r--r--	fs/f2fs/recovery.c	10
-rw-r--r--	fs/f2fs/segment.c	2
-rw-r--r--	fs/f2fs/super.c	97
-rw-r--r--	fs/f2fs/xattr.c	2
-rw-r--r--	fs/fuse/Kconfig	16
-rw-r--r--	fs/fuse/cuse.c	36
-rw-r--r--	fs/fuse/dev.c	5
-rw-r--r--	fs/fuse/file.c	5
35 files changed, 530 insertions, 264 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index cfe512fd1caf..780725a463b1 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -68,16 +68,6 @@ source "fs/quota/Kconfig"
 source "fs/autofs4/Kconfig"
 source "fs/fuse/Kconfig"
 
-config CUSE
-	tristate "Character device in Userspace support"
-	depends on FUSE_FS
-	help
-	  This FUSE extension allows character devices to be
-	  implemented in userspace.
-
-	  If you want to develop or use userspace character device
-	  based on CUSE, answer Y or M.
-
 config GENERIC_ACL
 	bool
 	select FS_POSIX_ACL
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 521e9d4424f6..a8b8adc05070 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3997,7 +3997,7 @@ again:
	 * We make the other tasks wait for the flush only when we can flush
	 * all things.
	 */
-	if (ret && flush == BTRFS_RESERVE_FLUSH_ALL) {
+	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		flushing = true;
		space_info->flush = 1;
	}
@@ -5560,7 +5560,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
	int empty_cluster = 2 * 1024 * 1024;
	struct btrfs_space_info *space_info;
	int loop = 0;
-	int index = 0;
+	int index = __get_raid_index(data);
	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
	bool found_uncached_bg = false;
@@ -6788,11 +6788,13 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
					       &wc->flags[level]);
		if (ret < 0) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
+			path->locks[level] = 0;
			return ret;
		}
		BUG_ON(wc->refs[level] == 0);
		if (wc->refs[level] == 1) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
+			path->locks[level] = 0;
			return 1;
		}
	}
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index f169d6b11d7f..2e8cae63d247 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -171,6 +171,10 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
	if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
		return 0;
 
+	if (test_bit(EXTENT_FLAG_LOGGING, &prev->flags) ||
+	    test_bit(EXTENT_FLAG_LOGGING, &next->flags))
+		return 0;
+
	if (extent_map_end(prev) == next->start &&
	    prev->flags == next->flags &&
	    prev->bdev == next->bdev &&
@@ -255,7 +259,8 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
	if (!em)
		goto out;
 
-	list_move(&em->list, &tree->modified_extents);
+	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
+		list_move(&em->list, &tree->modified_extents);
	em->generation = gen;
	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
	em->mod_start = em->start;
@@ -280,6 +285,12 @@ out:
 
 }
 
+void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
+{
+	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
+	try_merge_map(tree, em);
+}
+
 /**
  * add_extent_mapping - add new extent map to the extent tree
  * @tree:	tree to insert new map in
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 922943ce29e8..c6598c89cff8 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -69,6 +69,7 @@ void free_extent_map(struct extent_map *em);
 int __init extent_map_init(void);
 void extent_map_exit(void);
 int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen);
+void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em);
 struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 len);
 #endif
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index bd38cef42358..94aa53b38721 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -460,8 +460,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
		if (!contig)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
 
-		if (!contig && (offset >= ordered->file_offset + ordered->len ||
-		    offset < ordered->file_offset)) {
+		if (offset >= ordered->file_offset + ordered->len ||
+		    offset < ordered->file_offset) {
			unsigned long bytes_left;
			sums->len = this_sum_bytes;
			this_sum_bytes = 0;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 77061bf43edb..f76b1fd160d4 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2241,6 +2241,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;
 
+	lockend--;
	len = lockend - lockstart + 1;
 
	len = max_t(u64, len, root->sectorsize);
@@ -2307,9 +2308,12 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
				}
			}
 
-			*offset = start;
-			free_extent_map(em);
-			break;
+			if (!test_bit(EXTENT_FLAG_PREALLOC,
+				      &em->flags)) {
+				*offset = start;
+				free_extent_map(em);
+				break;
+			}
		}
	}
 
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 59ea2e4349c9..0be7a8742a43 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1862,11 +1862,13 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 {
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
-	int ret = 0;
+	int ret;
+	bool re_search = false;
 
	spin_lock(&ctl->tree_lock);
 
 again:
+	ret = 0;
	if (!bytes)
		goto out_lock;
 
@@ -1879,17 +1881,17 @@ again:
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
-			/* the tree logging code might be calling us before we
-			 * have fully loaded the free space rbtree for this
-			 * block group.  So it is possible the entry won't
-			 * be in the rbtree yet at all.  The caching code
-			 * will make sure not to put it in the rbtree if
-			 * the logging code has pinned it.
+			/*
+			 * If we found a partial bit of our free space in a
+			 * bitmap but then couldn't find the other part this may
+			 * be a problem, so WARN about it.
			 */
+			WARN_ON(re_search);
			goto out_lock;
		}
	}
 
+	re_search = false;
	if (!info->bitmap) {
		unlink_free_space(ctl, info);
		if (offset == info->offset) {
@@ -1935,8 +1937,10 @@ again:
	}
 
	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
-	if (ret == -EAGAIN)
+	if (ret == -EAGAIN) {
+		re_search = true;
		goto again;
+	}
	BUG_ON(ret);	/* logic error */
 out_lock:
	spin_unlock(&ctl->tree_lock);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 16d9e8e191e6..cc93b23ca352 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -88,7 +88,7 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
 };
 
-static int btrfs_setsize(struct inode *inode, loff_t newsize);
+static int btrfs_setsize(struct inode *inode, struct iattr *attr);
 static int btrfs_truncate(struct inode *inode);
 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
 static noinline int cow_file_range(struct inode *inode,
@@ -2478,6 +2478,18 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
				continue;
			}
			nr_truncate++;
+
+			/* 1 for the orphan item deletion. */
+			trans = btrfs_start_transaction(root, 1);
+			if (IS_ERR(trans)) {
+				ret = PTR_ERR(trans);
+				goto out;
+			}
+			ret = btrfs_orphan_add(trans, inode);
+			btrfs_end_transaction(trans, root);
+			if (ret)
+				goto out;
+
			ret = btrfs_truncate(inode);
		} else {
			nr_unlink++;
@@ -3665,6 +3677,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
				block_end - cur_offset, 0);
		if (IS_ERR(em)) {
			err = PTR_ERR(em);
+			em = NULL;
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
@@ -3748,16 +3761,27 @@ next:
	return err;
 }
 
-static int btrfs_setsize(struct inode *inode, loff_t newsize)
+static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 {
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
+	loff_t newsize = attr->ia_size;
+	int mask = attr->ia_valid;
	int ret;
 
	if (newsize == oldsize)
		return 0;
 
+	/*
+	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
+	 * special case where we need to update the times despite not having
+	 * these flags set.  For all other operations the VFS set these flags
+	 * explicitly if it wants a timestamp update.
+	 */
+	if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
+		inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
+
	if (newsize > oldsize) {
		truncate_pagecache(inode, oldsize, newsize);
		ret = btrfs_cont_expand(inode, oldsize, newsize);
@@ -3783,9 +3807,34 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
		set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			&BTRFS_I(inode)->runtime_flags);
 
+		/*
+		 * 1 for the orphan item we're going to add
+		 * 1 for the orphan item deletion.
+		 */
+		trans = btrfs_start_transaction(root, 2);
+		if (IS_ERR(trans))
+			return PTR_ERR(trans);
+
+		/*
+		 * We need to do this in case we fail at _any_ point during the
+		 * actual truncate.  Once we do the truncate_setsize we could
+		 * invalidate pages which forces any outstanding ordered io to
+		 * be instantly completed which will give us extents that need
+		 * to be truncated.  If we fail to get an orphan inode down we
+		 * could have left over extents that were never meant to live,
+		 * so we need to garuntee from this point on that everything
+		 * will be consistent.
+		 */
+		ret = btrfs_orphan_add(trans, inode);
+		btrfs_end_transaction(trans, root);
+		if (ret)
+			return ret;
+
		/* we don't support swapfiles, so vmtruncate shouldn't fail */
		truncate_setsize(inode, newsize);
		ret = btrfs_truncate(inode);
+		if (ret && inode->i_nlink)
+			btrfs_orphan_del(NULL, inode);
	}
 
	return ret;
@@ -3805,7 +3854,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
		return err;
 
	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
-		err = btrfs_setsize(inode, attr->ia_size);
+		err = btrfs_setsize(inode, attr);
		if (err)
			return err;
	}
@@ -5572,10 +5621,13 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
		return em;
	if (em) {
		/*
-		 * if our em maps to a hole, there might
-		 * actually be delalloc bytes behind it
+		 * if our em maps to
+		 * -  a hole or
+		 * -  a pre-alloc extent,
+		 * there might actually be delalloc bytes behind it.
		 */
-		if (em->block_start != EXTENT_MAP_HOLE)
+		if (em->block_start != EXTENT_MAP_HOLE &&
+		    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			return em;
		else
			hole_em = em;
@@ -5657,6 +5709,8 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
+			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
+				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		} else {
			em->start = range_start;
			em->len = found;
@@ -6915,11 +6969,9 @@ static int btrfs_truncate(struct inode *inode)
 
	/*
	 * 1 for the truncate slack space
-	 * 1 for the orphan item we're going to add
-	 * 1 for the orphan item deletion
	 * 1 for updating the inode.
	 */
-	trans = btrfs_start_transaction(root, 4);
+	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out;
@@ -6930,12 +6982,6 @@ static int btrfs_truncate(struct inode *inode)
				      min_size);
	BUG_ON(ret);
 
-	ret = btrfs_orphan_add(trans, inode);
-	if (ret) {
-		btrfs_end_transaction(trans, root);
-		goto out;
-	}
-
	/*
	 * setattr is responsible for setting the ordered_data_close flag,
	 * but that is only tested during the last file release. That
@@ -7004,12 +7050,6 @@ static int btrfs_truncate(struct inode *inode)
		ret = btrfs_orphan_del(trans, inode);
		if (ret)
			err = ret;
-	} else if (ret && inode->i_nlink > 0) {
-		/*
-		 * Failed to do the truncate, remove us from the in memory
-		 * orphan list.
-		 */
-		ret = btrfs_orphan_del(NULL, inode);
	}
 
	if (trans) {
@@ -7531,41 +7571,61 @@ void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
  */
 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
 {
-	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
	struct inode *inode;
	struct btrfs_delalloc_work *work, *next;
	struct list_head works;
+	struct list_head splice;
	int ret = 0;
 
	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;
 
	INIT_LIST_HEAD(&works);
-
+	INIT_LIST_HEAD(&splice);
+again:
	spin_lock(&root->fs_info->delalloc_lock);
-	while (!list_empty(head)) {
-		binode = list_entry(head->next, struct btrfs_inode,
+	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
+	while (!list_empty(&splice)) {
+		binode = list_entry(splice.next, struct btrfs_inode,
				    delalloc_inodes);
+
+		list_del_init(&binode->delalloc_inodes);
+
		inode = igrab(&binode->vfs_inode);
		if (!inode)
-			list_del_init(&binode->delalloc_inodes);
+			continue;
+
+		list_add_tail(&binode->delalloc_inodes,
+			      &root->fs_info->delalloc_inodes);
		spin_unlock(&root->fs_info->delalloc_lock);
-		if (inode) {
-			work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
-			if (!work) {
-				ret = -ENOMEM;
-				goto out;
-			}
-			list_add_tail(&work->list, &works);
-			btrfs_queue_worker(&root->fs_info->flush_workers,
-					   &work->work);
+
+		work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
+		if (unlikely(!work)) {
+			ret = -ENOMEM;
+			goto out;
		}
+		list_add_tail(&work->list, &works);
+		btrfs_queue_worker(&root->fs_info->flush_workers,
+				   &work->work);
+
		cond_resched();
		spin_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);
 
+	list_for_each_entry_safe(work, next, &works, list) {
+		list_del_init(&work->list);
+		btrfs_wait_and_free_delalloc_work(work);
+	}
+
+	spin_lock(&root->fs_info->delalloc_lock);
+	if (!list_empty(&root->fs_info->delalloc_inodes)) {
+		spin_unlock(&root->fs_info->delalloc_lock);
+		goto again;
+	}
+	spin_unlock(&root->fs_info->delalloc_lock);
+
	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
@@ -7578,11 +7638,18 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
		    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
+	return 0;
 out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}
+
+	if (!list_empty_careful(&splice)) {
+		spin_lock(&root->fs_info->delalloc_lock);
+		list_splice_tail(&splice, &root->fs_info->delalloc_inodes);
+		spin_unlock(&root->fs_info->delalloc_lock);
+	}
	return ret;
 }
 
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4b4516770f05..5b22d45d3c6a 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1339,7 +1339,8 @@ static noinline int btrfs_ioctl_resize(struct file *file,
	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
			1)) {
		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
-		return -EINPROGRESS;
+		mnt_drop_write_file(file);
+		return -EINVAL;
	}
 
	mutex_lock(&root->fs_info->volume_mutex);
@@ -1362,6 +1363,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
		printk(KERN_INFO "btrfs: resizing devid %llu\n",
		       (unsigned long long)devid);
	}
+
	device = btrfs_find_device(root->fs_info, devid, NULL, NULL);
	if (!device) {
		printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
@@ -1369,9 +1371,10 @@ static noinline int btrfs_ioctl_resize(struct file *file,
		ret = -EINVAL;
		goto out_free;
	}
-	if (device->fs_devices && device->fs_devices->seeding) {
+
+	if (!device->writeable) {
		printk(KERN_INFO "btrfs: resizer unable to apply on "
-		       "seeding device %llu\n",
+		       "readonly device %llu\n",
		       (unsigned long long)devid);
		ret = -EINVAL;
		goto out_free;
@@ -1443,8 +1446,8 @@ out_free:
	kfree(vol_args);
 out:
	mutex_unlock(&root->fs_info->volume_mutex);
-	mnt_drop_write_file(file);
	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+	mnt_drop_write_file(file);
	return ret;
 }
 
@@ -2095,13 +2098,13 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
		err = inode_permission(inode, MAY_WRITE | MAY_EXEC);
		if (err)
			goto out_dput;
-
-		/* check if subvolume may be deleted by a non-root user */
-		err = btrfs_may_delete(dir, dentry, 1);
-		if (err)
-			goto out_dput;
	}
 
+	/* check if subvolume may be deleted by a user */
+	err = btrfs_may_delete(dir, dentry, 1);
+	if (err)
+		goto out_dput;
+
	if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
		err = -EINVAL;
		goto out_dput;
@@ -2183,19 +2186,20 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
	struct btrfs_ioctl_defrag_range_args *range;
	int ret;
 
-	if (btrfs_root_readonly(root))
-		return -EROFS;
+	ret = mnt_want_write_file(file);
+	if (ret)
+		return ret;
 
	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
			1)) {
		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
-		return -EINPROGRESS;
+		mnt_drop_write_file(file);
+		return -EINVAL;
	}
-	ret = mnt_want_write_file(file);
-	if (ret) {
-		atomic_set(&root->fs_info->mutually_exclusive_operation_running,
-			   0);
-		return ret;
+
+	if (btrfs_root_readonly(root)) {
+		ret = -EROFS;
+		goto out;
	}
 
	switch (inode->i_mode & S_IFMT) {
@@ -2247,8 +2251,8 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
		ret = -EINVAL;
	}
 out:
-	mnt_drop_write_file(file);
	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+	mnt_drop_write_file(file);
	return ret;
 }
 
@@ -2263,7 +2267,7 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
			1)) {
		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
-		return -EINPROGRESS;
+		return -EINVAL;
	}
 
	mutex_lock(&root->fs_info->volume_mutex);
@@ -2300,7 +2304,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
			1)) {
		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
		mnt_drop_write_file(file);
-		return -EINPROGRESS;
+		return -EINVAL;
	}
 
	mutex_lock(&root->fs_info->volume_mutex);
@@ -2316,8 +2320,8 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
	kfree(vol_args);
 out:
	mutex_unlock(&root->fs_info->volume_mutex);
-	mnt_drop_write_file(file);
	atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);
+	mnt_drop_write_file(file);
	return ret;
 }
 
@@ -3437,8 +3441,8 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ioctl_balance_args *bargs;
	struct btrfs_balance_control *bctl;
+	bool need_unlock; /* for mut. excl. ops lock */
	int ret;
-	int need_to_clear_lock = 0;
 
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
@@ -3447,14 +3451,61 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
	if (ret)
		return ret;
 
-	mutex_lock(&fs_info->volume_mutex);
+again:
+	if (!atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1)) {
+		mutex_lock(&fs_info->volume_mutex);
+		mutex_lock(&fs_info->balance_mutex);
+		need_unlock = true;
+		goto locked;
+	}
+
+	/*
+	 * mut. excl. ops lock is locked.  Three possibilites:
+	 * (1) some other op is running
+	 * (2) balance is running
+	 * (3) balance is paused -- special case (think resume)
+	 */
	mutex_lock(&fs_info->balance_mutex);
+	if (fs_info->balance_ctl) {
+		/* this is either (2) or (3) */
+		if (!atomic_read(&fs_info->balance_running)) {
+			mutex_unlock(&fs_info->balance_mutex);
+			if (!mutex_trylock(&fs_info->volume_mutex))
+				goto again;
+			mutex_lock(&fs_info->balance_mutex);
+
+			if (fs_info->balance_ctl &&
+			    !atomic_read(&fs_info->balance_running)) {
+				/* this is (3) */
+				need_unlock = false;
+				goto locked;
+			}
+
+			mutex_unlock(&fs_info->balance_mutex);
+			mutex_unlock(&fs_info->volume_mutex);
+			goto again;
+		} else {
+			/* this is (2) */
+			mutex_unlock(&fs_info->balance_mutex);
+			ret = -EINPROGRESS;
+			goto out;
+		}
+	} else {
+		/* this is (1) */
+		mutex_unlock(&fs_info->balance_mutex);
+		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+locked:
+	BUG_ON(!atomic_read(&fs_info->mutually_exclusive_operation_running));
 
	if (arg) {
		bargs = memdup_user(arg, sizeof(*bargs));
		if (IS_ERR(bargs)) {
			ret = PTR_ERR(bargs);
-			goto out;
+			goto out_unlock;
		}
 
		if (bargs->flags & BTRFS_BALANCE_RESUME) {
@@ -3474,13 +3525,10 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
		bargs = NULL;
	}
 
-	if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
-			1)) {
-		pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
+	if (fs_info->balance_ctl) {
		ret = -EINPROGRESS;
		goto out_bargs;
	}
-	need_to_clear_lock = 1;
 
	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
@@ -3501,11 +3549,17 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
	}
 
 do_balance:
-	ret = btrfs_balance(bctl, bargs);
	/*
-	 * bctl is freed in __cancel_balance or in free_fs_info if
-	 * restriper was paused all the way until unmount
+	 * Ownership of bctl and mutually_exclusive_operation_running
+	 * goes to to btrfs_balance.  bctl is freed in __cancel_balance,
+	 * or, if restriper was paused all the way until unmount, in
+	 * free_fs_info.  mutually_exclusive_operation_running is
+	 * cleared in __cancel_balance.
	 */
+	need_unlock = false;
+
+	ret = btrfs_balance(bctl, bargs);
+
	if (arg) {
		if (copy_to_user(arg, bargs, sizeof(*bargs)))
			ret = -EFAULT;
@@ -3513,12 +3567,12 @@ do_balance:
 
 out_bargs:
	kfree(bargs);
-out:
-	if (need_to_clear_lock)
-		atomic_set(&root->fs_info->mutually_exclusive_operation_running,
-			   0);
+out_unlock:
	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);
+	if (need_unlock)
+		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+out:
	mnt_drop_write_file(file);
	return ret;
 }
@@ -3698,6 +3752,11 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg)
		goto drop_write;
	}
 
+	if (!sa->qgroupid) {
+		ret = -EINVAL;
+		goto out;
+	}
+
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index fe9d02c45f8e..a5c856234323 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -379,6 +379,13 @@ next1:
 
		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
+		if (ret == -ENOENT) {
+			printk(KERN_WARNING
+				"btrfs: orphan qgroup relation 0x%llx->0x%llx\n",
+				(unsigned long long)found_key.objectid,
+				(unsigned long long)found_key.offset);
+			ret = 0;	/* ignore the error */
+		}
		if (ret)
			goto out;
 next2:
@@ -956,17 +963,28 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
		  struct btrfs_fs_info *fs_info, u64 qgroupid)
 {
	struct btrfs_root *quota_root;
+	struct btrfs_qgroup *qgroup;
	int ret = 0;
 
	quota_root = fs_info->quota_root;
	if (!quota_root)
		return -EINVAL;
 
+	/* check if there are no relations to this qgroup */
+	spin_lock(&fs_info->qgroup_lock);
+	qgroup = find_qgroup_rb(fs_info, qgroupid);
+	if (qgroup) {
+		if (!list_empty(&qgroup->groups) || !list_empty(&qgroup->members)) {
+			spin_unlock(&fs_info->qgroup_lock);
+			return -EBUSY;
+		}
+	}
+	spin_unlock(&fs_info->qgroup_lock);
+
	ret = del_qgroup_item(trans, quota_root, qgroupid);
 
	spin_lock(&fs_info->qgroup_lock);
	del_qgroup_rb(quota_root->fs_info, qgroupid);
-
	spin_unlock(&fs_info->qgroup_lock);
 
	return ret;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 54454542ad40..321b7fb4e441 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1814,8 +1814,10 @@ static int name_cache_insert(struct send_ctx *sctx,
			(unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
-		if (!nce_head)
+		if (!nce_head) {
+			kfree(nce);
			return -ENOMEM;
+		}
		INIT_LIST_HEAD(nce_head);
 
		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 99545df1b86c..d8982e9601d3 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
			function, line, errstr);
		return;
	}
-	trans->transaction->aborted = errno;
+	ACCESS_ONCE(trans->transaction->aborted) = errno;
	__btrfs_std_error(root->fs_info, function, line, errno, NULL);
 }
 /*
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 87fac9a21ea5..f15494699f3b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1468,7 +1468,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
		goto cleanup_transaction;
	}
 
-	if (cur_trans->aborted) {
+	/* Stop the commit early if ->aborted is set */
+	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto cleanup_transaction;
	}
@@ -1574,6 +1575,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);
 
+	/* ->aborted might be set after the previous check, so check it */
+	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
+		ret = cur_trans->aborted;
+		goto cleanup_transaction;
+	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
@@ -1657,6 +1663,17 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
		goto cleanup_transaction;
	}
 
+	/*
+	 * The tasks which save the space cache and inode cache may also
+	 * update ->aborted, check it.
+	 */
+	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
+		ret = cur_trans->aborted;
+		mutex_unlock(&root->fs_info->tree_log_mutex);
+		mutex_unlock(&root->fs_info->reloc_mutex);
+		goto cleanup_transaction;
+	}
+
	btrfs_prepare_extent_commit(trans, root);
 
	cur_trans = root->fs_info->running_transaction;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 83186c7e45d4..9027bb1e7466 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3357,6 +3357,11 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
	if (skip_csum)
		return 0;
 
+	if (em->compress_type) {
+		csum_offset = 0;
+		csum_len = block_len;
+	}
+
	/* block start is already adjusted for the file extent offset. */
	ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
				       em->block_start + csum_offset,
@@ -3410,13 +3415,13 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
		em = list_entry(extents.next, struct extent_map, list);
 
		list_del_init(&em->list);
-		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
 
		/*
		 * If we had an error we just need to delete everybody from our
		 * private list.
		 */
		if (ret) {
+			clear_em_logging(tree, em);
			free_extent_map(em);
			continue;
		}
@@ -3424,8 +3429,9 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
		write_unlock(&tree->lock);
 
		ret = log_one_extent(trans, inode, root, em, path);
-		free_extent_map(em);
		write_lock(&tree->lock);
+		clear_em_logging(tree, em);
+		free_extent_map(em);
	}
	WARN_ON(!list_empty(&extents));
	write_unlock(&tree->lock);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5cce6aa74012..15f6efdf6463 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1431,7 +1431,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
		}
	} else {
		ret = btrfs_get_bdev_and_sb(device_path,
-					    FMODE_READ | FMODE_EXCL,
+					    FMODE_WRITE | FMODE_EXCL,
					    root->fs_info->bdev_holder, 0,
					    &bdev, &bh);
		if (ret)
@@ -2614,7 +2614,14 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	chunk_used = btrfs_block_group_used(&cache->item);
 
-	user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
+	if (bargs->usage == 0)
+		user_thresh = 0;
+	else if (bargs->usage > 100)
+		user_thresh = cache->key.offset;
+	else
+		user_thresh = div_factor_fine(cache->key.offset,
+					      bargs->usage);
+
	if (chunk_used < user_thresh)
		ret = 0;
 
@@ -2959,6 +2966,8 @@ static void __cancel_balance(struct btrfs_fs_info *fs_info)
	unset_balance_control(fs_info);
	ret = del_balance_item(fs_info->tree_root);
	BUG_ON(ret);
+
+	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
 }
 
 void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
@@ -3138,8 +3147,10 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		__cancel_balance(fs_info);
-	else
+	else {
		kfree(bctl);
+		atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
+	}
	return ret;
 }
 
@@ -3156,7 +3167,6 @@ static int balance_kthread(void *data)
		ret = btrfs_balance(fs_info->balance_ctl, NULL);
	}
 
-	atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
	mutex_unlock(&fs_info->balance_mutex);
	mutex_unlock(&fs_info->volume_mutex);
 
@@ -3179,7 +3189,6 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
		return 0;
	}
 
-	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
@@ -3233,6 +3242,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
 
+	WARN_ON(atomic_xchg(&fs_info->mutually_exclusive_operation_running, 1));
+
	mutex_lock(&fs_info->volume_mutex);
	mutex_lock(&fs_info->balance_mutex);
 
@@ -3496,7 +3507,7 @@ struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	{ 1, 1, 2, 2, 2, 2 /* raid1 */ },
	{ 1, 2, 1, 1, 1, 2 /* dup */ },
	{ 1, 1, 0, 2, 1, 1 /* raid0 */ },
-	{ 1, 1, 0, 1, 1, 1 /* single */ },
+	{ 1, 1, 1, 1, 1, 1 /* single */ },
 };
 
 static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index ce5cbd717bfc..210fce2df308 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -226,6 +226,8 @@ compose_mount_options_out:
 compose_mount_options_err:
	kfree(mountdata);
	mountdata = ERR_PTR(rc);
+	kfree(*devname);
+	*devname = NULL;
	goto compose_mount_options_out;
 }
 
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 17c3643e5950..12b3da39733b 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1917,7 +1917,7 @@ srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
	}
	case AF_INET6: {
		struct sockaddr_in6 *saddr6 = (struct sockaddr_in6 *)srcaddr;
-		struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)&rhs;
+		struct sockaddr_in6 *vaddr6 = (struct sockaddr_in6 *)rhs;
		return ipv6_addr_equal(&saddr6->sin6_addr, &vaddr6->sin6_addr);
	}
	default:
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index e95b94945d5f..137af4255da6 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -191,15 +191,14 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
		retval = f2fs_getxattr(inode, name_index, "", value, retval);
	}
 
-	if (retval < 0) {
-		if (retval == -ENODATA)
-			acl = NULL;
-		else
-			acl = ERR_PTR(retval);
-	} else {
+	if (retval > 0)
		acl = f2fs_acl_from_disk(value, retval);
-	}
+	else if (retval == -ENODATA)
+		acl = NULL;
+	else
+		acl = ERR_PTR(retval);
	kfree(value);
+
	if (!IS_ERR(acl))
		set_cached_acl(inode, type, acl);
 
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 6ef36c37e2be..ff3c8439af87 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -214,7 +214,6 @@ retry:
		goto retry;
	}
	new->ino = ino;
-	INIT_LIST_HEAD(&new->list);
 
	/* add new_oentry into list which is sorted by inode number */
	if (orphan) {
@@ -772,7 +771,7 @@ void init_orphan_info(struct f2fs_sb_info *sbi)
	sbi->n_orphans = 0;
 }
 
-int create_checkpoint_caches(void)
+int __init create_checkpoint_caches(void)
 {
	orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
			sizeof(struct orphan_inode_entry), NULL);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 3aa5ce7cab83..7bd22a201125 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -547,6 +547,15 @@ redirty_out:
 
 #define MAX_DESIRED_PAGES_WP	4096
 
+static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
+			void *data)
+{
+	struct address_space *mapping = data;
+	int ret = mapping->a_ops->writepage(page, wbc);
+	mapping_set_error(mapping, ret);
+	return ret;
+}
+
 static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
 {
@@ -563,7 +572,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 
	if (!S_ISDIR(inode->i_mode))
		mutex_lock(&sbi->writepages);
-	ret = generic_writepages(mapping, wbc);
+	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (!S_ISDIR(inode->i_mode))
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));
@@ -689,6 +698,11 @@ static int f2fs_set_data_page_dirty(struct page *page)
	return 0;
 }
 
+static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
+{
+	return generic_block_bmap(mapping, block, get_data_block_ro);
+}
+
 const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
@@ -700,4 +714,5 @@ const struct address_space_operations f2fs_dblock_aops = {
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
+	.bmap		= f2fs_bmap,
 };
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 0e0380a588ad..c8c37307b326 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -26,6 +26,7 @@
 
 static LIST_HEAD(f2fs_stat_list);
 static struct dentry *debugfs_root;
+static DEFINE_MUTEX(f2fs_stat_mutex);
 
 static void update_general_status(struct f2fs_sb_info *sbi)
 {
@@ -180,18 +181,14 @@ static int stat_show(struct seq_file *s, void *v)
	int i = 0;
	int j;
 
+	mutex_lock(&f2fs_stat_mutex);
	list_for_each_entry_safe(si, next, &f2fs_stat_list, stat_list) {
 
-		mutex_lock(&si->stat_lock);
-		if (!si->sbi) {
-			mutex_unlock(&si->stat_lock);
-			continue;
-		}
		update_general_status(si->sbi);
 
		seq_printf(s, "\n=====[ partition info. #%d ]=====\n", i++);
-		seq_printf(s, "[SB: 1] [CP: 2] [NAT: %d] [SIT: %d] ",
-			   si->nat_area_segs, si->sit_area_segs);
+		seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
+			   si->sit_area_segs, si->nat_area_segs);
		seq_printf(s, "[SSA: %d] [MAIN: %d",
			   si->ssa_area_segs, si->main_area_segs);
		seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
@@ -286,8 +283,8 @@ static int stat_show(struct seq_file *s, void *v)
		seq_printf(s, "\nMemory: %u KB = static: %u + cached: %u\n",
			   (si->base_mem + si->cache_mem) >> 10,
			   si->base_mem >> 10, si->cache_mem >> 10);
-		mutex_unlock(&si->stat_lock);
	}
+	mutex_unlock(&f2fs_stat_mutex);
	return 0;
 }
 
@@ -303,7 +300,7 @@ static const struct file_operations stat_fops = {
	.release = single_release,
 };
 
-static int init_stats(struct f2fs_sb_info *sbi)
+int f2fs_build_stats(struct f2fs_sb_info *sbi)
 {
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_stat_info *si;
@@ -313,9 +310,6 @@ static int init_stats(struct f2fs_sb_info *sbi)
		return -ENOMEM;
 
	si = sbi->stat_info;
-	mutex_init(&si->stat_lock);
-	list_add_tail(&si->stat_list, &f2fs_stat_list);
-
	si->all_area_segs = le32_to_cpu(raw_super->segment_count);
	si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);
	si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat);
@@ -325,21 +319,11 @@ static int init_stats(struct f2fs_sb_info *sbi)
	si->main_area_zones = si->main_area_sections /
				le32_to_cpu(raw_super->secs_per_zone);
	si->sbi = sbi;
-	return 0;
-}
 
-int f2fs_build_stats(struct f2fs_sb_info *sbi)
-{
-	int retval;
-
-	retval = init_stats(sbi);
-	if (retval)
-		return retval;
-
-	if (!debugfs_root)
-		debugfs_root = debugfs_create_dir("f2fs", NULL);
+	mutex_lock(&f2fs_stat_mutex);
+	list_add_tail(&si->stat_list, &f2fs_stat_list);
+	mutex_unlock(&f2fs_stat_mutex);
 
-	debugfs_create_file("status", S_IRUGO, debugfs_root, NULL, &stat_fops);
	return 0;
 }
 
@@ -347,14 +331,22 @@ void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
 {
	struct f2fs_stat_info *si = sbi->stat_info;
 
+	mutex_lock(&f2fs_stat_mutex);
	list_del(&si->stat_list);
-	mutex_lock(&si->stat_lock);
-	si->sbi = NULL;
-	mutex_unlock(&si->stat_lock);
+	mutex_unlock(&f2fs_stat_mutex);
+
	kfree(sbi->stat_info);
 }
 
-void destroy_root_stats(void)
+void __init f2fs_create_root_stats(void)
+{
+	debugfs_root = debugfs_create_dir("f2fs", NULL);
+	if (debugfs_root)
+		debugfs_create_file("status", S_IRUGO, debugfs_root,
+					NULL, &stat_fops);
+}
+
+void f2fs_destroy_root_stats(void)
 {
	debugfs_remove_recursive(debugfs_root);
	debugfs_root = NULL;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 951ed52748f6..989980e16d0b 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -503,7 +503,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
	}
 
	if (inode) {
-		inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+		inode->i_ctime = CURRENT_TIME;
		drop_nlink(inode);
		if (S_ISDIR(inode->i_mode)) {
			drop_nlink(inode);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 13c6dfbb7183..c8e2d751ef9c 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -211,11 +211,11 @@ struct dnode_of_data {
211static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, 211static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
212 struct page *ipage, struct page *npage, nid_t nid) 212 struct page *ipage, struct page *npage, nid_t nid)
213{ 213{
214 memset(dn, 0, sizeof(*dn));
214 dn->inode = inode; 215 dn->inode = inode;
215 dn->inode_page = ipage; 216 dn->inode_page = ipage;
216 dn->node_page = npage; 217 dn->node_page = npage;
217 dn->nid = nid; 218 dn->nid = nid;
218 dn->inode_page_locked = 0;
219} 219}
220 220
221/* 221/*
@@ -877,6 +877,8 @@ bool f2fs_empty_dir(struct inode *);
877 * super.c 877 * super.c
878 */ 878 */
879int f2fs_sync_fs(struct super_block *, int); 879int f2fs_sync_fs(struct super_block *, int);
880extern __printf(3, 4)
881void f2fs_msg(struct super_block *, const char *, const char *, ...);
880 882
881/* 883/*
882 * hash.c 884 * hash.c
@@ -912,7 +914,7 @@ int restore_node_summary(struct f2fs_sb_info *, unsigned int,
912void flush_nat_entries(struct f2fs_sb_info *); 914void flush_nat_entries(struct f2fs_sb_info *);
913int build_node_manager(struct f2fs_sb_info *); 915int build_node_manager(struct f2fs_sb_info *);
914void destroy_node_manager(struct f2fs_sb_info *); 916void destroy_node_manager(struct f2fs_sb_info *);
915int create_node_manager_caches(void); 917int __init create_node_manager_caches(void);
916void destroy_node_manager_caches(void); 918void destroy_node_manager_caches(void);
917 919
918/* 920/*
@@ -964,7 +966,7 @@ void sync_dirty_dir_inodes(struct f2fs_sb_info *);
964void block_operations(struct f2fs_sb_info *); 966void block_operations(struct f2fs_sb_info *);
965void write_checkpoint(struct f2fs_sb_info *, bool, bool); 967void write_checkpoint(struct f2fs_sb_info *, bool, bool);
966void init_orphan_info(struct f2fs_sb_info *); 968void init_orphan_info(struct f2fs_sb_info *);
967int create_checkpoint_caches(void); 969int __init create_checkpoint_caches(void);
968void destroy_checkpoint_caches(void); 970void destroy_checkpoint_caches(void);
969 971
970/* 972/*
@@ -984,9 +986,9 @@ int do_write_data_page(struct page *);
984int start_gc_thread(struct f2fs_sb_info *); 986int start_gc_thread(struct f2fs_sb_info *);
985void stop_gc_thread(struct f2fs_sb_info *); 987void stop_gc_thread(struct f2fs_sb_info *);
986block_t start_bidx_of_node(unsigned int); 988block_t start_bidx_of_node(unsigned int);
987int f2fs_gc(struct f2fs_sb_info *, int); 989int f2fs_gc(struct f2fs_sb_info *);
988void build_gc_manager(struct f2fs_sb_info *); 990void build_gc_manager(struct f2fs_sb_info *);
989int create_gc_caches(void); 991int __init create_gc_caches(void);
990void destroy_gc_caches(void); 992void destroy_gc_caches(void);
991 993
992/* 994/*
@@ -1058,7 +1060,8 @@ struct f2fs_stat_info {
1058 1060
1059int f2fs_build_stats(struct f2fs_sb_info *); 1061int f2fs_build_stats(struct f2fs_sb_info *);
1060void f2fs_destroy_stats(struct f2fs_sb_info *); 1062void f2fs_destroy_stats(struct f2fs_sb_info *);
1061void destroy_root_stats(void); 1063void __init f2fs_create_root_stats(void);
1064void f2fs_destroy_root_stats(void);
1062#else 1065#else
1063#define stat_inc_call_count(si) 1066#define stat_inc_call_count(si)
1064#define stat_inc_seg_count(si, type) 1067#define stat_inc_seg_count(si, type)
@@ -1068,7 +1071,8 @@ void destroy_root_stats(void);
1068 1071
1069static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; } 1072static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
1070static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { } 1073static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
1071static inline void destroy_root_stats(void) { } 1074static inline void __init f2fs_create_root_stats(void) { }
1075static inline void f2fs_destroy_root_stats(void) { }
1072#endif 1076#endif
1073 1077
1074extern const struct file_operations f2fs_dir_operations; 1078extern const struct file_operations f2fs_dir_operations;
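In the set_new_dnode() hunk above, field-by-field initialization is replaced by a memset() of the whole structure, so every member, including any added later, starts out zeroed and the explicit inode_page_locked reset becomes unnecessary. The same idiom as a stand-alone user-space sketch, with a hypothetical stand-in struct rather than the real struct dnode_of_data:

#include <string.h>

struct demo_dnode {			/* illustrative stand-in only */
	void *inode;
	void *inode_page;
	void *node_page;
	unsigned int nid;
	int inode_page_locked;
};

void demo_set_new_dnode(struct demo_dnode *dn, void *inode,
			void *ipage, void *npage, unsigned int nid)
{
	memset(dn, 0, sizeof(*dn));	/* zero every field, present and future */
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}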
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 7f9ea9271ebe..3191b52aafb0 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -96,8 +96,9 @@ out:
96} 96}
97 97
98static const struct vm_operations_struct f2fs_file_vm_ops = { 98static const struct vm_operations_struct f2fs_file_vm_ops = {
99 .fault = filemap_fault, 99 .fault = filemap_fault,
100 .page_mkwrite = f2fs_vm_page_mkwrite, 100 .page_mkwrite = f2fs_vm_page_mkwrite,
101 .remap_pages = generic_file_remap_pages,
101}; 102};
102 103
103static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode) 104static int need_to_sync_dir(struct f2fs_sb_info *sbi, struct inode *inode)
@@ -137,6 +138,9 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
137 if (ret) 138 if (ret)
138 return ret; 139 return ret;
139 140
141 /* guarantee free sections for fsync */
142 f2fs_balance_fs(sbi);
143
140 mutex_lock(&inode->i_mutex); 144 mutex_lock(&inode->i_mutex);
141 145
142 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 146 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
@@ -407,6 +411,8 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
407 struct dnode_of_data dn; 411 struct dnode_of_data dn;
408 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); 412 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
409 413
414 f2fs_balance_fs(sbi);
415
410 mutex_lock_op(sbi, DATA_TRUNC); 416 mutex_lock_op(sbi, DATA_TRUNC);
411 set_new_dnode(&dn, inode, NULL, NULL, 0); 417 set_new_dnode(&dn, inode, NULL, NULL, 0);
412 err = get_dnode_of_data(&dn, index, RDONLY_NODE); 418 err = get_dnode_of_data(&dn, index, RDONLY_NODE);
@@ -534,7 +540,6 @@ static long f2fs_fallocate(struct file *file, int mode,
534 loff_t offset, loff_t len) 540 loff_t offset, loff_t len)
535{ 541{
536 struct inode *inode = file->f_path.dentry->d_inode; 542 struct inode *inode = file->f_path.dentry->d_inode;
537 struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
538 long ret; 543 long ret;
539 544
540 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 545 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
@@ -545,7 +550,10 @@ static long f2fs_fallocate(struct file *file, int mode,
545 else 550 else
546 ret = expand_inode_data(inode, offset, len, mode); 551 ret = expand_inode_data(inode, offset, len, mode);
547 552
548 f2fs_balance_fs(sbi); 553 if (!ret) {
554 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
555 mark_inode_dirty(inode);
556 }
549 return ret; 557 return ret;
550} 558}
551 559
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index b0ec721e984a..c386910dacc5 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -78,7 +78,7 @@ static int gc_thread_func(void *data)
78 78
79 sbi->bg_gc++; 79 sbi->bg_gc++;
80 80
81 if (f2fs_gc(sbi, 1) == GC_NONE) 81 if (f2fs_gc(sbi) == GC_NONE)
82 wait_ms = GC_THREAD_NOGC_SLEEP_TIME; 82 wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
83 else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME) 83 else if (wait_ms == GC_THREAD_NOGC_SLEEP_TIME)
84 wait_ms = GC_THREAD_MAX_SLEEP_TIME; 84 wait_ms = GC_THREAD_MAX_SLEEP_TIME;
@@ -424,7 +424,11 @@ next_step:
424} 424}
425 425
426/* 426/*
427 * Calculate start block index that this node page contains 427 * Calculate the start block index for the given node offset.
428 * Be careful: the caller should pass only node offsets that refer to direct
429 * node blocks. If a node offset that points to any other type of node block,
430 * such as an indirect or double indirect node block, is passed in, that is a
431 * caller's bug.
428 */ 432 */
429block_t start_bidx_of_node(unsigned int node_ofs) 433block_t start_bidx_of_node(unsigned int node_ofs)
430{ 434{
@@ -651,62 +655,44 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
651 return ret; 655 return ret;
652} 656}
653 657
654int f2fs_gc(struct f2fs_sb_info *sbi, int nGC) 658int f2fs_gc(struct f2fs_sb_info *sbi)
655{ 659{
656 unsigned int segno;
657 int old_free_secs, cur_free_secs;
658 int gc_status, nfree;
659 struct list_head ilist; 660 struct list_head ilist;
661 unsigned int segno, i;
660 int gc_type = BG_GC; 662 int gc_type = BG_GC;
663 int gc_status = GC_NONE;
661 664
662 INIT_LIST_HEAD(&ilist); 665 INIT_LIST_HEAD(&ilist);
663gc_more: 666gc_more:
664 nfree = 0; 667 if (!(sbi->sb->s_flags & MS_ACTIVE))
665 gc_status = GC_NONE; 668 goto stop;
666 669
667 if (has_not_enough_free_secs(sbi)) 670 if (has_not_enough_free_secs(sbi))
668 old_free_secs = reserved_sections(sbi); 671 gc_type = FG_GC;
669 else
670 old_free_secs = free_sections(sbi);
671
672 while (sbi->sb->s_flags & MS_ACTIVE) {
673 int i;
674 if (has_not_enough_free_secs(sbi))
675 gc_type = FG_GC;
676 672
677 cur_free_secs = free_sections(sbi) + nfree; 673 if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
674 goto stop;
678 675
679 /* We got free space successfully. */ 676 for (i = 0; i < sbi->segs_per_sec; i++) {
680 if (nGC < cur_free_secs - old_free_secs) 677 /*
681 break; 678 * do_garbage_collect will give us three gc_status:
682 679 * GC_ERROR, GC_DONE, and GC_BLOCKED.
683 if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE)) 680 * If GC is finished uncleanly, we have to return
681 * the victim to dirty segment list.
682 */
683 gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type);
684 if (gc_status != GC_DONE)
684 break; 685 break;
685
686 for (i = 0; i < sbi->segs_per_sec; i++) {
687 /*
688 * do_garbage_collect will give us three gc_status:
689 * GC_ERROR, GC_DONE, and GC_BLOCKED.
690 * If GC is finished uncleanly, we have to return
691 * the victim to dirty segment list.
692 */
693 gc_status = do_garbage_collect(sbi, segno + i,
694 &ilist, gc_type);
695 if (gc_status != GC_DONE)
696 goto stop;
697 nfree++;
698 }
699 } 686 }
700stop: 687 if (has_not_enough_free_secs(sbi)) {
701 if (has_not_enough_free_secs(sbi) || gc_status == GC_BLOCKED) {
702 write_checkpoint(sbi, (gc_status == GC_BLOCKED), false); 688 write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
703 if (nfree) 689 if (has_not_enough_free_secs(sbi))
704 goto gc_more; 690 goto gc_more;
705 } 691 }
692stop:
706 mutex_unlock(&sbi->gc_mutex); 693 mutex_unlock(&sbi->gc_mutex);
707 694
708 put_gc_inode(&ilist); 695 put_gc_inode(&ilist);
709 BUG_ON(!list_empty(&ilist));
710 return gc_status; 696 return gc_status;
711} 697}
712 698
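Because the old and new columns of the f2fs_gc() hunk above interleave heavily, here is the new version of the function read straight down the right-hand column, reproduced purely as a reading aid (a transcription of the added lines, nothing new):

int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int gc_status = GC_NONE;

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	if (has_not_enough_free_secs(sbi))
		gc_type = FG_GC;

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;

	for (i = 0; i < sbi->segs_per_sec; i++) {
		/*
		 * do_garbage_collect will give us three gc_status:
		 * GC_ERROR, GC_DONE, and GC_BLOCKED.
		 * If GC is finished uncleanly, we have to return
		 * the victim to dirty segment list.
		 */
		gc_status = do_garbage_collect(sbi, segno + i, &ilist, gc_type);
		if (gc_status != GC_DONE)
			break;
	}
	if (has_not_enough_free_secs(sbi)) {
		write_checkpoint(sbi, (gc_status == GC_BLOCKED), false);
		if (has_not_enough_free_secs(sbi))
			goto gc_more;
	}
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return gc_status;
}

The simplification drops the old nGC/nfree accounting: instead of counting newly freed sections against a target, the loop collects one victim section at a time and simply re-enters gc_more while has_not_enough_free_secs() still holds after a checkpoint.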
@@ -715,7 +701,7 @@ void build_gc_manager(struct f2fs_sb_info *sbi)
715 DIRTY_I(sbi)->v_ops = &default_v_ops; 701 DIRTY_I(sbi)->v_ops = &default_v_ops;
716} 702}
717 703
718int create_gc_caches(void) 704int __init create_gc_caches(void)
719{ 705{
720 winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes", 706 winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
721 sizeof(struct inode_entry), NULL); 707 sizeof(struct inode_entry), NULL);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index bf20b4d03214..794241777322 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -217,6 +217,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
217 inode->i_ino == F2FS_META_INO(sbi)) 217 inode->i_ino == F2FS_META_INO(sbi))
218 return 0; 218 return 0;
219 219
220 if (wbc)
221 f2fs_balance_fs(sbi);
222
220 node_page = get_node_page(sbi, inode->i_ino); 223 node_page = get_node_page(sbi, inode->i_ino);
221 if (IS_ERR(node_page)) 224 if (IS_ERR(node_page))
222 return PTR_ERR(node_page); 225 return PTR_ERR(node_page);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 5066bfd256c9..9bda63c9c166 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1124,6 +1124,12 @@ static int f2fs_write_node_page(struct page *page,
1124 return 0; 1124 return 0;
1125} 1125}
1126 1126
1127/*
1128 * It is very important to gather dirty pages and write at once, so that we can
1129 * submit a big bio without interfering with other data writes.
1130 * By default, 512 pages (2MB), a segment size, is quite reasonable.
1131 */
1132#define COLLECT_DIRTY_NODES 512
1127static int f2fs_write_node_pages(struct address_space *mapping, 1133static int f2fs_write_node_pages(struct address_space *mapping,
1128 struct writeback_control *wbc) 1134 struct writeback_control *wbc)
1129{ 1135{
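As a cross-check of the figure in the new comment, using the 4KB block size that is the only size accepted by the sanity checks later in this series: 512 pages × 4 KiB = 2 MiB, i.e. one segment's worth of node pages is gathered before a write-out pass is started.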
@@ -1131,17 +1137,16 @@ static int f2fs_write_node_pages(struct address_space *mapping,
1131 struct block_device *bdev = sbi->sb->s_bdev; 1137 struct block_device *bdev = sbi->sb->s_bdev;
1132 long nr_to_write = wbc->nr_to_write; 1138 long nr_to_write = wbc->nr_to_write;
1133 1139
1134 if (wbc->for_kupdate) 1140 /* First check balancing cached NAT entries */
1135 return 0;
1136
1137 if (get_pages(sbi, F2FS_DIRTY_NODES) == 0)
1138 return 0;
1139
1140 if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) { 1141 if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
1141 write_checkpoint(sbi, false, false); 1142 write_checkpoint(sbi, false, false);
1142 return 0; 1143 return 0;
1143 } 1144 }
1144 1145
1146 /* collect a number of dirty node pages and write them together */
1147 if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
1148 return 0;
1149
1145 /* if mounting is failed, skip writing node pages */ 1150 /* if mounting is failed, skip writing node pages */
1146 wbc->nr_to_write = bio_get_nr_vecs(bdev); 1151 wbc->nr_to_write = bio_get_nr_vecs(bdev);
1147 sync_node_pages(sbi, 0, wbc); 1152 sync_node_pages(sbi, 0, wbc);
@@ -1732,7 +1737,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
1732 kfree(nm_i); 1737 kfree(nm_i);
1733} 1738}
1734 1739
1735int create_node_manager_caches(void) 1740int __init create_node_manager_caches(void)
1736{ 1741{
1737 nat_entry_slab = f2fs_kmem_cache_create("nat_entry", 1742 nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
1738 sizeof(struct nat_entry), NULL); 1743 sizeof(struct nat_entry), NULL);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index b571fee677d5..f42e4060b399 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -67,7 +67,7 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
67 kunmap(page); 67 kunmap(page);
68 f2fs_put_page(page, 0); 68 f2fs_put_page(page, 0);
69 } else { 69 } else {
70 f2fs_add_link(&dent, inode); 70 err = f2fs_add_link(&dent, inode);
71 } 71 }
72 iput(dir); 72 iput(dir);
73out: 73out:
@@ -151,7 +151,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
151 goto out; 151 goto out;
152 } 152 }
153 153
154 INIT_LIST_HEAD(&entry->list);
155 list_add_tail(&entry->list, head); 154 list_add_tail(&entry->list, head);
156 entry->blkaddr = blkaddr; 155 entry->blkaddr = blkaddr;
157 } 156 }
@@ -174,10 +173,9 @@ out:
174static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi, 173static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
175 struct list_head *head) 174 struct list_head *head)
176{ 175{
177 struct list_head *this; 176 struct fsync_inode_entry *entry, *tmp;
178 struct fsync_inode_entry *entry; 177
179 list_for_each(this, head) { 178 list_for_each_entry_safe(entry, tmp, head, list) {
180 entry = list_entry(this, struct fsync_inode_entry, list);
181 iput(entry->inode); 179 iput(entry->inode);
182 list_del(&entry->list); 180 list_del(&entry->list);
183 kmem_cache_free(fsync_entry_slab, entry); 181 kmem_cache_free(fsync_entry_slab, entry);
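The switch to list_for_each_entry_safe() matters because each entry is unlinked and freed inside the loop body; the _safe variant keeps a second cursor (tmp) that caches the next node before the body runs, so deleting the current entry cannot break the walk. Read down the right-hand column (with only the closing braces inferred), the new helper is:

static void destroy_fsync_dnodes(struct f2fs_sb_info *sbi,
					struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}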
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index de6240922b0a..4b0099066582 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -31,7 +31,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
31 */ 31 */
32 if (has_not_enough_free_secs(sbi)) { 32 if (has_not_enough_free_secs(sbi)) {
33 mutex_lock(&sbi->gc_mutex); 33 mutex_lock(&sbi->gc_mutex);
34 f2fs_gc(sbi, 1); 34 f2fs_gc(sbi);
35 } 35 }
36} 36}
37 37
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 08a94c814bdc..37fad04c8669 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -53,6 +53,18 @@ static match_table_t f2fs_tokens = {
53 {Opt_err, NULL}, 53 {Opt_err, NULL},
54}; 54};
55 55
56void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
57{
58 struct va_format vaf;
59 va_list args;
60
61 va_start(args, fmt);
62 vaf.fmt = fmt;
63 vaf.va = &args;
64 printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
65 va_end(args);
66}
67
56static void init_once(void *foo) 68static void init_once(void *foo)
57{ 69{
58 struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo; 70 struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
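The new f2fs_msg() helper, together with the __printf(3, 4) declaration added in f2fs.h above, routes a caller-supplied format string through printk's %pV / struct va_format mechanism, so every message carries an explicit log level and an "F2FS-fs (<device>): " prefix without each call site repeating them. A typical call, as used in the later hunks of this file, is:

	f2fs_msg(sb, KERN_ERR, "unable to read superblock");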
@@ -125,6 +137,8 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
125 137
126 if (sync) 138 if (sync)
127 write_checkpoint(sbi, false, false); 139 write_checkpoint(sbi, false, false);
140 else
141 f2fs_balance_fs(sbi);
128 142
129 return 0; 143 return 0;
130} 144}
@@ -247,7 +261,8 @@ static const struct export_operations f2fs_export_ops = {
247 .get_parent = f2fs_get_parent, 261 .get_parent = f2fs_get_parent,
248}; 262};
249 263
250static int parse_options(struct f2fs_sb_info *sbi, char *options) 264static int parse_options(struct super_block *sb, struct f2fs_sb_info *sbi,
265 char *options)
251{ 266{
252 substring_t args[MAX_OPT_ARGS]; 267 substring_t args[MAX_OPT_ARGS];
253 char *p; 268 char *p;
@@ -286,7 +301,8 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)
286 break; 301 break;
287#else 302#else
288 case Opt_nouser_xattr: 303 case Opt_nouser_xattr:
289 pr_info("nouser_xattr options not supported\n"); 304 f2fs_msg(sb, KERN_INFO,
305 "nouser_xattr options not supported");
290 break; 306 break;
291#endif 307#endif
292#ifdef CONFIG_F2FS_FS_POSIX_ACL 308#ifdef CONFIG_F2FS_FS_POSIX_ACL
@@ -295,7 +311,7 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)
295 break; 311 break;
296#else 312#else
297 case Opt_noacl: 313 case Opt_noacl:
298 pr_info("noacl options not supported\n"); 314 f2fs_msg(sb, KERN_INFO, "noacl options not supported");
299 break; 315 break;
300#endif 316#endif
301 case Opt_active_logs: 317 case Opt_active_logs:
@@ -309,8 +325,9 @@ static int parse_options(struct f2fs_sb_info *sbi, char *options)
309 set_opt(sbi, DISABLE_EXT_IDENTIFY); 325 set_opt(sbi, DISABLE_EXT_IDENTIFY);
310 break; 326 break;
311 default: 327 default:
312 pr_err("Unrecognized mount option \"%s\" or missing value\n", 328 f2fs_msg(sb, KERN_ERR,
313 p); 329 "Unrecognized mount option \"%s\" or missing value",
330 p);
314 return -EINVAL; 331 return -EINVAL;
315 } 332 }
316 } 333 }
@@ -337,23 +354,36 @@ static loff_t max_file_size(unsigned bits)
337 return result; 354 return result;
338} 355}
339 356
340static int sanity_check_raw_super(struct f2fs_super_block *raw_super) 357static int sanity_check_raw_super(struct super_block *sb,
358 struct f2fs_super_block *raw_super)
341{ 359{
342 unsigned int blocksize; 360 unsigned int blocksize;
343 361
344 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) 362 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
363 f2fs_msg(sb, KERN_INFO,
364 "Magic Mismatch, valid(0x%x) - read(0x%x)",
365 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
345 return 1; 366 return 1;
367 }
346 368
347 /* Currently, support only 4KB block size */ 369 /* Currently, support only 4KB block size */
348 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); 370 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
349 if (blocksize != PAGE_CACHE_SIZE) 371 if (blocksize != PAGE_CACHE_SIZE) {
372 f2fs_msg(sb, KERN_INFO,
373 "Invalid blocksize (%u), supports only 4KB\n",
374 blocksize);
350 return 1; 375 return 1;
376 }
351 if (le32_to_cpu(raw_super->log_sectorsize) != 377 if (le32_to_cpu(raw_super->log_sectorsize) !=
352 F2FS_LOG_SECTOR_SIZE) 378 F2FS_LOG_SECTOR_SIZE) {
379 f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize");
353 return 1; 380 return 1;
381 }
354 if (le32_to_cpu(raw_super->log_sectors_per_block) != 382 if (le32_to_cpu(raw_super->log_sectors_per_block) !=
355 F2FS_LOG_SECTORS_PER_BLOCK) 383 F2FS_LOG_SECTORS_PER_BLOCK) {
384 f2fs_msg(sb, KERN_INFO, "Invalid log sectors per block");
356 return 1; 385 return 1;
386 }
357 return 0; 387 return 0;
358} 388}
359 389
@@ -413,14 +443,17 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
413 if (!sbi) 443 if (!sbi)
414 return -ENOMEM; 444 return -ENOMEM;
415 445
416 /* set a temporary block size */ 446 /* set a block size */
417 if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) 447 if (!sb_set_blocksize(sb, F2FS_BLKSIZE)) {
448 f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
418 goto free_sbi; 449 goto free_sbi;
450 }
419 451
420 /* read f2fs raw super block */ 452 /* read f2fs raw super block */
421 raw_super_buf = sb_bread(sb, 0); 453 raw_super_buf = sb_bread(sb, 0);
422 if (!raw_super_buf) { 454 if (!raw_super_buf) {
423 err = -EIO; 455 err = -EIO;
456 f2fs_msg(sb, KERN_ERR, "unable to read superblock");
424 goto free_sbi; 457 goto free_sbi;
425 } 458 }
426 raw_super = (struct f2fs_super_block *) 459 raw_super = (struct f2fs_super_block *)
@@ -438,12 +471,14 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
438 set_opt(sbi, POSIX_ACL); 471 set_opt(sbi, POSIX_ACL);
439#endif 472#endif
440 /* parse mount options */ 473 /* parse mount options */
441 if (parse_options(sbi, (char *)data)) 474 if (parse_options(sb, sbi, (char *)data))
442 goto free_sb_buf; 475 goto free_sb_buf;
443 476
444 /* sanity checking of raw super */ 477 /* sanity checking of raw super */
445 if (sanity_check_raw_super(raw_super)) 478 if (sanity_check_raw_super(sb, raw_super)) {
479 f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem");
446 goto free_sb_buf; 480 goto free_sb_buf;
481 }
447 482
448 sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize)); 483 sb->s_maxbytes = max_file_size(le32_to_cpu(raw_super->log_blocksize));
449 sb->s_max_links = F2FS_LINK_MAX; 484 sb->s_max_links = F2FS_LINK_MAX;
@@ -477,18 +512,23 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
477 /* get an inode for meta space */ 512 /* get an inode for meta space */
478 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); 513 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
479 if (IS_ERR(sbi->meta_inode)) { 514 if (IS_ERR(sbi->meta_inode)) {
515 f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
480 err = PTR_ERR(sbi->meta_inode); 516 err = PTR_ERR(sbi->meta_inode);
481 goto free_sb_buf; 517 goto free_sb_buf;
482 } 518 }
483 519
484 err = get_valid_checkpoint(sbi); 520 err = get_valid_checkpoint(sbi);
485 if (err) 521 if (err) {
522 f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
486 goto free_meta_inode; 523 goto free_meta_inode;
524 }
487 525
488 /* sanity checking of checkpoint */ 526 /* sanity checking of checkpoint */
489 err = -EINVAL; 527 err = -EINVAL;
490 if (sanity_check_ckpt(raw_super, sbi->ckpt)) 528 if (sanity_check_ckpt(raw_super, sbi->ckpt)) {
529 f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
491 goto free_cp; 530 goto free_cp;
531 }
492 532
493 sbi->total_valid_node_count = 533 sbi->total_valid_node_count =
494 le32_to_cpu(sbi->ckpt->valid_node_count); 534 le32_to_cpu(sbi->ckpt->valid_node_count);
@@ -502,25 +542,28 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
502 INIT_LIST_HEAD(&sbi->dir_inode_list); 542 INIT_LIST_HEAD(&sbi->dir_inode_list);
503 spin_lock_init(&sbi->dir_inode_lock); 543 spin_lock_init(&sbi->dir_inode_lock);
504 544
505 /* init super block */
506 if (!sb_set_blocksize(sb, sbi->blocksize))
507 goto free_cp;
508
509 init_orphan_info(sbi); 545 init_orphan_info(sbi);
510 546
511 /* setup f2fs internal modules */ 547 /* setup f2fs internal modules */
512 err = build_segment_manager(sbi); 548 err = build_segment_manager(sbi);
513 if (err) 549 if (err) {
550 f2fs_msg(sb, KERN_ERR,
551 "Failed to initialize F2FS segment manager");
514 goto free_sm; 552 goto free_sm;
553 }
515 err = build_node_manager(sbi); 554 err = build_node_manager(sbi);
516 if (err) 555 if (err) {
556 f2fs_msg(sb, KERN_ERR,
557 "Failed to initialize F2FS node manager");
517 goto free_nm; 558 goto free_nm;
559 }
518 560
519 build_gc_manager(sbi); 561 build_gc_manager(sbi);
520 562
521 /* get an inode for node space */ 563 /* get an inode for node space */
522 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); 564 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
523 if (IS_ERR(sbi->node_inode)) { 565 if (IS_ERR(sbi->node_inode)) {
566 f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
524 err = PTR_ERR(sbi->node_inode); 567 err = PTR_ERR(sbi->node_inode);
525 goto free_nm; 568 goto free_nm;
526 } 569 }
@@ -533,6 +576,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
533 /* read root inode and dentry */ 576 /* read root inode and dentry */
534 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); 577 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
535 if (IS_ERR(root)) { 578 if (IS_ERR(root)) {
579 f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
536 err = PTR_ERR(root); 580 err = PTR_ERR(root);
537 goto free_node_inode; 581 goto free_node_inode;
538 } 582 }
@@ -596,7 +640,7 @@ static struct file_system_type f2fs_fs_type = {
596 .fs_flags = FS_REQUIRES_DEV, 640 .fs_flags = FS_REQUIRES_DEV,
597}; 641};
598 642
599static int init_inodecache(void) 643static int __init init_inodecache(void)
600{ 644{
601 f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache", 645 f2fs_inode_cachep = f2fs_kmem_cache_create("f2fs_inode_cache",
602 sizeof(struct f2fs_inode_info), NULL); 646 sizeof(struct f2fs_inode_info), NULL);
@@ -631,14 +675,17 @@ static int __init init_f2fs_fs(void)
631 err = create_checkpoint_caches(); 675 err = create_checkpoint_caches();
632 if (err) 676 if (err)
633 goto fail; 677 goto fail;
634 return register_filesystem(&f2fs_fs_type); 678 err = register_filesystem(&f2fs_fs_type);
679 if (err)
680 goto fail;
681 f2fs_create_root_stats();
635fail: 682fail:
636 return err; 683 return err;
637} 684}
638 685
639static void __exit exit_f2fs_fs(void) 686static void __exit exit_f2fs_fs(void)
640{ 687{
641 destroy_root_stats(); 688 f2fs_destroy_root_stats();
642 unregister_filesystem(&f2fs_fs_type); 689 unregister_filesystem(&f2fs_fs_type);
643 destroy_checkpoint_caches(); 690 destroy_checkpoint_caches();
644 destroy_gc_caches(); 691 destroy_gc_caches();
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 940136a3d3a6..8038c0496504 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -318,6 +318,8 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
318 if (name_len > 255 || value_len > MAX_VALUE_LEN) 318 if (name_len > 255 || value_len > MAX_VALUE_LEN)
319 return -ERANGE; 319 return -ERANGE;
320 320
321 f2fs_balance_fs(sbi);
322
321 mutex_lock_op(sbi, NODE_NEW); 323 mutex_lock_op(sbi, NODE_NEW);
322 if (!fi->i_xattr_nid) { 324 if (!fi->i_xattr_nid) {
323 /* Allocate new attribute block */ 325 /* Allocate new attribute block */
diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig
index 0cf160a94eda..1b2f6c2c3aaf 100644
--- a/fs/fuse/Kconfig
+++ b/fs/fuse/Kconfig
@@ -4,12 +4,24 @@ config FUSE_FS
4 With FUSE it is possible to implement a fully functional filesystem 4 With FUSE it is possible to implement a fully functional filesystem
5 in a userspace program. 5 in a userspace program.
6 6
7 There's also companion library: libfuse. This library along with 7 There's also a companion library: libfuse2. This library is available
8 utilities is available from the FUSE homepage: 8 from the FUSE homepage:
9 <http://fuse.sourceforge.net/> 9 <http://fuse.sourceforge.net/>
10 although chances are your distribution already has that library
11 installed if you've installed the "fuse" package itself.
10 12
11 See <file:Documentation/filesystems/fuse.txt> for more information. 13 See <file:Documentation/filesystems/fuse.txt> for more information.
12 See <file:Documentation/Changes> for needed library/utility version. 14 See <file:Documentation/Changes> for needed library/utility version.
13 15
14 If you want to develop a userspace FS, or if you want to use 16 If you want to develop a userspace FS, or if you want to use
15 a filesystem based on FUSE, answer Y or M. 17 a filesystem based on FUSE, answer Y or M.
18
19config CUSE
20 tristate "Character device in Userspace support"
21 depends on FUSE_FS
22 help
23 This FUSE extension allows character devices to be
24 implemented in userspace.
25
26 If you want to develop or use a userspace character device
27 based on CUSE, answer Y or M.
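With CUSE now defined here directly below FUSE_FS, a kernel configuration that builds both as modules would carry the usual pair of symbols; a hypothetical .config fragment (symbol names follow the standard CONFIG_ prefix for the options above):

	CONFIG_FUSE_FS=m
	CONFIG_CUSE=m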
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index ee8d55042298..e397b675b029 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -45,7 +45,6 @@
45#include <linux/miscdevice.h> 45#include <linux/miscdevice.h>
46#include <linux/mutex.h> 46#include <linux/mutex.h>
47#include <linux/slab.h> 47#include <linux/slab.h>
48#include <linux/spinlock.h>
49#include <linux/stat.h> 48#include <linux/stat.h>
50#include <linux/module.h> 49#include <linux/module.h>
51 50
@@ -63,7 +62,7 @@ struct cuse_conn {
63 bool unrestricted_ioctl; 62 bool unrestricted_ioctl;
64}; 63};
65 64
66static DEFINE_SPINLOCK(cuse_lock); /* protects cuse_conntbl */ 65static DEFINE_MUTEX(cuse_lock); /* protects registration */
67static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN]; 66static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN];
68static struct class *cuse_class; 67static struct class *cuse_class;
69 68
@@ -114,14 +113,14 @@ static int cuse_open(struct inode *inode, struct file *file)
114 int rc; 113 int rc;
115 114
116 /* look up and get the connection */ 115 /* look up and get the connection */
117 spin_lock(&cuse_lock); 116 mutex_lock(&cuse_lock);
118 list_for_each_entry(pos, cuse_conntbl_head(devt), list) 117 list_for_each_entry(pos, cuse_conntbl_head(devt), list)
119 if (pos->dev->devt == devt) { 118 if (pos->dev->devt == devt) {
120 fuse_conn_get(&pos->fc); 119 fuse_conn_get(&pos->fc);
121 cc = pos; 120 cc = pos;
122 break; 121 break;
123 } 122 }
124 spin_unlock(&cuse_lock); 123 mutex_unlock(&cuse_lock);
125 124
126 /* dead? */ 125 /* dead? */
127 if (!cc) 126 if (!cc)
@@ -267,7 +266,7 @@ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp)
267static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo) 266static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo)
268{ 267{
269 char *end = p + len; 268 char *end = p + len;
270 char *key, *val; 269 char *uninitialized_var(key), *uninitialized_var(val);
271 int rc; 270 int rc;
272 271
273 while (true) { 272 while (true) {
@@ -305,14 +304,14 @@ static void cuse_gendev_release(struct device *dev)
305 */ 304 */
306static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) 305static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
307{ 306{
308 struct cuse_conn *cc = fc_to_cc(fc); 307 struct cuse_conn *cc = fc_to_cc(fc), *pos;
309 struct cuse_init_out *arg = req->out.args[0].value; 308 struct cuse_init_out *arg = req->out.args[0].value;
310 struct page *page = req->pages[0]; 309 struct page *page = req->pages[0];
311 struct cuse_devinfo devinfo = { }; 310 struct cuse_devinfo devinfo = { };
312 struct device *dev; 311 struct device *dev;
313 struct cdev *cdev; 312 struct cdev *cdev;
314 dev_t devt; 313 dev_t devt;
315 int rc; 314 int rc, i;
316 315
317 if (req->out.h.error || 316 if (req->out.h.error ||
318 arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) { 317 arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) {
@@ -356,15 +355,24 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
356 dev_set_drvdata(dev, cc); 355 dev_set_drvdata(dev, cc);
357 dev_set_name(dev, "%s", devinfo.name); 356 dev_set_name(dev, "%s", devinfo.name);
358 357
358 mutex_lock(&cuse_lock);
359
360 /* make sure the device-name is unique */
361 for (i = 0; i < CUSE_CONNTBL_LEN; ++i) {
362 list_for_each_entry(pos, &cuse_conntbl[i], list)
363 if (!strcmp(dev_name(pos->dev), dev_name(dev)))
364 goto err_unlock;
365 }
366
359 rc = device_add(dev); 367 rc = device_add(dev);
360 if (rc) 368 if (rc)
361 goto err_device; 369 goto err_unlock;
362 370
363 /* register cdev */ 371 /* register cdev */
364 rc = -ENOMEM; 372 rc = -ENOMEM;
365 cdev = cdev_alloc(); 373 cdev = cdev_alloc();
366 if (!cdev) 374 if (!cdev)
367 goto err_device; 375 goto err_unlock;
368 376
369 cdev->owner = THIS_MODULE; 377 cdev->owner = THIS_MODULE;
370 cdev->ops = &cuse_frontend_fops; 378 cdev->ops = &cuse_frontend_fops;
@@ -377,9 +385,8 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
377 cc->cdev = cdev; 385 cc->cdev = cdev;
378 386
379 /* make the device available */ 387 /* make the device available */
380 spin_lock(&cuse_lock);
381 list_add(&cc->list, cuse_conntbl_head(devt)); 388 list_add(&cc->list, cuse_conntbl_head(devt));
382 spin_unlock(&cuse_lock); 389 mutex_unlock(&cuse_lock);
383 390
384 /* announce device availability */ 391 /* announce device availability */
385 dev_set_uevent_suppress(dev, 0); 392 dev_set_uevent_suppress(dev, 0);
@@ -391,7 +398,8 @@ out:
391 398
392err_cdev: 399err_cdev:
393 cdev_del(cdev); 400 cdev_del(cdev);
394err_device: 401err_unlock:
402 mutex_unlock(&cuse_lock);
395 put_device(dev); 403 put_device(dev);
396err_region: 404err_region:
397 unregister_chrdev_region(devt, 1); 405 unregister_chrdev_region(devt, 1);
@@ -520,9 +528,9 @@ static int cuse_channel_release(struct inode *inode, struct file *file)
520 int rc; 528 int rc;
521 529
522 /* remove from the conntbl, no more access from this point on */ 530 /* remove from the conntbl, no more access from this point on */
523 spin_lock(&cuse_lock); 531 mutex_lock(&cuse_lock);
524 list_del_init(&cc->list); 532 list_del_init(&cc->list);
525 spin_unlock(&cuse_lock); 533 mutex_unlock(&cuse_lock);
526 534
527 /* remove device */ 535 /* remove device */
528 if (cc->dev) 536 if (cc->dev)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index c16335315e5d..e83351aa5bad 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -692,8 +692,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
692 struct page *oldpage = *pagep; 692 struct page *oldpage = *pagep;
693 struct page *newpage; 693 struct page *newpage;
694 struct pipe_buffer *buf = cs->pipebufs; 694 struct pipe_buffer *buf = cs->pipebufs;
695 struct address_space *mapping;
696 pgoff_t index;
697 695
698 unlock_request(cs->fc, cs->req); 696 unlock_request(cs->fc, cs->req);
699 fuse_copy_finish(cs); 697 fuse_copy_finish(cs);
@@ -724,9 +722,6 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
724 if (fuse_check_page(newpage) != 0) 722 if (fuse_check_page(newpage) != 0)
725 goto out_fallback_unlock; 723 goto out_fallback_unlock;
726 724
727 mapping = oldpage->mapping;
728 index = oldpage->index;
729
730 /* 725 /*
731 * This is a new and locked page, it shouldn't be mapped or 726 * This is a new and locked page, it shouldn't be mapped or
732 * have any special flags on it 727 * have any special flags on it
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e21d4d8f87e3..f3ab824fa302 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2177,8 +2177,8 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2177 return ret; 2177 return ret;
2178} 2178}
2179 2179
2180long fuse_file_fallocate(struct file *file, int mode, loff_t offset, 2180static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2181 loff_t length) 2181 loff_t length)
2182{ 2182{
2183 struct fuse_file *ff = file->private_data; 2183 struct fuse_file *ff = file->private_data;
2184 struct fuse_conn *fc = ff->fc; 2184 struct fuse_conn *fc = ff->fc;
@@ -2213,7 +2213,6 @@ long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2213 2213
2214 return err; 2214 return err;
2215} 2215}
2216EXPORT_SYMBOL_GPL(fuse_file_fallocate);
2217 2216
2218static const struct file_operations fuse_file_operations = { 2217static const struct file_operations fuse_file_operations = {
2219 .llseek = fuse_file_llseek, 2218 .llseek = fuse_file_llseek,