author	Linus Torvalds <torvalds@linux-foundation.org>	2017-03-31 20:58:48 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-03-31 20:58:48 -0400
commit	fe8e12b5032536d37751c47e1c0446f17e974e5c (patch)
tree	279c47182394244da6fd87a33031117dd459bbe1
parent	f9799ad21b5e4a41633f54dfab407ebb37abbd8a (diff)
parent	41a75a6eb2bf4e75e0d93862171bebb3ca4efec7 (diff)
Merge branch 'for-linus-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs fixes from Chris Mason:
 "We have three small fixes queued up in my for-linus-4.11 branch"

* 'for-linus-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: fix an integer overflow check
  btrfs: Change qgroup_meta_rsv to 64bit
  Btrfs: bring back repair during read
-rw-r--r--	fs/btrfs/ctree.h	2
-rw-r--r--	fs/btrfs/disk-io.c	2
-rw-r--r--	fs/btrfs/extent_io.c	46
-rw-r--r--	fs/btrfs/inode.c	6
-rw-r--r--	fs/btrfs/qgroup.c	10
-rw-r--r--	fs/btrfs/send.c	7
6 files changed, 44 insertions, 29 deletions
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 29b7fc28c607..c4115901d906 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1259,7 +1259,7 @@ struct btrfs_root {
 	atomic_t will_be_snapshoted;
 
 	/* For qgroup metadata space reserve */
-	atomic_t qgroup_meta_rsv;
+	atomic64_t qgroup_meta_rsv;
 };
 static inline u32 btrfs_inode_sectorsize(const struct inode *inode)
 {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 08b74daf35d0..eb1ee7b6f532 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1342,7 +1342,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 	atomic_set(&root->orphan_inodes, 0);
 	atomic_set(&root->refs, 1);
 	atomic_set(&root->will_be_snapshoted, 0);
-	atomic_set(&root->qgroup_meta_rsv, 0);
+	atomic64_set(&root->qgroup_meta_rsv, 0);
 	root->log_transid = 0;
 	root->log_transid_committed = -1;
 	root->last_log_commit = 0;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8df797432740..27fdb250b446 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2584,26 +2584,36 @@ static void end_bio_extent_readpage(struct bio *bio)
 
 		if (tree->ops) {
 			ret = tree->ops->readpage_io_failed_hook(page, mirror);
-			if (!ret && !bio->bi_error)
-				uptodate = 1;
-		} else {
+			if (ret == -EAGAIN) {
+				/*
+				 * Data inode's readpage_io_failed_hook() always
+				 * returns -EAGAIN.
+				 *
+				 * The generic bio_readpage_error handles errors
+				 * the following way: If possible, new read
+				 * requests are created and submitted and will
+				 * end up in end_bio_extent_readpage as well (if
+				 * we're lucky, not in the !uptodate case). In
+				 * that case it returns 0 and we just go on with
+				 * the next page in our bio. If it can't handle
+				 * the error it will return -EIO and we remain
+				 * responsible for that page.
+				 */
+				ret = bio_readpage_error(bio, offset, page,
+							 start, end, mirror);
+				if (ret == 0) {
+					uptodate = !bio->bi_error;
+					offset += len;
+					continue;
+				}
+			}
+
 			/*
-			 * The generic bio_readpage_error handles errors the
-			 * following way: If possible, new read requests are
-			 * created and submitted and will end up in
-			 * end_bio_extent_readpage as well (if we're lucky, not
-			 * in the !uptodate case). In that case it returns 0 and
-			 * we just go on with the next page in our bio. If it
-			 * can't handle the error it will return -EIO and we
-			 * remain responsible for that page.
+			 * metadata's readpage_io_failed_hook() always returns
+			 * -EIO and fixes nothing.  -EIO is also returned if
+			 * data inode error could not be fixed.
 			 */
-			ret = bio_readpage_error(bio, offset, page, start, end,
-						 mirror);
-			if (ret == 0) {
-				uptodate = !bio->bi_error;
-				offset += len;
-				continue;
-			}
+			ASSERT(ret == -EIO);
 		}
 readpage_ok:
 		if (likely(uptodate)) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 231503935652..a18510be76c1 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -10523,9 +10523,9 @@ out_inode:
 }
 
 __attribute__((const))
-static int dummy_readpage_io_failed_hook(struct page *page, int failed_mirror)
+static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
 {
-	return 0;
+	return -EAGAIN;
 }
 
 static const struct inode_operations btrfs_dir_inode_operations = {
@@ -10570,7 +10570,7 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
 	.submit_bio_hook = btrfs_submit_bio_hook,
 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
 	.merge_bio_hook = btrfs_merge_bio_hook,
-	.readpage_io_failed_hook = dummy_readpage_io_failed_hook,
+	.readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
 
 	/* optional callbacks */
 	.fill_delalloc = run_delalloc_range,
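Note on the extent_io.c and inode.c hunks above: together they re-establish read-time repair. The data-inode hook now returns -EAGAIN to request a repair read via bio_readpage_error(), while the metadata hook keeps returning -EIO, which is also the final result when a data error cannot be fixed. The userspace sketch below only models that contract; every function in it is a stand-in, not the btrfs implementation itself.

/* Hedged model of the error-path contract; all names are stand-ins. */
#include <errno.h>
#include <stdio.h>

static int data_readpage_io_failed_hook(void) { return -EAGAIN; } /* data: ask for repair    */
static int meta_readpage_io_failed_hook(void) { return -EIO; }    /* metadata: nothing to do */

/* Models bio_readpage_error(): 0 = repair read submitted, -EIO = give up. */
static int try_repair_read(int other_mirror_available)
{
	return other_mirror_available ? 0 : -EIO;
}

static const char *handle_failed_page(int is_data, int other_mirror_available)
{
	int ret = is_data ? data_readpage_io_failed_hook()
			  : meta_readpage_io_failed_hook();

	if (ret == -EAGAIN) {
		ret = try_repair_read(other_mirror_available);
		if (ret == 0)
			return "repair read submitted, page finished later";
	}
	/* metadata, or a data error that could not be repaired */
	return "page stays not uptodate (-EIO)";
}

int main(void)
{
	printf("data, other mirror ok : %s\n", handle_failed_page(1, 1));
	printf("data, no other mirror : %s\n", handle_failed_page(1, 0));
	printf("metadata              : %s\n", handle_failed_page(0, 1));
	return 0;
}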
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index a5da750c1087..a59801dc2a34 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2948,20 +2948,20 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
 	ret = qgroup_reserve(root, num_bytes, enforce);
 	if (ret < 0)
 		return ret;
-	atomic_add(num_bytes, &root->qgroup_meta_rsv);
+	atomic64_add(num_bytes, &root->qgroup_meta_rsv);
 	return ret;
 }
 
 void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
-	int reserved;
+	u64 reserved;
 
 	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
 	    !is_fstree(root->objectid))
 		return;
 
-	reserved = atomic_xchg(&root->qgroup_meta_rsv, 0);
+	reserved = atomic64_xchg(&root->qgroup_meta_rsv, 0);
 	if (reserved == 0)
 		return;
 	btrfs_qgroup_free_refroot(fs_info, root->objectid, reserved);
@@ -2976,8 +2976,8 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
 		return;
 
 	BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
-	WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
-	atomic_sub(num_bytes, &root->qgroup_meta_rsv);
+	WARN_ON(atomic64_read(&root->qgroup_meta_rsv) < num_bytes);
+	atomic64_sub(num_bytes, &root->qgroup_meta_rsv);
 	btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes);
 }
 
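Note on the qgroup hunks: the counter holds bytes, not items, so with a 16 KiB nodesize a few hundred thousand outstanding metadata reservations already exceed what a 32-bit counter can represent, and a later free based on that counter leaks reservations. A minimal userspace sketch of the truncation; the numbers are illustrative, not measured from btrfs.

/* Hedged sketch: a 32-bit byte counter for metadata reservations truncates
 * once enough reservations are outstanding; values are illustrative only. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t nodesize = 16384;       /* bytes per metadata reservation (assumed) */
	const uint64_t reservations = 300000;  /* outstanding reservations before a flush  */

	uint64_t total = reservations * nodesize;  /* what atomic64_t can track        */
	uint32_t as_32bit = (uint32_t)total;       /* what a 32-bit counter would hold */

	printf("bytes actually reserved : %" PRIu64 "\n", total);
	printf("32-bit counter reports  : %" PRIu32 "\n", as_32bit);
	printf("reservation leaked      : %" PRIu64 " bytes\n", total - as_32bit);
	return 0;
}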
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 456c8901489b..a60d5bfb8a49 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6305,8 +6305,13 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
 		goto out;
 	}
 
+	/*
+	 * Check that we don't overflow at later allocations, we request
+	 * clone_sources_count + 1 items, and compare to unsigned long inside
+	 * access_ok.
+	 */
 	if (arg->clone_sources_count >
-	    ULLONG_MAX / sizeof(*arg->clone_sources)) {
+	    ULONG_MAX / sizeof(struct clone_root) - 1) {
 		ret = -EINVAL;
 		goto out;
 	}
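Note on the send.c hunk: per the added comment, the new bound accounts for two things the old ULLONG_MAX check did not, namely that later code requests clone_sources_count + 1 items of the internal struct clone_root (not of the smaller u64 the user passes in), and that the size is compared as an unsigned long, which is 32 bits on 32-bit kernels. A userspace sketch of the arithmetic follows; the struct size and the 32-bit unsigned long are assumptions for illustration only.

/* Hedged sketch of the overflow check: the count must leave room for
 * (count + 1) items of the internal struct in an unsigned-long-sized value.
 * Struct layout and limits here are illustrative, not the kernel's. */
#include <stdint.h>
#include <stdio.h>

struct clone_root_like { uint64_t root; uint64_t ino; };  /* 16-byte stand-in */

static int accepted_by_new_check(uint64_t count, uint64_t ulong_max)
{
	/* mirrors: count > ULONG_MAX / sizeof(struct clone_root) - 1  -> -EINVAL */
	return count <= ulong_max / sizeof(struct clone_root_like) - 1;
}

int main(void)
{
	const uint64_t ulong_max_32 = 0xffffffffULL;  /* unsigned long on a 32-bit kernel        */
	uint64_t count = ulong_max_32 / 8;            /* passes an ULLONG_MAX / 8 style check    */

	/* (count + 1) * 16 would wrap a 32-bit size; the new check rejects it */
	printf("count %llu accepted: %s\n", (unsigned long long)count,
	       accepted_by_new_check(count, ulong_max_32) ? "yes" : "no");
	return 0;
}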