-rw-r--r--  fs/btrfs/backref.c            2
-rw-r--r--  fs/btrfs/check-integrity.c    2
-rw-r--r--  fs/btrfs/compression.c        2
-rw-r--r--  fs/btrfs/ctree.h              2
-rw-r--r--  fs/btrfs/disk-io.c           12
-rw-r--r--  fs/btrfs/extent-tree.c       51
-rw-r--r--  fs/btrfs/extent_io.c        113
-rw-r--r--  fs/btrfs/extent_io.h          1
-rw-r--r--  fs/btrfs/extent_map.h         4
-rw-r--r--  fs/btrfs/file.c              29
-rw-r--r--  fs/btrfs/free-space-cache.c   1
-rw-r--r--  fs/btrfs/inode-map.c          6
-rw-r--r--  fs/btrfs/inode.c             40
-rw-r--r--  fs/btrfs/ioctl.c             59
-rw-r--r--  fs/btrfs/scrub.c              8
-rw-r--r--  fs/btrfs/transaction.c       16
-rw-r--r--  fs/btrfs/volumes.c           33
17 files changed, 250 insertions, 131 deletions
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 633c701a287d..98f6bf10bbd4 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -892,6 +892,8 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
                 if (eb != eb_in)
                         free_extent_buffer(eb);
                 ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
+                if (ret > 0)
+                        ret = -ENOENT;
                 if (ret)
                         break;
                 next_inum = found_key.offset;
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index b669a7d8e499..d986824bb2b4 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -644,7 +644,7 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
 static int btrfsic_process_superblock(struct btrfsic_state *state,
                                       struct btrfs_fs_devices *fs_devices)
 {
-        int ret;
+        int ret = 0;
         struct btrfs_super_block *selected_super;
         struct list_head *dev_head = &fs_devices->devices;
         struct btrfs_device *device;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 14f1c5a0b2d2..d02c27cd14c7 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -588,6 +588,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                    page_offset(bio->bi_io_vec->bv_page),
                                    PAGE_CACHE_SIZE);
         read_unlock(&em_tree->lock);
+        if (!em)
+                return -EIO;
 
         compressed_len = em->block_len;
         cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 27ebe61d3ccc..80b6486fd5e6 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -886,7 +886,7 @@ struct btrfs_block_rsv {
         u64 reserved;
         struct btrfs_space_info *space_info;
         spinlock_t lock;
-        unsigned int full:1;
+        unsigned int full;
 };
 
 /*
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 811d9f918b1c..534266fe505f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2260,6 +2260,12 @@ int open_ctree(struct super_block *sb,
                 goto fail_sb_buffer;
         }
 
+        if (sectorsize < PAGE_SIZE) {
+                printk(KERN_WARNING "btrfs: Incompatible sector size "
+                       "found on %s\n", sb->s_id);
+                goto fail_sb_buffer;
+        }
+
         mutex_lock(&fs_info->chunk_mutex);
         ret = btrfs_read_sys_array(tree_root);
         mutex_unlock(&fs_info->chunk_mutex);
@@ -2301,6 +2307,12 @@ int open_ctree(struct super_block *sb,
 
         btrfs_close_extra_devices(fs_devices);
 
+        if (!fs_devices->latest_bdev) {
+                printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
+                       sb->s_id);
+                goto fail_tree_roots;
+        }
+
 retry_root_backup:
         blocksize = btrfs_level_size(tree_root,
                                      btrfs_super_root_level(disk_super));
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 283af7a676a3..37e0a800d34e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3312,7 +3312,8 @@ commit_trans:
         }
         data_sinfo->bytes_may_use += bytes;
         trace_btrfs_space_reservation(root->fs_info, "space_info",
-                                      (u64)data_sinfo, bytes, 1);
+                                      (u64)(unsigned long)data_sinfo,
+                                      bytes, 1);
         spin_unlock(&data_sinfo->lock);
 
         return 0;
@@ -3333,7 +3334,8 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
         spin_lock(&data_sinfo->lock);
         data_sinfo->bytes_may_use -= bytes;
         trace_btrfs_space_reservation(root->fs_info, "space_info",
-                                      (u64)data_sinfo, bytes, 0);
+                                      (u64)(unsigned long)data_sinfo,
+                                      bytes, 0);
         spin_unlock(&data_sinfo->lock);
 }
 
@@ -3611,12 +3613,15 @@ static int may_commit_transaction(struct btrfs_root *root,
         if (space_info != delayed_rsv->space_info)
                 return -ENOSPC;
 
+        spin_lock(&space_info->lock);
         spin_lock(&delayed_rsv->lock);
-        if (delayed_rsv->size < bytes) {
+        if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
                 spin_unlock(&delayed_rsv->lock);
+                spin_unlock(&space_info->lock);
                 return -ENOSPC;
         }
         spin_unlock(&delayed_rsv->lock);
+        spin_unlock(&space_info->lock);
 
 commit:
         trans = btrfs_join_transaction(root);
@@ -3695,9 +3700,9 @@ again:
         if (used + orig_bytes <= space_info->total_bytes) {
                 space_info->bytes_may_use += orig_bytes;
                 trace_btrfs_space_reservation(root->fs_info,
                                               "space_info",
-                                              (u64)space_info,
+                                              (u64)(unsigned long)space_info,
                                               orig_bytes, 1);
                 ret = 0;
         } else {
                 /*
@@ -3766,9 +3771,9 @@ again:
                 if (used + num_bytes < space_info->total_bytes + avail) {
                         space_info->bytes_may_use += orig_bytes;
                         trace_btrfs_space_reservation(root->fs_info,
                                                       "space_info",
-                                                      (u64)space_info,
+                                                      (u64)(unsigned long)space_info,
                                                       orig_bytes, 1);
                         ret = 0;
                 } else {
                         wait_ordered = true;
@@ -3913,8 +3918,8 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
                 spin_lock(&space_info->lock);
                 space_info->bytes_may_use -= num_bytes;
                 trace_btrfs_space_reservation(fs_info, "space_info",
-                                              (u64)space_info,
+                                              (u64)(unsigned long)space_info,
                                               num_bytes, 0);
                 space_info->reservation_progress++;
                 spin_unlock(&space_info->lock);
         }
@@ -4105,7 +4110,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
         num_bytes += div64_u64(data_used + meta_used, 50);
 
         if (num_bytes * 3 > meta_used)
-                num_bytes = div64_u64(meta_used, 3);
+                num_bytes = div64_u64(meta_used, 3) * 2;
 
         return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
 }
@@ -4132,14 +4137,14 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
                 block_rsv->reserved += num_bytes;
                 sinfo->bytes_may_use += num_bytes;
                 trace_btrfs_space_reservation(fs_info, "space_info",
-                                      (u64)sinfo, num_bytes, 1);
+                                      (u64)(unsigned long)sinfo, num_bytes, 1);
         }
 
         if (block_rsv->reserved >= block_rsv->size) {
                 num_bytes = block_rsv->reserved - block_rsv->size;
                 sinfo->bytes_may_use -= num_bytes;
                 trace_btrfs_space_reservation(fs_info, "space_info",
-                                      (u64)sinfo, num_bytes, 0);
+                                      (u64)(unsigned long)sinfo, num_bytes, 0);
                 sinfo->reservation_progress++;
                 block_rsv->reserved = block_rsv->size;
                 block_rsv->full = 1;
@@ -4192,7 +4197,8 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
         if (!trans->bytes_reserved)
                 return;
 
-        trace_btrfs_space_reservation(root->fs_info, "transaction", (u64)trans,
+        trace_btrfs_space_reservation(root->fs_info, "transaction",
+                                      (u64)(unsigned long)trans,
                                       trans->bytes_reserved, 0);
         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
         trans->bytes_reserved = 0;
@@ -4710,9 +4716,9 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                 space_info->bytes_reserved += num_bytes;
                 if (reserve == RESERVE_ALLOC) {
                         trace_btrfs_space_reservation(cache->fs_info,
                                                       "space_info",
-                                                      (u64)space_info,
+                                                      (u64)(unsigned long)space_info,
                                                       num_bytes, 0);
                         space_info->bytes_may_use -= num_bytes;
                 }
         }
@@ -7886,9 +7892,16 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
         u64 start;
         u64 end;
         u64 trimmed = 0;
+        u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
         int ret = 0;
 
-        cache = btrfs_lookup_block_group(fs_info, range->start);
+        /*
+         * try to trim all FS space, our block group may start from non-zero.
+         */
+        if (range->len == total_bytes)
+                cache = btrfs_lookup_first_block_group(fs_info, range->start);
+        else
+                cache = btrfs_lookup_block_group(fs_info, range->start);
 
         while (cache) {
                 if (cache->key.objectid >= (range->start + range->len)) {
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index fcf77e1ded40..a55fbe6252de 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -513,6 +513,15 @@ hit_next:
         WARN_ON(state->end < start);
         last_end = state->end;
 
+        if (state->end < end && !need_resched())
+                next_node = rb_next(&state->rb_node);
+        else
+                next_node = NULL;
+
+        /* the state doesn't have the wanted bits, go ahead */
+        if (!(state->state & bits))
+                goto next;
+
         /*
          * | ---- desired range ---- |
          * | state | or
@@ -565,20 +574,15 @@ hit_next:
                 goto out;
         }
 
-        if (state->end < end && prealloc && !need_resched())
-                next_node = rb_next(&state->rb_node);
-        else
-                next_node = NULL;
-
         set |= clear_state_bit(tree, state, &bits, wake);
+next:
         if (last_end == (u64)-1)
                 goto out;
         start = last_end + 1;
         if (start <= end && next_node) {
                 state = rb_entry(next_node, struct extent_state,
                                  rb_node);
-                if (state->start == start)
-                        goto hit_next;
+                goto hit_next;
         }
         goto search_again;
 
@@ -961,8 +965,6 @@ hit_next:
 
         set_state_bits(tree, state, &bits);
         clear_state_bit(tree, state, &clear_bits, 0);
-
-        merge_state(tree, state);
         if (last_end == (u64)-1)
                 goto out;
 
@@ -1007,7 +1009,6 @@ hit_next:
         if (state->end <= end) {
                 set_state_bits(tree, state, &bits);
                 clear_state_bit(tree, state, &clear_bits, 0);
-                merge_state(tree, state);
                 if (last_end == (u64)-1)
                         goto out;
                 start = last_end + 1;
@@ -1068,8 +1069,6 @@ hit_next:
 
                 set_state_bits(tree, prealloc, &bits);
                 clear_state_bit(tree, prealloc, &clear_bits, 0);
-
-                merge_state(tree, prealloc);
                 prealloc = NULL;
                 goto out;
         }
@@ -2154,13 +2153,46 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page,
                "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
                failrec->this_mirror, num_copies, failrec->in_validation);
 
-        tree->ops->submit_bio_hook(inode, read_mode, bio, failrec->this_mirror,
-                                   failrec->bio_flags, 0);
-        return 0;
+        ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
+                                         failrec->this_mirror,
+                                         failrec->bio_flags, 0);
+        return ret;
 }
 
 /* lots and lots of room for performance fixes in the end_bio funcs */
 
+int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
+{
+        int uptodate = (err == 0);
+        struct extent_io_tree *tree;
+        int ret;
+
+        tree = &BTRFS_I(page->mapping->host)->io_tree;
+
+        if (tree->ops && tree->ops->writepage_end_io_hook) {
+                ret = tree->ops->writepage_end_io_hook(page, start,
+                                                       end, NULL, uptodate);
+                if (ret)
+                        uptodate = 0;
+        }
+
+        if (!uptodate && tree->ops &&
+            tree->ops->writepage_io_failed_hook) {
+                ret = tree->ops->writepage_io_failed_hook(NULL, page,
+                                                          start, end, NULL);
+                /* Writeback already completed */
+                if (ret == 0)
+                        return 1;
+        }
+
+        if (!uptodate) {
+                clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
+                ClearPageUptodate(page);
+                SetPageError(page);
+        }
+        return 0;
+}
+
 /*
  * after a writepage IO is done, we need to:
  * clear the uptodate bits on error
@@ -2172,13 +2204,11 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page,
  */
 static void end_bio_extent_writepage(struct bio *bio, int err)
 {
-        int uptodate = err == 0;
         struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
         struct extent_io_tree *tree;
         u64 start;
         u64 end;
         int whole_page;
-        int ret;
 
         do {
                 struct page *page = bvec->bv_page;
@@ -2195,28 +2225,9 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
 
                 if (--bvec >= bio->bi_io_vec)
                         prefetchw(&bvec->bv_page->flags);
-                if (tree->ops && tree->ops->writepage_end_io_hook) {
-                        ret = tree->ops->writepage_end_io_hook(page, start,
-                                                       end, NULL, uptodate);
-                        if (ret)
-                                uptodate = 0;
-                }
-
-                if (!uptodate && tree->ops &&
-                    tree->ops->writepage_io_failed_hook) {
-                        ret = tree->ops->writepage_io_failed_hook(bio, page,
-                                                         start, end, NULL);
-                        if (ret == 0) {
-                                uptodate = (err == 0);
-                                continue;
-                        }
-                }
 
-                if (!uptodate) {
-                        clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
-                        ClearPageUptodate(page);
-                        SetPageError(page);
-                }
+                if (end_extent_writepage(page, err, start, end))
+                        continue;
 
                 if (whole_page)
                         end_page_writeback(page);
@@ -2779,9 +2790,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                         delalloc_start = delalloc_end + 1;
                         continue;
                 }
-                tree->ops->fill_delalloc(inode, page, delalloc_start,
-                                         delalloc_end, &page_started,
-                                         &nr_written);
+                ret = tree->ops->fill_delalloc(inode, page,
+                                               delalloc_start,
+                                               delalloc_end,
+                                               &page_started,
+                                               &nr_written);
+                BUG_ON(ret);
                 /*
                  * delalloc_end is already one less than the total
                  * length, so we don't subtract one from
@@ -2818,8 +2832,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         if (tree->ops && tree->ops->writepage_start_hook) {
                 ret = tree->ops->writepage_start_hook(page, start,
                                                       page_end);
-                if (ret == -EAGAIN) {
-                        redirty_page_for_writepage(wbc, page);
+                if (ret) {
+                        /* Fixup worker will requeue */
+                        if (ret == -EBUSY)
+                                wbc->pages_skipped++;
+                        else
+                                redirty_page_for_writepage(wbc, page);
                         update_nr_written(page, wbc, nr_written);
                         unlock_page(page);
                         ret = 0;
@@ -3289,7 +3307,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
                 len = end - start + 1;
                 write_lock(&map->lock);
                 em = lookup_extent_mapping(map, start, len);
-                if (IS_ERR_OR_NULL(em)) {
+                if (!em) {
                         write_unlock(&map->lock);
                         break;
                 }
@@ -3853,10 +3871,9 @@ int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
         num_pages = num_extent_pages(eb->start, eb->len);
         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 
-        if (eb_straddles_pages(eb)) {
-                clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-                                      cached_state, GFP_NOFS);
-        }
+        clear_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
+                              cached_state, GFP_NOFS);
+
         for (i = 0; i < num_pages; i++) {
                 page = extent_buffer_page(eb, i);
                 if (page)
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index bc6a042cb6fc..cecc3518c121 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -319,4 +319,5 @@ struct btrfs_mapping_tree;
 int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
                         u64 length, u64 logical, struct page *page,
                         int mirror_num);
+int end_extent_writepage(struct page *page, int err, u64 start, u64 end);
 #endif
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 33a7890b1f40..1195f09761fe 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -26,8 +26,8 @@ struct extent_map {
         unsigned long flags;
         struct block_device *bdev;
         atomic_t refs;
-        unsigned int in_tree:1;
-        unsigned int compress_type:4;
+        unsigned int in_tree;
+        unsigned int compress_type;
 };
 
 struct extent_map_tree {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 859ba2dd8890..e8d06b6b9194 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1605,6 +1605,14 @@ static long btrfs_fallocate(struct file *file, int mode,
                 return -EOPNOTSUPP;
 
         /*
+         * Make sure we have enough space before we do the
+         * allocation.
+         */
+        ret = btrfs_check_data_free_space(inode, len);
+        if (ret)
+                return ret;
+
+        /*
          * wait for ordered IO before we have any locks. We'll loop again
          * below with the locks held.
          */
@@ -1667,27 +1675,12 @@ static long btrfs_fallocate(struct file *file, int mode,
                 if (em->block_start == EXTENT_MAP_HOLE ||
                     (cur_offset >= inode->i_size &&
                      !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
-
-                        /*
-                         * Make sure we have enough space before we do the
-                         * allocation.
-                         */
-                        ret = btrfs_check_data_free_space(inode, last_byte -
-                                                          cur_offset);
-                        if (ret) {
-                                free_extent_map(em);
-                                break;
-                        }
-
                         ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
                                                         last_byte - cur_offset,
                                                         1 << inode->i_blkbits,
                                                         offset + len,
                                                         &alloc_hint);
 
-                        /* Let go of our reservation. */
-                        btrfs_free_reserved_data_space(inode, last_byte -
-                                                       cur_offset);
                         if (ret < 0) {
                                 free_extent_map(em);
                                 break;
@@ -1715,6 +1708,8 @@ static long btrfs_fallocate(struct file *file, int mode,
                              &cached_state, GFP_NOFS);
 out:
         mutex_unlock(&inode->i_mutex);
+        /* Let go of our reservation. */
+        btrfs_free_reserved_data_space(inode, len);
         return ret;
 }
 
@@ -1761,7 +1756,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
                                            start - root->sectorsize,
                                            root->sectorsize, 0);
                 if (IS_ERR(em)) {
-                        ret = -ENXIO;
+                        ret = PTR_ERR(em);
                         goto out;
                 }
                 last_end = em->start + em->len;
@@ -1773,7 +1768,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
         while (1) {
                 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
                 if (IS_ERR(em)) {
-                        ret = -ENXIO;
+                        ret = PTR_ERR(em);
                         break;
                 }
 
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index c2f20594c9f7..710ea380c7ed 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -777,6 +777,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
         spin_lock(&block_group->lock);
         if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
                 spin_unlock(&block_group->lock);
+                btrfs_free_path(path);
                 goto out;
         }
         spin_unlock(&block_group->lock);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 213ffa86ce1b..ee15d88b33d2 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -438,7 +438,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
                                               trans->bytes_reserved);
         if (ret)
                 goto out;
-        trace_btrfs_space_reservation(root->fs_info, "ino_cache", (u64)trans,
+        trace_btrfs_space_reservation(root->fs_info, "ino_cache",
+                                      (u64)(unsigned long)trans,
                                       trans->bytes_reserved, 1);
 again:
         inode = lookup_free_ino_inode(root, path);
@@ -500,7 +501,8 @@ again:
 out_put:
         iput(inode);
 out_release:
-        trace_btrfs_space_reservation(root->fs_info, "ino_cache", (u64)trans,
+        trace_btrfs_space_reservation(root->fs_info, "ino_cache",
+                                      (u64)(unsigned long)trans,
                                       trans->bytes_reserved, 0);
         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
 out:
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 32214fe0f7e3..892b34785ccc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1555,6 +1555,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
         struct inode *inode;
         u64 page_start;
         u64 page_end;
+        int ret;
 
         fixup = container_of(work, struct btrfs_writepage_fixup, work);
         page = fixup->page;
@@ -1582,12 +1583,21 @@ again:
                                  page_end, &cached_state, GFP_NOFS);
                 unlock_page(page);
                 btrfs_start_ordered_extent(inode, ordered, 1);
+                btrfs_put_ordered_extent(ordered);
                 goto again;
         }
 
-        BUG();
+        ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
+        if (ret) {
+                mapping_set_error(page->mapping, ret);
+                end_extent_writepage(page, ret, page_start, page_end);
+                ClearPageChecked(page);
+                goto out;
+        }
+
         btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
         ClearPageChecked(page);
+        set_page_dirty(page);
 out:
         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
                              &cached_state, GFP_NOFS);
@@ -1630,7 +1640,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
         fixup->work.func = btrfs_writepage_fixup_worker;
         fixup->page = page;
         btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
-        return -EAGAIN;
+        return -EBUSY;
 }
 
 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
@@ -4575,7 +4585,8 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
                 ret = btrfs_insert_dir_item(trans, root, name, name_len,
                                             parent_inode, &key,
                                             btrfs_inode_type(inode), index);
-                BUG_ON(ret);
+                if (ret)
+                        goto fail_dir_item;
 
                 btrfs_i_size_write(parent_inode, parent_inode->i_size +
                                    name_len * 2);
@@ -4583,6 +4594,23 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
                 ret = btrfs_update_inode(trans, root, parent_inode);
         }
         return ret;
+
+fail_dir_item:
+        if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
+                u64 local_index;
+                int err;
+                err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
+                                 key.objectid, root->root_key.objectid,
+                                 parent_ino, &local_index, name, name_len);
+
+        } else if (add_backref) {
+                u64 local_index;
+                int err;
+
+                err = btrfs_del_inode_ref(trans, root, name, name_len,
+                                          ino, parent_ino, &local_index);
+        }
+        return ret;
 }
 
 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
@@ -6696,8 +6724,10 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
         int err;
         u64 index = 0;
 
-        inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
-                                new_dirid, S_IFDIR | 0700, &index);
+        inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
+                                new_dirid, new_dirid,
+                                S_IFDIR | (~current_umask() & S_IRWXUGO),
+                                &index);
         if (IS_ERR(inode))
                 return PTR_ERR(inode);
         inode->i_op = &btrfs_dir_inode_operations;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 03bb62a9ee24..d8b54715c2de 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -861,6 +861,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
         int i_done;
         struct btrfs_ordered_extent *ordered;
         struct extent_state *cached_state = NULL;
+        struct extent_io_tree *tree;
         gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
 
         if (isize == 0)
@@ -871,18 +872,34 @@ static int cluster_pages_for_defrag(struct inode *inode,
                                     num_pages << PAGE_CACHE_SHIFT);
         if (ret)
                 return ret;
-again:
-        ret = 0;
         i_done = 0;
+        tree = &BTRFS_I(inode)->io_tree;
 
         /* step one, lock all the pages */
         for (i = 0; i < num_pages; i++) {
                 struct page *page;
+again:
                 page = find_or_create_page(inode->i_mapping,
                                            start_index + i, mask);
                 if (!page)
                         break;
 
+                page_start = page_offset(page);
+                page_end = page_start + PAGE_CACHE_SIZE - 1;
+                while (1) {
+                        lock_extent(tree, page_start, page_end, GFP_NOFS);
+                        ordered = btrfs_lookup_ordered_extent(inode,
+                                                              page_start);
+                        unlock_extent(tree, page_start, page_end, GFP_NOFS);
+                        if (!ordered)
+                                break;
+
+                        unlock_page(page);
+                        btrfs_start_ordered_extent(inode, ordered, 1);
+                        btrfs_put_ordered_extent(ordered);
+                        lock_page(page);
+                }
+
                 if (!PageUptodate(page)) {
                         btrfs_readpage(NULL, page);
                         lock_page(page);
@@ -893,15 +910,22 @@ again:
                                 break;
                         }
                 }
+
                 isize = i_size_read(inode);
                 file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
-                if (!isize || page->index > file_end ||
-                    page->mapping != inode->i_mapping) {
+                if (!isize || page->index > file_end) {
                         /* whoops, we blew past eof, skip this page */
                         unlock_page(page);
                         page_cache_release(page);
                         break;
                 }
+
+                if (page->mapping != inode->i_mapping) {
+                        unlock_page(page);
+                        page_cache_release(page);
+                        goto again;
+                }
+
                 pages[i] = page;
                 i_done++;
         }
@@ -924,25 +948,6 @@ again:
         lock_extent_bits(&BTRFS_I(inode)->io_tree,
                          page_start, page_end - 1, 0, &cached_state,
                          GFP_NOFS);
-        ordered = btrfs_lookup_first_ordered_extent(inode, page_end - 1);
-        if (ordered &&
-            ordered->file_offset + ordered->len > page_start &&
-            ordered->file_offset < page_end) {
-                btrfs_put_ordered_extent(ordered);
-                unlock_extent_cached(&BTRFS_I(inode)->io_tree,
-                                     page_start, page_end - 1,
-                                     &cached_state, GFP_NOFS);
-                for (i = 0; i < i_done; i++) {
-                        unlock_page(pages[i]);
-                        page_cache_release(pages[i]);
-                }
-                btrfs_wait_ordered_range(inode, page_start,
-                                         page_end - page_start);
-                goto again;
-        }
-        if (ordered)
-                btrfs_put_ordered_extent(ordered);
-
         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
                          page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
                          EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
@@ -1327,6 +1332,12 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
                 goto out;
         }
 
+        if (name[0] == '.' &&
+           (namelen == 1 || (name[1] == '.' && namelen == 2))) {
+                ret = -EEXIST;
+                goto out;
+        }
+
         if (subvol) {
                 ret = btrfs_mksubvol(&file->f_path, name, namelen,
                                      NULL, transid, readonly);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 9770cc5bfb76..abc0fbffa510 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1367,7 +1367,8 @@ out:
 }
 
 static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
-        u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length)
+        u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length,
+        u64 dev_offset)
 {
         struct btrfs_mapping_tree *map_tree =
                 &sdev->dev->dev_root->fs_info->mapping_tree;
@@ -1391,7 +1392,8 @@ static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
                 goto out;
 
         for (i = 0; i < map->num_stripes; ++i) {
-                if (map->stripes[i].dev == sdev->dev) {
+                if (map->stripes[i].dev == sdev->dev &&
+                    map->stripes[i].physical == dev_offset) {
                         ret = scrub_stripe(sdev, map, i, chunk_offset, length);
                         if (ret)
                                 goto out;
@@ -1487,7 +1489,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
                         break;
                 }
                 ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
-                                  chunk_offset, length);
+                                  chunk_offset, length, found_key.offset);
                 btrfs_put_block_group(cache);
                 if (ret)
                         break;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 287a6728b1ad..04b77e3ceb7a 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -327,7 +327,8 @@ again:
 
         if (num_bytes) {
                 trace_btrfs_space_reservation(root->fs_info, "transaction",
-                                              (u64)h, num_bytes, 1);
+                                              (u64)(unsigned long)h,
+                                              num_bytes, 1);
                 h->block_rsv = &root->fs_info->trans_block_rsv;
                 h->bytes_reserved = num_bytes;
         }
@@ -915,7 +916,11 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                     dentry->d_name.name, dentry->d_name.len,
                                     parent_inode, &key,
                                     BTRFS_FT_DIR, index);
-        BUG_ON(ret);
+        if (ret) {
+                pending->error = -EEXIST;
+                dput(parent);
+                goto fail;
+        }
 
         btrfs_i_size_write(parent_inode, parent_inode->i_size +
                            dentry->d_name.len * 2);
@@ -993,12 +998,9 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
 {
         struct btrfs_pending_snapshot *pending;
         struct list_head *head = &trans->transaction->pending_snapshots;
-        int ret;
 
-        list_for_each_entry(pending, head, list) {
-                ret = create_pending_snapshot(trans, fs_info, pending);
-                BUG_ON(ret);
-        }
+        list_for_each_entry(pending, head, list)
+                create_pending_snapshot(trans, fs_info, pending);
         return 0;
 }
 
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0b4e2af7954d..ef41f285a475 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -459,12 +459,23 @@ int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
 {
         struct btrfs_device *device, *next;
 
+        struct block_device *latest_bdev = NULL;
+        u64 latest_devid = 0;
+        u64 latest_transid = 0;
+
         mutex_lock(&uuid_mutex);
 again:
         /* This is the initialized path, it is safe to release the devices. */
         list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
-                if (device->in_fs_metadata)
+                if (device->in_fs_metadata) {
+                        if (!latest_transid ||
+                            device->generation > latest_transid) {
+                                latest_devid = device->devid;
+                                latest_transid = device->generation;
+                                latest_bdev = device->bdev;
+                        }
                         continue;
+                }
 
                 if (device->bdev) {
                         blkdev_put(device->bdev, device->mode);
@@ -487,6 +498,10 @@ again:
                 goto again;
         }
 
+        fs_devices->latest_bdev = latest_bdev;
+        fs_devices->latest_devid = latest_devid;
+        fs_devices->latest_trans = latest_transid;
+
         mutex_unlock(&uuid_mutex);
         return 0;
 }
@@ -1953,7 +1968,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
         read_unlock(&em_tree->lock);
 
-        BUG_ON(em->start > chunk_offset ||
+        BUG_ON(!em || em->start > chunk_offset ||
                em->start + em->len < chunk_offset);
         map = (struct map_lookup *)em->bdev;
 
@@ -4356,6 +4371,20 @@ int btrfs_read_sys_array(struct btrfs_root *root)
                 return -ENOMEM;
         btrfs_set_buffer_uptodate(sb);
         btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
+        /*
+         * The sb extent buffer is artifical and just used to read the system array.
+         * btrfs_set_buffer_uptodate() call does not properly mark all it's
+         * pages up-to-date when the page is larger: extent does not cover the
+         * whole page and consequently check_page_uptodate does not find all
+         * the page's extents up-to-date (the hole beyond sb),
+         * write_extent_buffer then triggers a WARN_ON.
+         *
+         * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
+         * but sb spans only this function. Add an explicit SetPageUptodate call
+         * to silence the warning eg. on PowerPC 64.
+         */
+        if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE)
+                SetPageUptodate(sb->first_page);
 
         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
         array_size = btrfs_super_sys_array_size(super_copy);