summaryrefslogtreecommitdiffstats
path: root/fs/btrfs/disk-io.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@fusionio.com>2013-02-20 14:05:45 -0500
committerChris Mason <chris.mason@fusionio.com>2013-02-20 14:05:45 -0500
commitb2c6b3e0611c58fbeb6b9c0892b6249f7bdfaf6b (patch)
treede7cf0825605aa6acf33a8d107003efd7aedbe72 /fs/btrfs/disk-io.c
parent19f949f52599ba7c3f67a5897ac6be14bfcb1200 (diff)
parent272d26d0ad8c0e326689f2fa3cdc6a5fcc8e74e0 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/josef/btrfs-next into for-linus-3.9
Signed-off-by: Chris Mason <chris.mason@fusionio.com>

Conflicts:
	fs/btrfs/disk-io.c
Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r--	fs/btrfs/disk-io.c	160
1 files changed, 81 insertions, 79 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a8f652dc940b..779b401cd952 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -56,7 +56,8 @@ static void end_workqueue_fn(struct btrfs_work *work);
56static void free_fs_root(struct btrfs_root *root); 56static void free_fs_root(struct btrfs_root *root);
57static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, 57static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
58 int read_only); 58 int read_only);
59static void btrfs_destroy_ordered_operations(struct btrfs_root *root); 59static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
60 struct btrfs_root *root);
60static void btrfs_destroy_ordered_extents(struct btrfs_root *root); 61static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
61static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, 62static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
62 struct btrfs_root *root); 63 struct btrfs_root *root);
@@ -420,7 +421,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
420static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) 421static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
421{ 422{
422 struct extent_io_tree *tree; 423 struct extent_io_tree *tree;
423 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 424 u64 start = page_offset(page);
424 u64 found_start; 425 u64 found_start;
425 struct extent_buffer *eb; 426 struct extent_buffer *eb;
426 427
@@ -946,18 +947,20 @@ static int btree_writepages(struct address_space *mapping,
946 struct writeback_control *wbc) 947 struct writeback_control *wbc)
947{ 948{
948 struct extent_io_tree *tree; 949 struct extent_io_tree *tree;
950 struct btrfs_fs_info *fs_info;
951 int ret;
952
949 tree = &BTRFS_I(mapping->host)->io_tree; 953 tree = &BTRFS_I(mapping->host)->io_tree;
950 if (wbc->sync_mode == WB_SYNC_NONE) { 954 if (wbc->sync_mode == WB_SYNC_NONE) {
951 struct btrfs_root *root = BTRFS_I(mapping->host)->root;
952 u64 num_dirty;
953 unsigned long thresh = 32 * 1024 * 1024;
954 955
955 if (wbc->for_kupdate) 956 if (wbc->for_kupdate)
956 return 0; 957 return 0;
957 958
959 fs_info = BTRFS_I(mapping->host)->root->fs_info;
958 /* this is a bit racy, but that's ok */ 960 /* this is a bit racy, but that's ok */
959 num_dirty = root->fs_info->dirty_metadata_bytes; 961 ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
960 if (num_dirty < thresh) 962 BTRFS_DIRTY_METADATA_THRESH);
963 if (ret < 0)
961 return 0; 964 return 0;
962 } 965 }
963 return btree_write_cache_pages(mapping, wbc); 966 return btree_write_cache_pages(mapping, wbc);
@@ -1125,24 +1128,16 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
1125void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, 1128void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1126 struct extent_buffer *buf) 1129 struct extent_buffer *buf)
1127{ 1130{
1131 struct btrfs_fs_info *fs_info = root->fs_info;
1132
1128 if (btrfs_header_generation(buf) == 1133 if (btrfs_header_generation(buf) ==
1129 root->fs_info->running_transaction->transid) { 1134 fs_info->running_transaction->transid) {
1130 btrfs_assert_tree_locked(buf); 1135 btrfs_assert_tree_locked(buf);
1131 1136
1132 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) { 1137 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
1133 spin_lock(&root->fs_info->delalloc_lock); 1138 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
1134 if (root->fs_info->dirty_metadata_bytes >= buf->len) 1139 -buf->len,
1135 root->fs_info->dirty_metadata_bytes -= buf->len; 1140 fs_info->dirty_metadata_batch);
1136 else {
1137 spin_unlock(&root->fs_info->delalloc_lock);
1138 btrfs_panic(root->fs_info, -EOVERFLOW,
1139 "Can't clear %lu bytes from "
1140 " dirty_mdatadata_bytes (%llu)",
1141 buf->len,
1142 root->fs_info->dirty_metadata_bytes);
1143 }
1144 spin_unlock(&root->fs_info->delalloc_lock);
1145
1146 /* ugh, clear_extent_buffer_dirty needs to lock the page */ 1141 /* ugh, clear_extent_buffer_dirty needs to lock the page */
1147 btrfs_set_lock_blocking(buf); 1142 btrfs_set_lock_blocking(buf);
1148 clear_extent_buffer_dirty(buf); 1143 clear_extent_buffer_dirty(buf);
@@ -1178,9 +1173,13 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1178 1173
1179 INIT_LIST_HEAD(&root->dirty_list); 1174 INIT_LIST_HEAD(&root->dirty_list);
1180 INIT_LIST_HEAD(&root->root_list); 1175 INIT_LIST_HEAD(&root->root_list);
1176 INIT_LIST_HEAD(&root->logged_list[0]);
1177 INIT_LIST_HEAD(&root->logged_list[1]);
1181 spin_lock_init(&root->orphan_lock); 1178 spin_lock_init(&root->orphan_lock);
1182 spin_lock_init(&root->inode_lock); 1179 spin_lock_init(&root->inode_lock);
1183 spin_lock_init(&root->accounting_lock); 1180 spin_lock_init(&root->accounting_lock);
1181 spin_lock_init(&root->log_extents_lock[0]);
1182 spin_lock_init(&root->log_extents_lock[1]);
1184 mutex_init(&root->objectid_mutex); 1183 mutex_init(&root->objectid_mutex);
1185 mutex_init(&root->log_mutex); 1184 mutex_init(&root->log_mutex);
1186 init_waitqueue_head(&root->log_writer_wait); 1185 init_waitqueue_head(&root->log_writer_wait);
@@ -2004,10 +2003,24 @@ int open_ctree(struct super_block *sb,
2004 goto fail_srcu; 2003 goto fail_srcu;
2005 } 2004 }
2006 2005
2006 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
2007 if (ret) {
2008 err = ret;
2009 goto fail_bdi;
2010 }
2011 fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
2012 (1 + ilog2(nr_cpu_ids));
2013
2014 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
2015 if (ret) {
2016 err = ret;
2017 goto fail_dirty_metadata_bytes;
2018 }
2019
2007 fs_info->btree_inode = new_inode(sb); 2020 fs_info->btree_inode = new_inode(sb);
2008 if (!fs_info->btree_inode) { 2021 if (!fs_info->btree_inode) {
2009 err = -ENOMEM; 2022 err = -ENOMEM;
2010 goto fail_bdi; 2023 goto fail_delalloc_bytes;
2011 } 2024 }
2012 2025
2013 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS); 2026 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
@@ -2017,7 +2030,6 @@ int open_ctree(struct super_block *sb,
2017 INIT_LIST_HEAD(&fs_info->dead_roots); 2030 INIT_LIST_HEAD(&fs_info->dead_roots);
2018 INIT_LIST_HEAD(&fs_info->delayed_iputs); 2031 INIT_LIST_HEAD(&fs_info->delayed_iputs);
2019 INIT_LIST_HEAD(&fs_info->delalloc_inodes); 2032 INIT_LIST_HEAD(&fs_info->delalloc_inodes);
2020 INIT_LIST_HEAD(&fs_info->ordered_operations);
2021 INIT_LIST_HEAD(&fs_info->caching_block_groups); 2033 INIT_LIST_HEAD(&fs_info->caching_block_groups);
2022 spin_lock_init(&fs_info->delalloc_lock); 2034 spin_lock_init(&fs_info->delalloc_lock);
2023 spin_lock_init(&fs_info->trans_lock); 2035 spin_lock_init(&fs_info->trans_lock);
@@ -2028,6 +2040,7 @@ int open_ctree(struct super_block *sb,
2028 spin_lock_init(&fs_info->tree_mod_seq_lock); 2040 spin_lock_init(&fs_info->tree_mod_seq_lock);
2029 rwlock_init(&fs_info->tree_mod_log_lock); 2041 rwlock_init(&fs_info->tree_mod_log_lock);
2030 mutex_init(&fs_info->reloc_mutex); 2042 mutex_init(&fs_info->reloc_mutex);
2043 seqlock_init(&fs_info->profiles_lock);
2031 2044
2032 init_completion(&fs_info->kobj_unregister); 2045 init_completion(&fs_info->kobj_unregister);
2033 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); 2046 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
@@ -2126,6 +2139,7 @@ int open_ctree(struct super_block *sb,
2126 2139
2127 spin_lock_init(&fs_info->block_group_cache_lock); 2140 spin_lock_init(&fs_info->block_group_cache_lock);
2128 fs_info->block_group_cache_tree = RB_ROOT; 2141 fs_info->block_group_cache_tree = RB_ROOT;
2142 fs_info->first_logical_byte = (u64)-1;
2129 2143
2130 extent_io_tree_init(&fs_info->freed_extents[0], 2144 extent_io_tree_init(&fs_info->freed_extents[0],
2131 fs_info->btree_inode->i_mapping); 2145 fs_info->btree_inode->i_mapping);
@@ -2187,7 +2201,8 @@ int open_ctree(struct super_block *sb,
2187 goto fail_alloc; 2201 goto fail_alloc;
2188 2202
2189 /* check FS state, whether FS is broken. */ 2203 /* check FS state, whether FS is broken. */
2190 fs_info->fs_state |= btrfs_super_flags(disk_super); 2204 if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
2205 set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
2191 2206
2192 ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); 2207 ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
2193 if (ret) { 2208 if (ret) {
@@ -2261,6 +2276,8 @@ int open_ctree(struct super_block *sb,
2261 leafsize = btrfs_super_leafsize(disk_super); 2276 leafsize = btrfs_super_leafsize(disk_super);
2262 sectorsize = btrfs_super_sectorsize(disk_super); 2277 sectorsize = btrfs_super_sectorsize(disk_super);
2263 stripesize = btrfs_super_stripesize(disk_super); 2278 stripesize = btrfs_super_stripesize(disk_super);
2279 fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
2280 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
2264 2281
2265 /* 2282 /*
2266 * mixed block groups end up with duplicate but slightly offset 2283 * mixed block groups end up with duplicate but slightly offset
@@ -2390,8 +2407,7 @@ int open_ctree(struct super_block *sb,
2390 sb->s_blocksize = sectorsize; 2407 sb->s_blocksize = sectorsize;
2391 sb->s_blocksize_bits = blksize_bits(sectorsize); 2408 sb->s_blocksize_bits = blksize_bits(sectorsize);
2392 2409
2393 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC, 2410 if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2394 sizeof(disk_super->magic))) {
2395 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id); 2411 printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
2396 goto fail_sb_buffer; 2412 goto fail_sb_buffer;
2397 } 2413 }
@@ -2694,13 +2710,13 @@ fail_cleaner:
2694 * kthreads 2710 * kthreads
2695 */ 2711 */
2696 filemap_write_and_wait(fs_info->btree_inode->i_mapping); 2712 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
2697 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2698 2713
2699fail_block_groups: 2714fail_block_groups:
2700 btrfs_free_block_groups(fs_info); 2715 btrfs_free_block_groups(fs_info);
2701 2716
2702fail_tree_roots: 2717fail_tree_roots:
2703 free_root_pointers(fs_info, 1); 2718 free_root_pointers(fs_info, 1);
2719 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2704 2720
2705fail_sb_buffer: 2721fail_sb_buffer:
2706 btrfs_stop_workers(&fs_info->generic_worker); 2722 btrfs_stop_workers(&fs_info->generic_worker);
@@ -2721,8 +2737,11 @@ fail_alloc:
2721fail_iput: 2737fail_iput:
2722 btrfs_mapping_tree_free(&fs_info->mapping_tree); 2738 btrfs_mapping_tree_free(&fs_info->mapping_tree);
2723 2739
2724 invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2725 iput(fs_info->btree_inode); 2740 iput(fs_info->btree_inode);
2741fail_delalloc_bytes:
2742 percpu_counter_destroy(&fs_info->delalloc_bytes);
2743fail_dirty_metadata_bytes:
2744 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
2726fail_bdi: 2745fail_bdi:
2727 bdi_destroy(&fs_info->bdi); 2746 bdi_destroy(&fs_info->bdi);
2728fail_srcu: 2747fail_srcu:
@@ -2795,8 +2814,7 @@ struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
2795 2814
2796 super = (struct btrfs_super_block *)bh->b_data; 2815 super = (struct btrfs_super_block *)bh->b_data;
2797 if (btrfs_super_bytenr(super) != bytenr || 2816 if (btrfs_super_bytenr(super) != bytenr ||
2798 strncmp((char *)(&super->magic), BTRFS_MAGIC, 2817 super->magic != cpu_to_le64(BTRFS_MAGIC)) {
2799 sizeof(super->magic))) {
2800 brelse(bh); 2818 brelse(bh);
2801 continue; 2819 continue;
2802 } 2820 }
@@ -3339,7 +3357,7 @@ int close_ctree(struct btrfs_root *root)
3339 printk(KERN_ERR "btrfs: commit super ret %d\n", ret); 3357 printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
3340 } 3358 }
3341 3359
3342 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) 3360 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
3343 btrfs_error_commit_super(root); 3361 btrfs_error_commit_super(root);
3344 3362
3345 btrfs_put_block_group_cache(fs_info); 3363 btrfs_put_block_group_cache(fs_info);
@@ -3352,9 +3370,9 @@ int close_ctree(struct btrfs_root *root)
3352 3370
3353 btrfs_free_qgroup_config(root->fs_info); 3371 btrfs_free_qgroup_config(root->fs_info);
3354 3372
3355 if (fs_info->delalloc_bytes) { 3373 if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
3356 printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n", 3374 printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
3357 (unsigned long long)fs_info->delalloc_bytes); 3375 percpu_counter_sum(&fs_info->delalloc_bytes));
3358 } 3376 }
3359 3377
3360 free_extent_buffer(fs_info->extent_root->node); 3378 free_extent_buffer(fs_info->extent_root->node);
@@ -3401,6 +3419,8 @@ int close_ctree(struct btrfs_root *root)
3401 btrfs_close_devices(fs_info->fs_devices); 3419 btrfs_close_devices(fs_info->fs_devices);
3402 btrfs_mapping_tree_free(&fs_info->mapping_tree); 3420 btrfs_mapping_tree_free(&fs_info->mapping_tree);
3403 3421
3422 percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
3423 percpu_counter_destroy(&fs_info->delalloc_bytes);
3404 bdi_destroy(&fs_info->bdi); 3424 bdi_destroy(&fs_info->bdi);
3405 cleanup_srcu_struct(&fs_info->subvol_srcu); 3425 cleanup_srcu_struct(&fs_info->subvol_srcu);
3406 3426
@@ -3443,11 +3463,10 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
3443 (unsigned long long)transid, 3463 (unsigned long long)transid,
3444 (unsigned long long)root->fs_info->generation); 3464 (unsigned long long)root->fs_info->generation);
3445 was_dirty = set_extent_buffer_dirty(buf); 3465 was_dirty = set_extent_buffer_dirty(buf);
3446 if (!was_dirty) { 3466 if (!was_dirty)
3447 spin_lock(&root->fs_info->delalloc_lock); 3467 __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
3448 root->fs_info->dirty_metadata_bytes += buf->len; 3468 buf->len,
3449 spin_unlock(&root->fs_info->delalloc_lock); 3469 root->fs_info->dirty_metadata_batch);
3450 }
3451} 3470}
3452 3471
3453static void __btrfs_btree_balance_dirty(struct btrfs_root *root, 3472static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
@@ -3457,8 +3476,7 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
3457 * looks as though older kernels can get into trouble with 3476 * looks as though older kernels can get into trouble with
3458 * this code, they end up stuck in balance_dirty_pages forever 3477 * this code, they end up stuck in balance_dirty_pages forever
3459 */ 3478 */
3460 u64 num_dirty; 3479 int ret;
3461 unsigned long thresh = 32 * 1024 * 1024;
3462 3480
3463 if (current->flags & PF_MEMALLOC) 3481 if (current->flags & PF_MEMALLOC)
3464 return; 3482 return;
@@ -3466,9 +3484,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
3466 if (flush_delayed) 3484 if (flush_delayed)
3467 btrfs_balance_delayed_items(root); 3485 btrfs_balance_delayed_items(root);
3468 3486
3469 num_dirty = root->fs_info->dirty_metadata_bytes; 3487 ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
3470 3488 BTRFS_DIRTY_METADATA_THRESH);
3471 if (num_dirty > thresh) { 3489 if (ret > 0) {
3472 balance_dirty_pages_ratelimited( 3490 balance_dirty_pages_ratelimited(
3473 root->fs_info->btree_inode->i_mapping); 3491 root->fs_info->btree_inode->i_mapping);
3474 } 3492 }
@@ -3518,7 +3536,8 @@ void btrfs_error_commit_super(struct btrfs_root *root)
3518 btrfs_cleanup_transaction(root); 3536 btrfs_cleanup_transaction(root);
3519} 3537}
3520 3538
3521static void btrfs_destroy_ordered_operations(struct btrfs_root *root) 3539static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
3540 struct btrfs_root *root)
3522{ 3541{
3523 struct btrfs_inode *btrfs_inode; 3542 struct btrfs_inode *btrfs_inode;
3524 struct list_head splice; 3543 struct list_head splice;
@@ -3528,7 +3547,7 @@ static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
3528 mutex_lock(&root->fs_info->ordered_operations_mutex); 3547 mutex_lock(&root->fs_info->ordered_operations_mutex);
3529 spin_lock(&root->fs_info->ordered_extent_lock); 3548 spin_lock(&root->fs_info->ordered_extent_lock);
3530 3549
3531 list_splice_init(&root->fs_info->ordered_operations, &splice); 3550 list_splice_init(&t->ordered_operations, &splice);
3532 while (!list_empty(&splice)) { 3551 while (!list_empty(&splice)) {
3533 btrfs_inode = list_entry(splice.next, struct btrfs_inode, 3552 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
3534 ordered_operations); 3553 ordered_operations);
@@ -3544,35 +3563,16 @@ static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
3544 3563
3545static void btrfs_destroy_ordered_extents(struct btrfs_root *root) 3564static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
3546{ 3565{
3547 struct list_head splice;
3548 struct btrfs_ordered_extent *ordered; 3566 struct btrfs_ordered_extent *ordered;
3549 struct inode *inode;
3550
3551 INIT_LIST_HEAD(&splice);
3552 3567
3553 spin_lock(&root->fs_info->ordered_extent_lock); 3568 spin_lock(&root->fs_info->ordered_extent_lock);
3554 3569 /*
3555 list_splice_init(&root->fs_info->ordered_extents, &splice); 3570 * This will just short circuit the ordered completion stuff which will
3556 while (!list_empty(&splice)) { 3571 * make sure the ordered extent gets properly cleaned up.
3557 ordered = list_entry(splice.next, struct btrfs_ordered_extent, 3572 */
3558 root_extent_list); 3573 list_for_each_entry(ordered, &root->fs_info->ordered_extents,
3559 3574 root_extent_list)
3560 list_del_init(&ordered->root_extent_list); 3575 set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
3561 atomic_inc(&ordered->refs);
3562
3563 /* the inode may be getting freed (in sys_unlink path). */
3564 inode = igrab(ordered->inode);
3565
3566 spin_unlock(&root->fs_info->ordered_extent_lock);
3567 if (inode)
3568 iput(inode);
3569
3570 atomic_set(&ordered->refs, 1);
3571 btrfs_put_ordered_extent(ordered);
3572
3573 spin_lock(&root->fs_info->ordered_extent_lock);
3574 }
3575
3576 spin_unlock(&root->fs_info->ordered_extent_lock); 3576 spin_unlock(&root->fs_info->ordered_extent_lock);
3577} 3577}
3578 3578
@@ -3594,11 +3594,11 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3594 } 3594 }
3595 3595
3596 while ((node = rb_first(&delayed_refs->root)) != NULL) { 3596 while ((node = rb_first(&delayed_refs->root)) != NULL) {
3597 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); 3597 struct btrfs_delayed_ref_head *head = NULL;
3598 3598
3599 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3599 atomic_set(&ref->refs, 1); 3600 atomic_set(&ref->refs, 1);
3600 if (btrfs_delayed_ref_is_head(ref)) { 3601 if (btrfs_delayed_ref_is_head(ref)) {
3601 struct btrfs_delayed_ref_head *head;
3602 3602
3603 head = btrfs_delayed_node_to_head(ref); 3603 head = btrfs_delayed_node_to_head(ref);
3604 if (!mutex_trylock(&head->mutex)) { 3604 if (!mutex_trylock(&head->mutex)) {
@@ -3614,16 +3614,18 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3614 continue; 3614 continue;
3615 } 3615 }
3616 3616
3617 kfree(head->extent_op); 3617 btrfs_free_delayed_extent_op(head->extent_op);
3618 delayed_refs->num_heads--; 3618 delayed_refs->num_heads--;
3619 if (list_empty(&head->cluster)) 3619 if (list_empty(&head->cluster))
3620 delayed_refs->num_heads_ready--; 3620 delayed_refs->num_heads_ready--;
3621 list_del_init(&head->cluster); 3621 list_del_init(&head->cluster);
3622 } 3622 }
3623
3623 ref->in_tree = 0; 3624 ref->in_tree = 0;
3624 rb_erase(&ref->rb_node, &delayed_refs->root); 3625 rb_erase(&ref->rb_node, &delayed_refs->root);
3625 delayed_refs->num_entries--; 3626 delayed_refs->num_entries--;
3626 3627 if (head)
3628 mutex_unlock(&head->mutex);
3627 spin_unlock(&delayed_refs->lock); 3629 spin_unlock(&delayed_refs->lock);
3628 btrfs_put_delayed_ref(ref); 3630 btrfs_put_delayed_ref(ref);
3629 3631
@@ -3671,6 +3673,8 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3671 delalloc_inodes); 3673 delalloc_inodes);
3672 3674
3673 list_del_init(&btrfs_inode->delalloc_inodes); 3675 list_del_init(&btrfs_inode->delalloc_inodes);
3676 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
3677 &btrfs_inode->runtime_flags);
3674 3678
3675 btrfs_invalidate_inodes(btrfs_inode->root); 3679 btrfs_invalidate_inodes(btrfs_inode->root);
3676 } 3680 }
@@ -3823,10 +3827,8 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
3823 3827
3824 while (!list_empty(&list)) { 3828 while (!list_empty(&list)) {
3825 t = list_entry(list.next, struct btrfs_transaction, list); 3829 t = list_entry(list.next, struct btrfs_transaction, list);
3826 if (!t)
3827 break;
3828 3830
3829 btrfs_destroy_ordered_operations(root); 3831 btrfs_destroy_ordered_operations(t, root);
3830 3832
3831 btrfs_destroy_ordered_extents(root); 3833 btrfs_destroy_ordered_extents(root);
3832 3834