Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c | 252
 1 file changed, 197 insertions(+), 55 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 359a754c782c..e238a0cdac67 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1568,23 +1568,23 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
-#ifdef BIO_RW_DISCARD
 static void btrfs_issue_discard(struct block_device *bdev,
 				u64 start, u64 len)
 {
 	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
 			DISCARD_FL_BARRIER);
 }
-#endif
 
 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 				u64 num_bytes)
 {
-#ifdef BIO_RW_DISCARD
 	int ret;
 	u64 map_length = num_bytes;
 	struct btrfs_multi_bio *multi = NULL;
 
+	if (!btrfs_test_opt(root, DISCARD))
+		return 0;
+
 	/* Tell the block device(s) that the sectors can be discarded */
 	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
 			      bytenr, &map_length, &multi, 0);
@@ -1604,9 +1604,6 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 	}
 
 	return ret;
-#else
-	return 0;
-#endif
 }
 
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
@@ -2824,14 +2821,17 @@ int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
 					  num_items);
 
 	spin_lock(&meta_sinfo->lock);
-	if (BTRFS_I(inode)->delalloc_reserved_extents <=
-	    BTRFS_I(inode)->delalloc_extents) {
+	spin_lock(&BTRFS_I(inode)->accounting_lock);
+	if (BTRFS_I(inode)->reserved_extents <=
+	    BTRFS_I(inode)->outstanding_extents) {
+		spin_unlock(&BTRFS_I(inode)->accounting_lock);
 		spin_unlock(&meta_sinfo->lock);
 		return 0;
 	}
+	spin_unlock(&BTRFS_I(inode)->accounting_lock);
 
-	BTRFS_I(inode)->delalloc_reserved_extents--;
-	BUG_ON(BTRFS_I(inode)->delalloc_reserved_extents < 0);
+	BTRFS_I(inode)->reserved_extents--;
+	BUG_ON(BTRFS_I(inode)->reserved_extents < 0);
 
 	if (meta_sinfo->bytes_delalloc < num_bytes) {
 		bug = true;
@@ -2864,6 +2864,107 @@ static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
 		meta_sinfo->force_delalloc = 0;
 }
 
+struct async_flush {
+	struct btrfs_root *root;
+	struct btrfs_space_info *info;
+	struct btrfs_work work;
+};
+
+static noinline void flush_delalloc_async(struct btrfs_work *work)
+{
+	struct async_flush *async;
+	struct btrfs_root *root;
+	struct btrfs_space_info *info;
+
+	async = container_of(work, struct async_flush, work);
+	root = async->root;
+	info = async->info;
+
+	btrfs_start_delalloc_inodes(root);
+	wake_up(&info->flush_wait);
+	btrfs_wait_ordered_extents(root, 0);
+
+	spin_lock(&info->lock);
+	info->flushing = 0;
+	spin_unlock(&info->lock);
+	wake_up(&info->flush_wait);
+
+	kfree(async);
+}
+
+static void wait_on_flush(struct btrfs_space_info *info)
+{
+	DEFINE_WAIT(wait);
+	u64 used;
+
+	while (1) {
+		prepare_to_wait(&info->flush_wait, &wait,
+				TASK_UNINTERRUPTIBLE);
+		spin_lock(&info->lock);
+		if (!info->flushing) {
+			spin_unlock(&info->lock);
+			break;
+		}
+
+		used = info->bytes_used + info->bytes_reserved +
+			info->bytes_pinned + info->bytes_readonly +
+			info->bytes_super + info->bytes_root +
+			info->bytes_may_use + info->bytes_delalloc;
+		if (used < info->total_bytes) {
+			spin_unlock(&info->lock);
+			break;
+		}
+		spin_unlock(&info->lock);
+		schedule();
+	}
+	finish_wait(&info->flush_wait, &wait);
+}
+
+static void flush_delalloc(struct btrfs_root *root,
+			   struct btrfs_space_info *info)
+{
+	struct async_flush *async;
+	bool wait = false;
+
+	spin_lock(&info->lock);
+
+	if (!info->flushing) {
+		info->flushing = 1;
+		init_waitqueue_head(&info->flush_wait);
+	} else {
+		wait = true;
+	}
+
+	spin_unlock(&info->lock);
+
+	if (wait) {
+		wait_on_flush(info);
+		return;
+	}
+
+	async = kzalloc(sizeof(*async), GFP_NOFS);
+	if (!async)
+		goto flush;
+
+	async->root = root;
+	async->info = info;
+	async->work.func = flush_delalloc_async;
+
+	btrfs_queue_worker(&root->fs_info->enospc_workers,
+			   &async->work);
+	wait_on_flush(info);
+	return;
+
+flush:
+	btrfs_start_delalloc_inodes(root);
+	btrfs_wait_ordered_extents(root, 0);
+
+	spin_lock(&info->lock);
+	info->flushing = 0;
+	spin_unlock(&info->lock);
+	wake_up(&info->flush_wait);
+}
+
 static int maybe_allocate_chunk(struct btrfs_root *root,
 				struct btrfs_space_info *info)
 {
@@ -2894,7 +2995,7 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 	if (!info->allocating_chunk) {
 		info->force_alloc = 1;
 		info->allocating_chunk = 1;
-		init_waitqueue_head(&info->wait);
+		init_waitqueue_head(&info->allocate_wait);
 	} else {
 		wait = true;
 	}
@@ -2902,7 +3003,7 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 	spin_unlock(&info->lock);
 
 	if (wait) {
-		wait_event(info->wait,
+		wait_event(info->allocate_wait,
 			   !info->allocating_chunk);
 		return 1;
 	}
@@ -2923,7 +3024,7 @@ out:
 	spin_lock(&info->lock);
 	info->allocating_chunk = 0;
 	spin_unlock(&info->lock);
-	wake_up(&info->wait);
+	wake_up(&info->allocate_wait);
 
 	if (ret)
 		return 0;
@@ -2981,21 +3082,20 @@ again:
 			filemap_flush(inode->i_mapping);
 			goto again;
 		} else if (flushed == 3) {
-			btrfs_start_delalloc_inodes(root);
-			btrfs_wait_ordered_extents(root, 0);
+			flush_delalloc(root, meta_sinfo);
 			goto again;
 		}
 		spin_lock(&meta_sinfo->lock);
 		meta_sinfo->bytes_delalloc -= num_bytes;
 		spin_unlock(&meta_sinfo->lock);
 		printk(KERN_ERR "enospc, has %d, reserved %d\n",
-		       BTRFS_I(inode)->delalloc_extents,
-		       BTRFS_I(inode)->delalloc_reserved_extents);
+		       BTRFS_I(inode)->outstanding_extents,
+		       BTRFS_I(inode)->reserved_extents);
 		dump_space_info(meta_sinfo, 0, 0);
 		return -ENOSPC;
 	}
 
-	BTRFS_I(inode)->delalloc_reserved_extents++;
+	BTRFS_I(inode)->reserved_extents++;
 	check_force_delalloc(meta_sinfo);
 	spin_unlock(&meta_sinfo->lock);
 
@@ -3094,8 +3194,7 @@ again:
 		}
 
 		if (retries == 2) {
-			btrfs_start_delalloc_inodes(root);
-			btrfs_wait_ordered_extents(root, 0);
+			flush_delalloc(root, meta_sinfo);
 			goto again;
 		}
 		spin_lock(&meta_sinfo->lock);
@@ -3588,6 +3687,14 @@ static int pin_down_bytes(struct btrfs_trans_handle *trans,
 	if (is_data)
 		goto pinit;
 
+	/*
+	 * discard is sloooow, and so triggering discards on
+	 * individual btree blocks isn't a good plan.  Just
+	 * pin everything in discard mode.
+	 */
+	if (btrfs_test_opt(root, DISCARD))
+		goto pinit;
+
 	buf = btrfs_find_tree_block(root, bytenr, num_bytes);
 	if (!buf)
 		goto pinit;
@@ -4029,6 +4136,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 	int loop = 0;
 	bool found_uncached_bg = false;
 	bool failed_cluster_refill = false;
+	bool failed_alloc = false;
 
 	WARN_ON(num_bytes < root->sectorsize);
 	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
@@ -4233,14 +4341,23 @@ refill_cluster:
 
 		offset = btrfs_find_space_for_alloc(block_group, search_start,
 						    num_bytes, empty_size);
-		if (!offset && (cached || (!cached &&
-		    loop == LOOP_CACHING_NOWAIT))) {
-			goto loop;
-		} else if (!offset && (!cached &&
-			   loop > LOOP_CACHING_NOWAIT)) {
+		/*
+		 * If we didn't find a chunk, and we haven't failed on this
+		 * block group before, and this block group is in the middle of
+		 * caching and we are ok with waiting, then go ahead and wait
+		 * for progress to be made, and set failed_alloc to true.
+		 *
+		 * If failed_alloc is true then we've already waited on this
+		 * block group once and should move on to the next block group.
+		 */
+		if (!offset && !failed_alloc && !cached &&
+		    loop > LOOP_CACHING_NOWAIT) {
 			wait_block_group_cache_progress(block_group,
 					num_bytes + empty_size);
+			failed_alloc = true;
 			goto have_block_group;
+		} else if (!offset) {
+			goto loop;
 		}
 checks:
 		search_start = stripe_align(root, offset);
@@ -4288,6 +4405,7 @@ checks:
 		break;
 loop:
 		failed_cluster_refill = false;
+		failed_alloc = false;
 		btrfs_put_block_group(block_group);
 	}
 	up_read(&space_info->groups_sem);
@@ -4799,6 +4917,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
 	u64 bytenr;
 	u64 generation;
 	u64 refs;
+	u64 flags;
 	u64 last = 0;
 	u32 nritems;
 	u32 blocksize;
@@ -4836,15 +4955,19 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
 		    generation <= root->root_key.offset)
 			continue;
 
+		/* We don't lock the tree block, it's OK to be racy here */
+		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
+					       &refs, &flags);
+		BUG_ON(ret);
+		BUG_ON(refs == 0);
+
 		if (wc->stage == DROP_REFERENCE) {
-			ret = btrfs_lookup_extent_info(trans, root,
-						       bytenr, blocksize,
-						       &refs, NULL);
-			BUG_ON(ret);
-			BUG_ON(refs == 0);
 			if (refs == 1)
 				goto reada;
 
+			if (wc->level == 1 &&
+			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
+				continue;
 			if (!wc->update_ref ||
 			    generation <= root->root_key.offset)
 				continue;
@@ -4853,6 +4976,10 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
 					       &wc->update_progress);
 			if (ret < 0)
 				continue;
+		} else {
+			if (wc->level == 1 &&
+			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
+				continue;
 		}
 reada:
 		ret = readahead_tree_block(root, bytenr, blocksize,
@@ -4876,7 +5003,7 @@ reada:
 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root,
 				   struct btrfs_path *path,
-				   struct walk_control *wc)
+				   struct walk_control *wc, int lookup_info)
 {
 	int level = wc->level;
 	struct extent_buffer *eb = path->nodes[level];
@@ -4891,8 +5018,9 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 	 * when reference count of tree block is 1, it won't increase
 	 * again. once full backref flag is set, we never clear it.
 	 */
-	if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
-	    (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
+	if (lookup_info &&
+	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
+	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
 		BUG_ON(!path->locks[level]);
 		ret = btrfs_lookup_extent_info(trans, root,
 					       eb->start, eb->len,
@@ -4953,7 +5081,7 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root,
 				 struct btrfs_path *path,
-				 struct walk_control *wc)
+				 struct walk_control *wc, int *lookup_info)
 {
 	u64 bytenr;
 	u64 generation;
@@ -4973,8 +5101,10 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	 * for the subtree
 	 */
 	if (wc->stage == UPDATE_BACKREF &&
-	    generation <= root->root_key.offset)
+	    generation <= root->root_key.offset) {
+		*lookup_info = 1;
 		return 1;
+	}
 
 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
 	blocksize = btrfs_level_size(root, level - 1);
@@ -4987,14 +5117,19 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	btrfs_tree_lock(next);
 	btrfs_set_lock_blocking(next);
 
-	if (wc->stage == DROP_REFERENCE) {
-		ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
-					       &wc->refs[level - 1],
-					       &wc->flags[level - 1]);
-		BUG_ON(ret);
-		BUG_ON(wc->refs[level - 1] == 0);
+	ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
+				       &wc->refs[level - 1],
+				       &wc->flags[level - 1]);
+	BUG_ON(ret);
+	BUG_ON(wc->refs[level - 1] == 0);
+	*lookup_info = 0;
 
+	if (wc->stage == DROP_REFERENCE) {
 		if (wc->refs[level - 1] > 1) {
+			if (level == 1 &&
+			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
+				goto skip;
+
 			if (!wc->update_ref ||
 			    generation <= root->root_key.offset)
 				goto skip;
@@ -5008,12 +5143,17 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 			wc->stage = UPDATE_BACKREF;
 			wc->shared_level = level - 1;
 		}
+	} else {
+		if (level == 1 &&
+		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
+			goto skip;
 	}
 
 	if (!btrfs_buffer_uptodate(next, generation)) {
 		btrfs_tree_unlock(next);
 		free_extent_buffer(next);
 		next = NULL;
+		*lookup_info = 1;
 	}
 
 	if (!next) {
@@ -5036,21 +5176,22 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 skip:
 	wc->refs[level - 1] = 0;
 	wc->flags[level - 1] = 0;
+	if (wc->stage == DROP_REFERENCE) {
+		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
+			parent = path->nodes[level]->start;
+		} else {
+			BUG_ON(root->root_key.objectid !=
+			       btrfs_header_owner(path->nodes[level]));
+			parent = 0;
+		}
 
-	if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
-		parent = path->nodes[level]->start;
-	} else {
-		BUG_ON(root->root_key.objectid !=
-		       btrfs_header_owner(path->nodes[level]));
-		parent = 0;
-	}
-
-	ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
-				root->root_key.objectid, level - 1, 0);
-	BUG_ON(ret);
-
+		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
+					root->root_key.objectid, level - 1, 0);
+		BUG_ON(ret);
+	}
 	btrfs_tree_unlock(next);
 	free_extent_buffer(next);
+	*lookup_info = 1;
 	return 1;
 }
 
@@ -5164,6 +5305,7 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 				   struct walk_control *wc)
 {
 	int level = wc->level;
+	int lookup_info = 1;
 	int ret;
 
 	while (level >= 0) {
@@ -5171,14 +5313,14 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
 		    btrfs_header_nritems(path->nodes[level]))
 			break;
 
-		ret = walk_down_proc(trans, root, path, wc);
+		ret = walk_down_proc(trans, root, path, wc, lookup_info);
 		if (ret > 0)
 			break;
 
 		if (level == 0)
 			break;
 
-		ret = do_walk_down(trans, root, path, wc);
+		ret = do_walk_down(trans, root, path, wc, &lookup_info);
 		if (ret > 0) {
 			path->slots[level]++;
 			continue;