aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/inode.c
diff options
context:
space:
mode:
authorJosef Bacik <josef@redhat.com>2010-02-03 14:33:23 -0500
committerChris Mason <chris.mason@oracle.com>2010-03-15 11:00:13 -0400
commit2ac55d41b5d6bf49e76bc85db5431240617e2f8f (patch)
treeee8e2a716ef0b50388ef5e4a86387ec0499bca89 /fs/btrfs/inode.c
parent5a1a3df1f6c86926cfe8657e6f9b4b4c2f467d60 (diff)
Btrfs: cache the extent state everywhere we possibly can V2
This patch just goes through and fixes everybody that does lock_extent() blah unlock_extent() to use lock_extent_bits() blah unlock_extent_cached() and pass around a extent_state so we only have to do the searches once per function. This gives me about a 3 mb/s boots on my random write test. I have not converted some things, like the relocation and ioctl's, since they aren't heavily used and the relocation stuff is in the middle of being re-written. I also changed the clear_extent_bit() to only unset the cached state if we are clearing EXTENT_LOCKED and related stuff, so we can do things like this lock_extent_bits() clear delalloc bits unlock_extent_cached() without losing our cached state. I tested this thoroughly and turned on LEAK_DEBUG to make sure we weren't leaking extent states, everything worked out fine. Signed-off-by: Josef Bacik <josef@redhat.com> Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/inode.c')
-rw-r--r--fs/btrfs/inode.c111
1 file changed, 69 insertions, 42 deletions
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 1824dda1d351..2a337a09c650 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -573,8 +573,8 @@ retry:
573 unsigned long nr_written = 0; 573 unsigned long nr_written = 0;
574 574
575 lock_extent(io_tree, async_extent->start, 575 lock_extent(io_tree, async_extent->start,
576 async_extent->start + 576 async_extent->start +
577 async_extent->ram_size - 1, GFP_NOFS); 577 async_extent->ram_size - 1, GFP_NOFS);
578 578
579 /* allocate blocks */ 579 /* allocate blocks */
580 ret = cow_file_range(inode, async_cow->locked_page, 580 ret = cow_file_range(inode, async_cow->locked_page,
@@ -1512,12 +1512,13 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1512 return 0; 1512 return 0;
1513} 1513}
1514 1514
1515int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end) 1515int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1516 struct extent_state **cached_state)
1516{ 1517{
1517 if ((end & (PAGE_CACHE_SIZE - 1)) == 0) 1518 if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1518 WARN_ON(1); 1519 WARN_ON(1);
1519 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, 1520 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1520 GFP_NOFS); 1521 cached_state, GFP_NOFS);
1521} 1522}
1522 1523
1523/* see btrfs_writepage_start_hook for details on why this is required */ 1524/* see btrfs_writepage_start_hook for details on why this is required */
@@ -1530,6 +1531,7 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1530{ 1531{
1531 struct btrfs_writepage_fixup *fixup; 1532 struct btrfs_writepage_fixup *fixup;
1532 struct btrfs_ordered_extent *ordered; 1533 struct btrfs_ordered_extent *ordered;
1534 struct extent_state *cached_state = NULL;
1533 struct page *page; 1535 struct page *page;
1534 struct inode *inode; 1536 struct inode *inode;
1535 u64 page_start; 1537 u64 page_start;
@@ -1548,7 +1550,8 @@ again:
1548 page_start = page_offset(page); 1550 page_start = page_offset(page);
1549 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; 1551 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1550 1552
1551 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); 1553 lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1554 &cached_state, GFP_NOFS);
1552 1555
1553 /* already ordered? We're done */ 1556 /* already ordered? We're done */
1554 if (PagePrivate2(page)) 1557 if (PagePrivate2(page))
@@ -1556,17 +1559,18 @@ again:
1556 1559
1557 ordered = btrfs_lookup_ordered_extent(inode, page_start); 1560 ordered = btrfs_lookup_ordered_extent(inode, page_start);
1558 if (ordered) { 1561 if (ordered) {
1559 unlock_extent(&BTRFS_I(inode)->io_tree, page_start, 1562 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1560 page_end, GFP_NOFS); 1563 page_end, &cached_state, GFP_NOFS);
1561 unlock_page(page); 1564 unlock_page(page);
1562 btrfs_start_ordered_extent(inode, ordered, 1); 1565 btrfs_start_ordered_extent(inode, ordered, 1);
1563 goto again; 1566 goto again;
1564 } 1567 }
1565 1568
1566 btrfs_set_extent_delalloc(inode, page_start, page_end); 1569 btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1567 ClearPageChecked(page); 1570 ClearPageChecked(page);
1568out: 1571out:
1569 unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS); 1572 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
1573 &cached_state, GFP_NOFS);
1570out_page: 1574out_page:
1571 unlock_page(page); 1575 unlock_page(page);
1572 page_cache_release(page); 1576 page_cache_release(page);
@@ -1695,6 +1699,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1695 struct btrfs_trans_handle *trans; 1699 struct btrfs_trans_handle *trans;
1696 struct btrfs_ordered_extent *ordered_extent = NULL; 1700 struct btrfs_ordered_extent *ordered_extent = NULL;
1697 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 1701 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1702 struct extent_state *cached_state = NULL;
1698 int compressed = 0; 1703 int compressed = 0;
1699 int ret; 1704 int ret;
1700 1705
@@ -1716,9 +1721,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1716 goto out; 1721 goto out;
1717 } 1722 }
1718 1723
1719 lock_extent(io_tree, ordered_extent->file_offset, 1724 lock_extent_bits(io_tree, ordered_extent->file_offset,
1720 ordered_extent->file_offset + ordered_extent->len - 1, 1725 ordered_extent->file_offset + ordered_extent->len - 1,
1721 GFP_NOFS); 1726 0, &cached_state, GFP_NOFS);
1722 1727
1723 trans = btrfs_join_transaction(root, 1); 1728 trans = btrfs_join_transaction(root, 1);
1724 1729
@@ -1745,9 +1750,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1745 ordered_extent->len); 1750 ordered_extent->len);
1746 BUG_ON(ret); 1751 BUG_ON(ret);
1747 } 1752 }
1748 unlock_extent(io_tree, ordered_extent->file_offset, 1753 unlock_extent_cached(io_tree, ordered_extent->file_offset,
1749 ordered_extent->file_offset + ordered_extent->len - 1, 1754 ordered_extent->file_offset +
1750 GFP_NOFS); 1755 ordered_extent->len - 1, &cached_state, GFP_NOFS);
1756
1751 add_pending_csums(trans, inode, ordered_extent->file_offset, 1757 add_pending_csums(trans, inode, ordered_extent->file_offset,
1752 &ordered_extent->list); 1758 &ordered_extent->list);
1753 1759
@@ -3084,6 +3090,7 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3084 struct btrfs_root *root = BTRFS_I(inode)->root; 3090 struct btrfs_root *root = BTRFS_I(inode)->root;
3085 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3091 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3086 struct btrfs_ordered_extent *ordered; 3092 struct btrfs_ordered_extent *ordered;
3093 struct extent_state *cached_state = NULL;
3087 char *kaddr; 3094 char *kaddr;
3088 u32 blocksize = root->sectorsize; 3095 u32 blocksize = root->sectorsize;
3089 pgoff_t index = from >> PAGE_CACHE_SHIFT; 3096 pgoff_t index = from >> PAGE_CACHE_SHIFT;
@@ -3130,12 +3137,14 @@ again:
3130 } 3137 }
3131 wait_on_page_writeback(page); 3138 wait_on_page_writeback(page);
3132 3139
3133 lock_extent(io_tree, page_start, page_end, GFP_NOFS); 3140 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
3141 GFP_NOFS);
3134 set_page_extent_mapped(page); 3142 set_page_extent_mapped(page);
3135 3143
3136 ordered = btrfs_lookup_ordered_extent(inode, page_start); 3144 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3137 if (ordered) { 3145 if (ordered) {
3138 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 3146 unlock_extent_cached(io_tree, page_start, page_end,
3147 &cached_state, GFP_NOFS);
3139 unlock_page(page); 3148 unlock_page(page);
3140 page_cache_release(page); 3149 page_cache_release(page);
3141 btrfs_start_ordered_extent(inode, ordered, 1); 3150 btrfs_start_ordered_extent(inode, ordered, 1);
@@ -3143,13 +3152,15 @@ again:
3143 goto again; 3152 goto again;
3144 } 3153 }
3145 3154
3146 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 3155 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
3147 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 3156 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
3148 GFP_NOFS); 3157 0, 0, &cached_state, GFP_NOFS);
3149 3158
3150 ret = btrfs_set_extent_delalloc(inode, page_start, page_end); 3159 ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
3160 &cached_state);
3151 if (ret) { 3161 if (ret) {
3152 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 3162 unlock_extent_cached(io_tree, page_start, page_end,
3163 &cached_state, GFP_NOFS);
3153 goto out_unlock; 3164 goto out_unlock;
3154 } 3165 }
3155 3166
@@ -3162,7 +3173,8 @@ again:
3162 } 3173 }
3163 ClearPageChecked(page); 3174 ClearPageChecked(page);
3164 set_page_dirty(page); 3175 set_page_dirty(page);
3165 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 3176 unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
3177 GFP_NOFS);
3166 3178
3167out_unlock: 3179out_unlock:
3168 if (ret) 3180 if (ret)
@@ -3180,6 +3192,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3180 struct btrfs_root *root = BTRFS_I(inode)->root; 3192 struct btrfs_root *root = BTRFS_I(inode)->root;
3181 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 3193 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3182 struct extent_map *em; 3194 struct extent_map *em;
3195 struct extent_state *cached_state = NULL;
3183 u64 mask = root->sectorsize - 1; 3196 u64 mask = root->sectorsize - 1;
3184 u64 hole_start = (inode->i_size + mask) & ~mask; 3197 u64 hole_start = (inode->i_size + mask) & ~mask;
3185 u64 block_end = (size + mask) & ~mask; 3198 u64 block_end = (size + mask) & ~mask;
@@ -3195,11 +3208,13 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3195 struct btrfs_ordered_extent *ordered; 3208 struct btrfs_ordered_extent *ordered;
3196 btrfs_wait_ordered_range(inode, hole_start, 3209 btrfs_wait_ordered_range(inode, hole_start,
3197 block_end - hole_start); 3210 block_end - hole_start);
3198 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); 3211 lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
3212 &cached_state, GFP_NOFS);
3199 ordered = btrfs_lookup_ordered_extent(inode, hole_start); 3213 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3200 if (!ordered) 3214 if (!ordered)
3201 break; 3215 break;
3202 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); 3216 unlock_extent_cached(io_tree, hole_start, block_end - 1,
3217 &cached_state, GFP_NOFS);
3203 btrfs_put_ordered_extent(ordered); 3218 btrfs_put_ordered_extent(ordered);
3204 } 3219 }
3205 3220
@@ -3244,7 +3259,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
3244 break; 3259 break;
3245 } 3260 }
3246 3261
3247 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS); 3262 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
3263 GFP_NOFS);
3248 return err; 3264 return err;
3249} 3265}
3250 3266
@@ -4985,6 +5001,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4985{ 5001{
4986 struct extent_io_tree *tree; 5002 struct extent_io_tree *tree;
4987 struct btrfs_ordered_extent *ordered; 5003 struct btrfs_ordered_extent *ordered;
5004 struct extent_state *cached_state = NULL;
4988 u64 page_start = page_offset(page); 5005 u64 page_start = page_offset(page);
4989 u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 5006 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4990 5007
@@ -5003,7 +5020,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
5003 btrfs_releasepage(page, GFP_NOFS); 5020 btrfs_releasepage(page, GFP_NOFS);
5004 return; 5021 return;
5005 } 5022 }
5006 lock_extent(tree, page_start, page_end, GFP_NOFS); 5023 lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
5024 GFP_NOFS);
5007 ordered = btrfs_lookup_ordered_extent(page->mapping->host, 5025 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
5008 page_offset(page)); 5026 page_offset(page));
5009 if (ordered) { 5027 if (ordered) {
@@ -5014,7 +5032,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
5014 clear_extent_bit(tree, page_start, page_end, 5032 clear_extent_bit(tree, page_start, page_end,
5015 EXTENT_DIRTY | EXTENT_DELALLOC | 5033 EXTENT_DIRTY | EXTENT_DELALLOC |
5016 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0, 5034 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING, 1, 0,
5017 NULL, GFP_NOFS); 5035 &cached_state, GFP_NOFS);
5018 /* 5036 /*
5019 * whoever cleared the private bit is responsible 5037 * whoever cleared the private bit is responsible
5020 * for the finish_ordered_io 5038 * for the finish_ordered_io
@@ -5024,11 +5042,13 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
5024 page_start, page_end); 5042 page_start, page_end);
5025 } 5043 }
5026 btrfs_put_ordered_extent(ordered); 5044 btrfs_put_ordered_extent(ordered);
5027 lock_extent(tree, page_start, page_end, GFP_NOFS); 5045 cached_state = NULL;
5046 lock_extent_bits(tree, page_start, page_end, 0, &cached_state,
5047 GFP_NOFS);
5028 } 5048 }
5029 clear_extent_bit(tree, page_start, page_end, 5049 clear_extent_bit(tree, page_start, page_end,
5030 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC | 5050 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
5031 EXTENT_DO_ACCOUNTING, 1, 1, NULL, GFP_NOFS); 5051 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state, GFP_NOFS);
5032 __btrfs_releasepage(page, GFP_NOFS); 5052 __btrfs_releasepage(page, GFP_NOFS);
5033 5053
5034 ClearPageChecked(page); 5054 ClearPageChecked(page);
@@ -5061,6 +5081,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5061 struct btrfs_root *root = BTRFS_I(inode)->root; 5081 struct btrfs_root *root = BTRFS_I(inode)->root;
5062 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 5082 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5063 struct btrfs_ordered_extent *ordered; 5083 struct btrfs_ordered_extent *ordered;
5084 struct extent_state *cached_state = NULL;
5064 char *kaddr; 5085 char *kaddr;
5065 unsigned long zero_start; 5086 unsigned long zero_start;
5066 loff_t size; 5087 loff_t size;
@@ -5099,7 +5120,8 @@ again:
5099 } 5120 }
5100 wait_on_page_writeback(page); 5121 wait_on_page_writeback(page);
5101 5122
5102 lock_extent(io_tree, page_start, page_end, GFP_NOFS); 5123 lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state,
5124 GFP_NOFS);
5103 set_page_extent_mapped(page); 5125 set_page_extent_mapped(page);
5104 5126
5105 /* 5127 /*
@@ -5108,7 +5130,8 @@ again:
5108 */ 5130 */
5109 ordered = btrfs_lookup_ordered_extent(inode, page_start); 5131 ordered = btrfs_lookup_ordered_extent(inode, page_start);
5110 if (ordered) { 5132 if (ordered) {
5111 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 5133 unlock_extent_cached(io_tree, page_start, page_end,
5134 &cached_state, GFP_NOFS);
5112 unlock_page(page); 5135 unlock_page(page);
5113 btrfs_start_ordered_extent(inode, ordered, 1); 5136 btrfs_start_ordered_extent(inode, ordered, 1);
5114 btrfs_put_ordered_extent(ordered); 5137 btrfs_put_ordered_extent(ordered);
@@ -5122,13 +5145,15 @@ again:
5122 * is probably a better way to do this, but for now keep consistent with 5145 * is probably a better way to do this, but for now keep consistent with
5123 * prepare_pages in the normal write path. 5146 * prepare_pages in the normal write path.
5124 */ 5147 */
5125 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 5148 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
5126 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, 5149 EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING,
5127 GFP_NOFS); 5150 0, 0, &cached_state, GFP_NOFS);
5128 5151
5129 ret = btrfs_set_extent_delalloc(inode, page_start, page_end); 5152 ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
5153 &cached_state);
5130 if (ret) { 5154 if (ret) {
5131 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 5155 unlock_extent_cached(io_tree, page_start, page_end,
5156 &cached_state, GFP_NOFS);
5132 ret = VM_FAULT_SIGBUS; 5157 ret = VM_FAULT_SIGBUS;
5133 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE); 5158 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
5134 goto out_unlock; 5159 goto out_unlock;
@@ -5154,7 +5179,7 @@ again:
5154 BTRFS_I(inode)->last_trans = root->fs_info->generation; 5179 BTRFS_I(inode)->last_trans = root->fs_info->generation;
5155 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid; 5180 BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
5156 5181
5157 unlock_extent(io_tree, page_start, page_end, GFP_NOFS); 5182 unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
5158 5183
5159out_unlock: 5184out_unlock:
5160 btrfs_unreserve_metadata_for_delalloc(root, inode, 1); 5185 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
@@ -5833,6 +5858,7 @@ stop_trans:
5833static long btrfs_fallocate(struct inode *inode, int mode, 5858static long btrfs_fallocate(struct inode *inode, int mode,
5834 loff_t offset, loff_t len) 5859 loff_t offset, loff_t len)
5835{ 5860{
5861 struct extent_state *cached_state = NULL;
5836 u64 cur_offset; 5862 u64 cur_offset;
5837 u64 last_byte; 5863 u64 last_byte;
5838 u64 alloc_start; 5864 u64 alloc_start;
@@ -5871,16 +5897,17 @@ static long btrfs_fallocate(struct inode *inode, int mode,
5871 /* the extent lock is ordered inside the running 5897 /* the extent lock is ordered inside the running
5872 * transaction 5898 * transaction
5873 */ 5899 */
5874 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, 5900 lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
5875 GFP_NOFS); 5901 locked_end, 0, &cached_state, GFP_NOFS);
5876 ordered = btrfs_lookup_first_ordered_extent(inode, 5902 ordered = btrfs_lookup_first_ordered_extent(inode,
5877 alloc_end - 1); 5903 alloc_end - 1);
5878 if (ordered && 5904 if (ordered &&
5879 ordered->file_offset + ordered->len > alloc_start && 5905 ordered->file_offset + ordered->len > alloc_start &&
5880 ordered->file_offset < alloc_end) { 5906 ordered->file_offset < alloc_end) {
5881 btrfs_put_ordered_extent(ordered); 5907 btrfs_put_ordered_extent(ordered);
5882 unlock_extent(&BTRFS_I(inode)->io_tree, 5908 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
5883 alloc_start, locked_end, GFP_NOFS); 5909 alloc_start, locked_end,
5910 &cached_state, GFP_NOFS);
5884 /* 5911 /*
5885 * we can't wait on the range with the transaction 5912 * we can't wait on the range with the transaction
5886 * running or with the extent lock held 5913 * running or with the extent lock held
@@ -5922,8 +5949,8 @@ static long btrfs_fallocate(struct inode *inode, int mode,
5922 break; 5949 break;
5923 } 5950 }
5924 } 5951 }
5925 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end, 5952 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5926 GFP_NOFS); 5953 &cached_state, GFP_NOFS);
5927 5954
5928 btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode, 5955 btrfs_free_reserved_data_space(BTRFS_I(inode)->root, inode,
5929 alloc_end - alloc_start); 5956 alloc_end - alloc_start);