Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--	fs/btrfs/file.c	139
1 file changed, 73 insertions(+), 66 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c1d2a07205da..520cb7230b2d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -92,10 +92,10 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1,
  * If an existing record is found the defrag item you
  * pass in is freed
  */
-static int __btrfs_add_inode_defrag(struct inode *inode,
+static int __btrfs_add_inode_defrag(struct btrfs_inode *inode,
 				    struct inode_defrag *defrag)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 	struct inode_defrag *entry;
 	struct rb_node **p;
 	struct rb_node *parent = NULL;
@@ -123,7 +123,7 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
 			return -EEXIST;
 		}
 	}
-	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
+	set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags);
 	rb_link_node(&defrag->rb_node, parent, p);
 	rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
 	return 0;
@@ -145,10 +145,10 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
  * enabled
  */
 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
-			   struct inode *inode)
+			   struct btrfs_inode *inode)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_root *root = inode->root;
 	struct inode_defrag *defrag;
 	u64 transid;
 	int ret;
@@ -156,24 +156,24 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 	if (!__need_auto_defrag(fs_info))
 		return 0;
 
-	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
+	if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags))
 		return 0;
 
 	if (trans)
 		transid = trans->transid;
 	else
-		transid = BTRFS_I(inode)->root->last_trans;
+		transid = inode->root->last_trans;
 
 	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
 	if (!defrag)
 		return -ENOMEM;
 
-	defrag->ino = btrfs_ino(BTRFS_I(inode));
+	defrag->ino = btrfs_ino(inode);
 	defrag->transid = transid;
 	defrag->root = root->root_key.objectid;
 
 	spin_lock(&fs_info->defrag_inodes_lock);
-	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
+	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) {
 		/*
 		 * If we set IN_DEFRAG flag and evict the inode from memory,
 		 * and then re-read this inode, this new inode doesn't have
@@ -194,10 +194,10 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
  * the same inode in the tree, we will merge them together (by
  * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
  */
-static void btrfs_requeue_inode_defrag(struct inode *inode,
+static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode,
 				       struct inode_defrag *defrag)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 	int ret;
 
 	if (!__need_auto_defrag(fs_info))
@@ -334,7 +334,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 	 */
 	if (num_defrag == BTRFS_DEFRAG_BATCH) {
 		defrag->last_offset = range.start;
-		btrfs_requeue_inode_defrag(inode, defrag);
+		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 	} else if (defrag->last_offset && !defrag->cycled) {
 		/*
 		 * we didn't fill our defrag batch, but
@@ -343,7 +343,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 		 */
 		defrag->last_offset = 0;
 		defrag->cycled = 1;
-		btrfs_requeue_inode_defrag(inode, defrag);
+		btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag);
 	} else {
 		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 	}
@@ -529,13 +529,13 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
  * this drops all the extents in the cache that intersect the range
  * [start, end]. Existing extents are split as required.
  */
-void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
+void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
 			     int skip_pinned)
 {
 	struct extent_map *em;
 	struct extent_map *split = NULL;
 	struct extent_map *split2 = NULL;
-	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct extent_map_tree *em_tree = &inode->extent_tree;
 	u64 len = end - start + 1;
 	u64 gen;
 	int ret;
@@ -720,7 +720,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 	int leafs_visited = 0;
 
 	if (drop_cache)
-		btrfs_drop_extent_cache(inode, start, end - 1, 0);
+		btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0);
 
 	if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
 		modify_tree = 0;
@@ -1082,10 +1082,10 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
  * two or three.
  */
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
-			      struct inode *inode, u64 start, u64 end)
+			      struct btrfs_inode *inode, u64 start, u64 end)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_root *root = inode->root;
 	struct extent_buffer *leaf;
 	struct btrfs_path *path;
 	struct btrfs_file_extent_item *fi;
@@ -1102,7 +1102,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 	int del_slot = 0;
 	int recow;
 	int ret;
-	u64 ino = btrfs_ino(BTRFS_I(inode));
+	u64 ino = btrfs_ino(inode);
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -1415,13 +1415,13 @@ fail:
  * the other < 0 number - Something wrong happens
  */
 static noinline int
-lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
+lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 				size_t num_pages, loff_t pos,
 				size_t write_bytes,
 				u64 *lockstart, u64 *lockend,
 				struct extent_state **cached_state)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
 	u64 start_pos;
 	u64 last_pos;
 	int i;
@@ -1432,30 +1432,30 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 		   + round_up(pos + write_bytes - start_pos,
 			      fs_info->sectorsize) - 1;
 
-	if (start_pos < inode->i_size) {
+	if (start_pos < inode->vfs_inode.i_size) {
 		struct btrfs_ordered_extent *ordered;
-		lock_extent_bits(&BTRFS_I(inode)->io_tree,
-				 start_pos, last_pos, cached_state);
+		lock_extent_bits(&inode->io_tree, start_pos, last_pos,
+				cached_state);
 		ordered = btrfs_lookup_ordered_range(inode, start_pos,
 						     last_pos - start_pos + 1);
 		if (ordered &&
 		    ordered->file_offset + ordered->len > start_pos &&
 		    ordered->file_offset <= last_pos) {
-			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
-					     start_pos, last_pos,
-					     cached_state, GFP_NOFS);
+			unlock_extent_cached(&inode->io_tree, start_pos,
+					last_pos, cached_state, GFP_NOFS);
 			for (i = 0; i < num_pages; i++) {
 				unlock_page(pages[i]);
 				put_page(pages[i]);
 			}
-			btrfs_start_ordered_extent(inode, ordered, 1);
+			btrfs_start_ordered_extent(&inode->vfs_inode,
+					ordered, 1);
 			btrfs_put_ordered_extent(ordered);
 			return -EAGAIN;
 		}
 		if (ordered)
 			btrfs_put_ordered_extent(ordered);
 
-		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
+		clear_extent_bit(&inode->io_tree, start_pos,
 				 last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
 				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
 				 0, 0, cached_state, GFP_NOFS);
@@ -1474,11 +1474,11 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 	return ret;
 }
 
-static noinline int check_can_nocow(struct inode *inode, loff_t pos,
+static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
 				    size_t *write_bytes)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_root *root = inode->root;
 	struct btrfs_ordered_extent *ordered;
 	u64 lockstart, lockend;
 	u64 num_bytes;
@@ -1493,19 +1493,20 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
 			   fs_info->sectorsize) - 1;
 
 	while (1) {
-		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+		lock_extent(&inode->io_tree, lockstart, lockend);
 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
 						     lockend - lockstart + 1);
 		if (!ordered) {
 			break;
 		}
-		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
-		btrfs_start_ordered_extent(inode, ordered, 1);
+		unlock_extent(&inode->io_tree, lockstart, lockend);
+		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
 		btrfs_put_ordered_extent(ordered);
 	}
 
 	num_bytes = lockend - lockstart + 1;
-	ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
+	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
+			NULL, NULL, NULL);
 	if (ret <= 0) {
 		ret = 0;
 		btrfs_end_write_no_snapshoting(root);
@@ -1514,7 +1515,7 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
 				     num_bytes - pos + lockstart);
 	}
 
-	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+	unlock_extent(&inode->io_tree, lockstart, lockend);
 
 	return ret;
 }
@@ -1579,7 +1580,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			if (ret < 0) {
 				if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
 					      BTRFS_INODE_PREALLOC)) &&
-				    check_can_nocow(inode, pos, &write_bytes) > 0) {
+				    check_can_nocow(BTRFS_I(inode), pos,
+						    &write_bytes) > 0) {
 					/*
 					 * For nodata cow case, no need to reserve
 					 * data space.
@@ -1599,7 +1601,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			}
 		}
 
-		ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
+		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
+						      reserve_bytes);
 		if (ret) {
 			if (!only_release_metadata)
 				btrfs_free_reserved_data_space(inode, pos,
@@ -1623,9 +1626,9 @@ again:
 		if (ret)
 			break;
 
-		ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
-				pos, write_bytes, &lockstart,
+		ret = lock_and_cleanup_extent_if_need(BTRFS_I(inode), pages,
+				num_pages, pos, write_bytes, &lockstart,
 				&lockend, &cached_state);
 		if (ret < 0) {
 			if (ret == -EAGAIN)
 				goto again;
@@ -1677,7 +1680,7 @@ again:
 				spin_unlock(&BTRFS_I(inode)->lock);
 			}
 			if (only_release_metadata) {
-				btrfs_delalloc_release_metadata(inode,
+				btrfs_delalloc_release_metadata(BTRFS_I(inode),
 						release_bytes);
 			} else {
 				u64 __pos;
@@ -1738,7 +1741,8 @@ again:
 	if (release_bytes) {
 		if (only_release_metadata) {
 			btrfs_end_write_no_snapshoting(root);
-			btrfs_delalloc_release_metadata(inode, release_bytes);
+			btrfs_delalloc_release_metadata(BTRFS_I(inode),
+					release_bytes);
 		} else {
 			btrfs_delalloc_release_space(inode,
 					round_down(pos, fs_info->sectorsize),
@@ -2193,7 +2197,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
 	return 0;
 }
 
-static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
+static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
 			  int slot, u64 start, u64 end)
 {
 	struct btrfs_file_extent_item *fi;
@@ -2203,7 +2207,7 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
 		return 0;
 
 	btrfs_item_key_to_cpu(leaf, &key, slot);
-	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
+	if (key.objectid != btrfs_ino(inode) ||
 	    key.type != BTRFS_EXTENT_DATA_KEY)
 		return 0;
 
@@ -2222,22 +2226,23 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
 	return 0;
 }
 
-static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
-		      struct btrfs_path *path, u64 offset, u64 end)
+static int fill_holes(struct btrfs_trans_handle *trans,
+		struct btrfs_inode *inode,
+		struct btrfs_path *path, u64 offset, u64 end)
 {
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+	struct btrfs_root *root = inode->root;
 	struct extent_buffer *leaf;
 	struct btrfs_file_extent_item *fi;
 	struct extent_map *hole_em;
-	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct extent_map_tree *em_tree = &inode->extent_tree;
 	struct btrfs_key key;
 	int ret;
 
 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
 		goto out;
 
-	key.objectid = btrfs_ino(BTRFS_I(inode));
+	key.objectid = btrfs_ino(inode);
 	key.type = BTRFS_EXTENT_DATA_KEY;
 	key.offset = offset;
 
@@ -2253,7 +2258,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
 	}
 
 	leaf = path->nodes[0];
-	if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
+	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
 		u64 num_bytes;
 
 		path->slots[0]--;
@@ -2285,7 +2290,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
 	}
 	btrfs_release_path(path);
 
-	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)),
+	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode),
 			offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0);
 	if (ret)
 		return ret;
@@ -2296,8 +2301,7 @@ out:
 	hole_em = alloc_extent_map();
 	if (!hole_em) {
 		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
-		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-			&BTRFS_I(inode)->runtime_flags);
+		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
 	} else {
 		hole_em->start = offset;
 		hole_em->len = end - offset;
@@ -2320,7 +2324,7 @@ out:
 		free_extent_map(hole_em);
 		if (ret)
 			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-				&BTRFS_I(inode)->runtime_flags);
+				&inode->runtime_flags);
 	}
 
 	return 0;
@@ -2337,7 +2341,7 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
 	struct extent_map *em;
 	int ret = 0;
 
-	em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0);
+	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, *start, *len, 0);
 	if (IS_ERR_OR_NULL(em)) {
 		if (!em)
 			ret = -ENOMEM;
@@ -2550,8 +2554,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		trans->block_rsv = &fs_info->trans_block_rsv;
 
 		if (cur_offset < drop_end && cur_offset < ino_size) {
-			ret = fill_holes(trans, inode, path, cur_offset,
-					 drop_end);
+			ret = fill_holes(trans, BTRFS_I(inode), path,
+					 cur_offset, drop_end);
 			if (ret) {
 				/*
 				 * If we failed then we didn't insert our hole
@@ -2622,7 +2626,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	 * cur_offset == drop_end).
 	 */
 	if (cur_offset < ino_size && cur_offset < drop_end) {
-		ret = fill_holes(trans, inode, path, cur_offset, drop_end);
+		ret = fill_holes(trans, BTRFS_I(inode), path,
+				 cur_offset, drop_end);
 		if (ret) {
 			/* Same comment as above. */
 			btrfs_abort_transaction(trans, ret);
@@ -2747,7 +2752,8 @@ static long btrfs_fallocate(struct file *file, int mode,
 	 *
 	 * For qgroup space, it will be checked later.
 	 */
-	ret = btrfs_alloc_data_chunk_ondemand(inode, alloc_end - alloc_start);
+	ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
+			alloc_end - alloc_start);
 	if (ret < 0)
 		return ret;
 
@@ -2827,7 +2833,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 	/* First, check if we exceed the qgroup limit */
 	INIT_LIST_HEAD(&reserve_list);
 	while (1) {
-		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
+		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
 				      alloc_end - cur_offset, 0);
 		if (IS_ERR_OR_NULL(em)) {
 			if (!em)
@@ -2954,7 +2960,8 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
 			 &cached_state);
 
 	while (start < inode->i_size) {
-		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
+		em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0,
+				start, len, 0);
 		if (IS_ERR(em)) {
 			ret = PTR_ERR(em);
 			em = NULL;