author    Yan, Zheng <zheng.yan@oracle.com>    2009-11-12 04:34:21 -0500
committer Chris Mason <chris.mason@oracle.com> 2009-12-17 12:33:24 -0500
commit    c216775458a2ee345d9412a2770c2916acfb5d30 (patch)
tree      41a947a9d254aeeef40b7e42162d80646477f30a /fs
parent    920bbbfb05c9fce22e088d20eb9dcb8f96342de9 (diff)
Btrfs: Fix disk_i_size update corner case
There are some cases where file extents are inserted without involving an ordered struct. In those cases we update disk_i_size directly, without checking for pending ordered extents or the DELALLOC bit. This patch extends btrfs_ordered_update_i_size() to handle these cases.

Signed-off-by: Yan Zheng <zheng.yan@oracle.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
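With the extended interface, a caller that inserts file extents without an ordered struct passes the end offset of the new extent and a NULL ordered pointer, and btrfs_ordered_update_i_size() itself checks for pending ordered extents and DELALLOC bytes below that offset before touching disk_i_size. A minimal caller sketch, assuming a hypothetical helper name and simplified error handling (this function is not part of the patch):

	/*
	 * Hypothetical caller: a path that inserted a file extent covering
	 * [file_offset, file_offset + num_bytes) without creating an ordered
	 * extent, letting btrfs_ordered_update_i_size() decide whether
	 * disk_i_size may be advanced.
	 */
	static int update_isize_after_insert(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct inode *inode,
					     u64 file_offset, u64 num_bytes)
	{
		int ret;

		/*
		 * Pass a NULL ordered struct and the end of the new extent.
		 * A return of 0 means disk_i_size was advanced and the inode
		 * item should be written out; non-zero means a pending
		 * ordered extent or delalloc range still blocks the update.
		 */
		ret = btrfs_ordered_update_i_size(inode,
						  file_offset + num_bytes, NULL);
		if (!ret)
			ret = btrfs_update_inode(trans, root, inode);
		return ret;
	}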
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/btrfs_inode.h  |   5
-rw-r--r--  fs/btrfs/inode.c        |  71
-rw-r--r--  fs/btrfs/ordered-data.c | 105
-rw-r--r--  fs/btrfs/ordered-data.h |   2
4 files changed, 127 insertions(+), 56 deletions(-)
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index f6783a42f010..3f1f50d9d916 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -44,9 +44,6 @@ struct btrfs_inode {
 	 */
 	struct extent_io_tree io_failure_tree;
 
-	/* held while inesrting or deleting extents from files */
-	struct mutex extent_mutex;
-
 	/* held while logging the inode in tree-log.c */
 	struct mutex log_mutex;
 
@@ -166,7 +163,7 @@ static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
 
 static inline void btrfs_i_size_write(struct inode *inode, u64 size)
 {
-	inode->i_size = size;
+	i_size_write(inode, size);
 	BTRFS_I(inode)->disk_i_size = size;
 }
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ef250be49cdb..fa57247887e3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -188,8 +188,18 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_free_path(path);
 
+	/*
+	 * we're an inline extent, so nobody can
+	 * extend the file past i_size without locking
+	 * a page we already have locked.
+	 *
+	 * We must do any isize and inode updates
+	 * before we unlock the pages.  Otherwise we
+	 * could end up racing with unlink.
+	 */
 	BTRFS_I(inode)->disk_i_size = inode->i_size;
 	btrfs_update_inode(trans, root, inode);
+
 	return 0;
 fail:
 	btrfs_free_path(path);
@@ -415,7 +425,6 @@ again:
 						    start, end,
 						    total_compressed, pages);
 		}
-		btrfs_end_transaction(trans, root);
 		if (ret == 0) {
 			/*
 			 * inline extent creation worked, we don't need
@@ -429,9 +438,11 @@ again:
 			     EXTENT_CLEAR_DELALLOC |
 			     EXTENT_CLEAR_ACCOUNTING |
 			     EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
-			ret = 0;
+
+			btrfs_end_transaction(trans, root);
 			goto free_pages_out;
 		}
+		btrfs_end_transaction(trans, root);
 	}
 
 	if (will_compress) {
@@ -542,7 +553,6 @@ static noinline int submit_compressed_extents(struct inode *inode,
 	if (list_empty(&async_cow->extents))
 		return 0;
 
-	trans = btrfs_join_transaction(root, 1);
 
 	while (!list_empty(&async_cow->extents)) {
 		async_extent = list_entry(async_cow->extents.next,
@@ -589,19 +599,15 @@ retry:
 		lock_extent(io_tree, async_extent->start,
 			    async_extent->start + async_extent->ram_size - 1,
 			    GFP_NOFS);
-		/*
-		 * here we're doing allocation and writeback of the
-		 * compressed pages
-		 */
-		btrfs_drop_extent_cache(inode, async_extent->start,
-					async_extent->start +
-					async_extent->ram_size - 1, 0);
 
+		trans = btrfs_join_transaction(root, 1);
 		ret = btrfs_reserve_extent(trans, root,
 					   async_extent->compressed_size,
 					   async_extent->compressed_size,
 					   0, alloc_hint,
 					   (u64)-1, &ins, 1);
+		btrfs_end_transaction(trans, root);
+
 		if (ret) {
 			int i;
 			for (i = 0; i < async_extent->nr_pages; i++) {
@@ -617,6 +623,14 @@ retry:
 			goto retry;
 		}
 
+		/*
+		 * here we're doing allocation and writeback of the
+		 * compressed pages
+		 */
+		btrfs_drop_extent_cache(inode, async_extent->start,
+					async_extent->start +
+					async_extent->ram_size - 1, 0);
+
 		em = alloc_extent_map(GFP_NOFS);
 		em->start = async_extent->start;
 		em->len = async_extent->ram_size;
@@ -648,8 +662,6 @@ retry:
 						BTRFS_ORDERED_COMPRESSED);
 		BUG_ON(ret);
 
-		btrfs_end_transaction(trans, root);
-
 		/*
 		 * clear dirty, set writeback and unlock the pages.
 		 */
@@ -671,13 +683,11 @@ retry:
 						  async_extent->nr_pages);
 
 		BUG_ON(ret);
-		trans = btrfs_join_transaction(root, 1);
 		alloc_hint = ins.objectid + ins.offset;
 		kfree(async_extent);
 		cond_resched();
 	}
 
-	btrfs_end_transaction(trans, root);
 	return 0;
 }
 
@@ -741,6 +751,7 @@ static noinline int cow_file_range(struct inode *inode,
 				     EXTENT_CLEAR_DIRTY |
 				     EXTENT_SET_WRITEBACK |
 				     EXTENT_END_WRITEBACK);
+
 		*nr_written = *nr_written +
 		     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
 		*page_started = 1;
@@ -1727,18 +1738,27 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 		}
 	}
 
-	trans = btrfs_join_transaction(root, 1);
-
 	if (!ordered_extent)
 		ordered_extent = btrfs_lookup_ordered_extent(inode, start);
 	BUG_ON(!ordered_extent);
-	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
-		goto nocow;
+	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
+		BUG_ON(!list_empty(&ordered_extent->list));
+		ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+		if (!ret) {
+			trans = btrfs_join_transaction(root, 1);
+			ret = btrfs_update_inode(trans, root, inode);
+			BUG_ON(ret);
+			btrfs_end_transaction(trans, root);
+		}
+		goto out;
+	}
 
 	lock_extent(io_tree, ordered_extent->file_offset,
 		    ordered_extent->file_offset + ordered_extent->len - 1,
 		    GFP_NOFS);
 
+	trans = btrfs_join_transaction(root, 1);
+
 	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
 		compressed = 1;
 	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
@@ -1765,22 +1785,20 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 	unlock_extent(io_tree, ordered_extent->file_offset,
 		      ordered_extent->file_offset + ordered_extent->len - 1,
 		      GFP_NOFS);
-nocow:
 	add_pending_csums(trans, inode, ordered_extent->file_offset,
 			  &ordered_extent->list);
 
-	mutex_lock(&BTRFS_I(inode)->extent_mutex);
-	btrfs_ordered_update_i_size(inode, ordered_extent);
-	btrfs_update_inode(trans, root, inode);
-	btrfs_remove_ordered_extent(inode, ordered_extent);
-	mutex_unlock(&BTRFS_I(inode)->extent_mutex);
-
+	/* this also removes the ordered extent from the tree */
+	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
+	ret = btrfs_update_inode(trans, root, inode);
+	BUG_ON(ret);
+	btrfs_end_transaction(trans, root);
+out:
 	/* once for us */
 	btrfs_put_ordered_extent(ordered_extent);
 	/* once for the tree */
 	btrfs_put_ordered_extent(ordered_extent);
 
-	btrfs_end_transaction(trans, root);
 	return 0;
 }
 
@@ -3562,7 +3580,6 @@ static noinline void init_btrfs_i(struct inode *inode)
 	INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
 	btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
-	mutex_init(&BTRFS_I(inode)->extent_mutex);
 	mutex_init(&BTRFS_I(inode)->log_mutex);
 }
 
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 5799bc46a309..9b16073bb875 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -291,16 +291,16 @@ int btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 
 /*
  * remove an ordered extent from the tree.  No references are dropped
- * but, anyone waiting on this extent is woken up.
+ * and you must wake_up entry->wait.  You must hold the tree mutex
+ * while you call this function.
  */
-int btrfs_remove_ordered_extent(struct inode *inode,
+static int __btrfs_remove_ordered_extent(struct inode *inode,
 				struct btrfs_ordered_extent *entry)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
-	mutex_lock(&tree->mutex);
 	node = &entry->rb_node;
 	rb_erase(node, &tree->tree);
 	tree->last = NULL;
@@ -326,9 +326,26 @@ int btrfs_remove_ordered_extent(struct inode *inode,
 	}
 	spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
 
+	return 0;
+}
+
+/*
+ * remove an ordered extent from the tree.  No references are dropped
+ * but any waiters are woken.
+ */
+int btrfs_remove_ordered_extent(struct inode *inode,
+				struct btrfs_ordered_extent *entry)
+{
+	struct btrfs_ordered_inode_tree *tree;
+	int ret;
+
+	tree = &BTRFS_I(inode)->ordered_tree;
+	mutex_lock(&tree->mutex);
+	ret = __btrfs_remove_ordered_extent(inode, entry);
 	mutex_unlock(&tree->mutex);
 	wake_up(&entry->wait);
-	return 0;
+
+	return ret;
 }
 
 /*
@@ -589,7 +606,7 @@ out:
  * After an extent is done, call this to conditionally update the on disk
  * i_size.  i_size is updated to cover any fully written part of the file.
  */
-int btrfs_ordered_update_i_size(struct inode *inode,
+int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 				struct btrfs_ordered_extent *ordered)
 {
 	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
@@ -597,18 +614,30 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 	u64 disk_i_size;
 	u64 new_i_size;
 	u64 i_size_test;
+	u64 i_size = i_size_read(inode);
 	struct rb_node *node;
+	struct rb_node *prev = NULL;
 	struct btrfs_ordered_extent *test;
+	int ret = 1;
+
+	if (ordered)
+		offset = entry_end(ordered);
 
 	mutex_lock(&tree->mutex);
 	disk_i_size = BTRFS_I(inode)->disk_i_size;
 
+	/* truncate file */
+	if (disk_i_size > i_size) {
+		BTRFS_I(inode)->disk_i_size = i_size;
+		ret = 0;
+		goto out;
+	}
+
 	/*
 	 * if the disk i_size is already at the inode->i_size, or
 	 * this ordered extent is inside the disk i_size, we're done
 	 */
-	if (disk_i_size >= inode->i_size ||
-	    ordered->file_offset + ordered->len <= disk_i_size) {
+	if (disk_i_size == i_size || offset <= disk_i_size) {
 		goto out;
 	}
 
@@ -616,8 +645,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 	 * we can't update the disk_isize if there are delalloc bytes
 	 * between disk_i_size and this ordered extent
 	 */
-	if (test_range_bit(io_tree, disk_i_size,
-			   ordered->file_offset + ordered->len - 1,
+	if (test_range_bit(io_tree, disk_i_size, offset - 1,
 			   EXTENT_DELALLOC, 0, NULL)) {
 		goto out;
 	}
@@ -626,20 +654,32 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 	 * if we find an ordered extent then we can't update disk i_size
 	 * yet
 	 */
-	node = &ordered->rb_node;
-	while (1) {
-		node = rb_prev(node);
-		if (!node)
-			break;
+	if (ordered) {
+		node = rb_prev(&ordered->rb_node);
+	} else {
+		prev = tree_search(tree, offset);
+		/*
+		 * we insert file extents without involving ordered struct,
+		 * so there should be no ordered struct cover this offset
+		 */
+		if (prev) {
+			test = rb_entry(prev, struct btrfs_ordered_extent,
+					rb_node);
+			BUG_ON(offset_in_entry(test, offset));
+		}
+		node = prev;
+	}
+	while (node) {
 		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
 		if (test->file_offset + test->len <= disk_i_size)
 			break;
-		if (test->file_offset >= inode->i_size)
+		if (test->file_offset >= i_size)
 			break;
 		if (test->file_offset >= disk_i_size)
 			goto out;
+		node = rb_prev(node);
 	}
-	new_i_size = min_t(u64, entry_end(ordered), i_size_read(inode));
+	new_i_size = min_t(u64, offset, i_size);
 
 	/*
 	 * at this point, we know we can safely update i_size to at least
@@ -647,7 +687,14 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 	 * walk forward and see if ios from higher up in the file have
 	 * finished.
 	 */
-	node = rb_next(&ordered->rb_node);
+	if (ordered) {
+		node = rb_next(&ordered->rb_node);
+	} else {
+		if (prev)
+			node = rb_next(prev);
+		else
+			node = rb_first(&tree->tree);
+	}
 	i_size_test = 0;
 	if (node) {
 		/*
@@ -655,10 +702,10 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 		 * between our ordered extent and the next one.
 		 */
 		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-		if (test->file_offset > entry_end(ordered))
+		if (test->file_offset > offset)
 			i_size_test = test->file_offset;
 	} else {
-		i_size_test = i_size_read(inode);
+		i_size_test = i_size;
 	}
 
 	/*
@@ -667,15 +714,25 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 	 * are no delalloc bytes in this area, it is safe to update
 	 * disk_i_size to the end of the region.
 	 */
-	if (i_size_test > entry_end(ordered) &&
-	    !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
+	if (i_size_test > offset &&
+	    !test_range_bit(io_tree, offset, i_size_test - 1,
 			    EXTENT_DELALLOC, 0, NULL)) {
-		new_i_size = min_t(u64, i_size_test, i_size_read(inode));
+		new_i_size = min_t(u64, i_size_test, i_size);
 	}
 	BTRFS_I(inode)->disk_i_size = new_i_size;
+	ret = 0;
 out:
+	/*
+	 * we need to remove the ordered extent with the tree lock held
+	 * so that other people calling this function don't find our fully
+	 * processed ordered entry and skip updating the i_size
+	 */
+	if (ordered)
+		__btrfs_remove_ordered_extent(inode, ordered);
 	mutex_unlock(&tree->mutex);
-	return 0;
+	if (ordered)
+		wake_up(&ordered->wait);
+	return ret;
 }
 
 /*
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index f82e87488ca8..4fa20398aec1 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -150,7 +150,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
 int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
 struct btrfs_ordered_extent *
 btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
-int btrfs_ordered_update_i_size(struct inode *inode,
+int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 				struct btrfs_ordered_extent *ordered);
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
 int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only);