| author    | Christoph Hellwig <hch@lst.de>           | 2009-10-01 12:58:30 -0400 |
|-----------|------------------------------------------|---------------------------|
| committer | Chris Mason <chris.mason@oracle.com>     | 2009-10-01 12:58:30 -0400 |
| commit    | 8aa38c31b7659e338fee4d9af4c3805acbd9806f |                           |
| tree      | 44d20f44a432ba05cfcfcd093428e777e4d3fd9e |                           |
| parent    | 25472b880c69c0daa485c4f80a6550437ed1149f |                           |
Btrfs: remove duplicates of filemap_ helpers
Use filemap_fdatawrite_range and filemap_fdatawait_range instead of
local copies of the functions. For filemap_fdatawait_range that
also means replacing the awkward old wait_on_page_writeback_range
calling convention with the regular filemap byte offsets.
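
A minimal sketch of the convention change, for illustration only: the
wrapper name sync_dirty_range below is made up for this example and is
not part of the patch; only the filemap_ helpers it calls are the real
in-kernel API.

    #include <linux/fs.h>       /* filemap_fdatawrite_range(), filemap_fdatawait_range() */
    #include <linux/pagemap.h>  /* PAGE_CACHE_SHIFT, used by the old convention */

    /* Flush and wait for the inclusive byte range [start, end]. */
    static int sync_dirty_range(struct address_space *mapping, u64 start, u64 end)
    {
            int ret;

            /*
             * Old convention (removed by this patch): the local copy
             * btrfs_wait_on_page_writeback_range() took inclusive page
             * indexes, so callers had to shift byte offsets down:
             *
             *      btrfs_wait_on_page_writeback_range(mapping,
             *                      start >> PAGE_CACHE_SHIFT,
             *                      end >> PAGE_CACHE_SHIFT);
             */

            /* New convention: the regular filemap helpers take byte offsets. */
            ret = filemap_fdatawrite_range(mapping, start, end);
            if (ret == 0)
                    ret = filemap_fdatawait_range(mapping, start, end);
            return ret;
    }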
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
| -rw-r--r-- | fs/btrfs/disk-io.c      | 10 |
| -rw-r--r-- | fs/btrfs/file.c         |  5 |
| -rw-r--r-- | fs/btrfs/ordered-data.c | 93 |
| -rw-r--r-- | fs/btrfs/ordered-data.h |  4 |

4 files changed, 10 insertions, 102 deletions
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d20dc05208fe..af0435f79fa6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -822,16 +822,14 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 
 int btrfs_write_tree_block(struct extent_buffer *buf)
 {
-	return btrfs_fdatawrite_range(buf->first_page->mapping, buf->start,
-				      buf->start + buf->len - 1, WB_SYNC_ALL);
+	return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
+					buf->start + buf->len - 1);
 }
 
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
 {
-	return btrfs_wait_on_page_writeback_range(buf->first_page->mapping,
-					buf->start >> PAGE_CACHE_SHIFT,
-					(buf->start + buf->len - 1) >>
-					PAGE_CACHE_SHIFT);
+	return filemap_fdatawait_range(buf->first_page->mapping,
+				       buf->start, buf->start + buf->len - 1);
 }
 
 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 7351bdbca26f..ca784a7fbeba 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1022,9 +1022,8 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
 		}
 
 		if (will_write) {
-			btrfs_fdatawrite_range(inode->i_mapping, pos,
-					       pos + write_bytes - 1,
-					       WB_SYNC_ALL);
+			filemap_fdatawrite_range(inode->i_mapping, pos,
+						 pos + write_bytes - 1);
 		} else {
 			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
 							   num_pages);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b5d6d24726b0..897fba835f89 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -458,7 +458,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
 	 * start IO on any dirty ones so the wait doesn't stall waiting
 	 * for pdflush to find them
 	 */
-	btrfs_fdatawrite_range(inode->i_mapping, start, end, WB_SYNC_ALL);
+	filemap_fdatawrite_range(inode->i_mapping, start, end);
 	if (wait) {
 		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
 						 &entry->flags));
@@ -488,17 +488,15 @@ again:
 	/* start IO across the range first to instantiate any delalloc
 	 * extents
 	 */
-	btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);
+	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
 
 	/* The compression code will leave pages locked but return from
 	 * writepage without setting the page writeback. Starting again
 	 * with WB_SYNC_ALL will end up waiting for the IO to actually start.
 	 */
-	btrfs_fdatawrite_range(inode->i_mapping, start, orig_end, WB_SYNC_ALL);
+	filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
 
-	btrfs_wait_on_page_writeback_range(inode->i_mapping,
-					   start >> PAGE_CACHE_SHIFT,
-					   orig_end >> PAGE_CACHE_SHIFT);
+	filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 
 	end = orig_end;
 	found = 0;
@@ -716,89 +714,6 @@ out:
 }
 
 
-/**
- * taken from mm/filemap.c because it isn't exported
- *
- * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
- * @mapping:	address space structure to write
- * @start:	offset in bytes where the range starts
- * @end:	offset in bytes where the range ends (inclusive)
- * @sync_mode:	enable synchronous operation
- *
- * Start writeback against all of a mapping's dirty pages that lie
- * within the byte offsets <start, end> inclusive.
- *
- * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
- * opposed to a regular memory cleansing writeback.  The difference between
- * these two operations is that if a dirty page/buffer is encountered, it must
- * be waited upon, and not just skipped over.
- */
-int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
-			   loff_t end, int sync_mode)
-{
-	struct writeback_control wbc = {
-		.sync_mode = sync_mode,
-		.nr_to_write = mapping->nrpages * 2,
-		.range_start = start,
-		.range_end = end,
-	};
-	return btrfs_writepages(mapping, &wbc);
-}
-
-/**
- * taken from mm/filemap.c because it isn't exported
- *
- * wait_on_page_writeback_range - wait for writeback to complete
- * @mapping:	target address_space
- * @start:	beginning page index
- * @end:	ending page index
- *
- * Wait for writeback to complete against pages indexed by start->end
- * inclusive
- */
-int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
-				       pgoff_t start, pgoff_t end)
-{
-	struct pagevec pvec;
-	int nr_pages;
-	int ret = 0;
-	pgoff_t index;
-
-	if (end < start)
-		return 0;
-
-	pagevec_init(&pvec, 0);
-	index = start;
-	while ((index <= end) &&
-		(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-			PAGECACHE_TAG_WRITEBACK,
-			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
-		unsigned i;
-
-		for (i = 0; i < nr_pages; i++) {
-			struct page *page = pvec.pages[i];
-
-			/* until radix tree lookup accepts end_index */
-			if (page->index > end)
-				continue;
-
-			wait_on_page_writeback(page);
-			if (PageError(page))
-				ret = -EIO;
-		}
-		pagevec_release(&pvec);
-		cond_resched();
-	}
-
-	/* Check for outstanding write errors */
-	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
-		ret = -ENOSPC;
-	if (test_and_clear_bit(AS_EIO, &mapping->flags))
-		ret = -EIO;
-
-	return ret;
-}
-
 /*
  * add a given inode to the list of inodes that must be fully on
  * disk before a transaction commit finishes.
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 993a7ea45c70..f82e87488ca8 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -153,10 +153,6 @@ btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
 int btrfs_ordered_update_i_size(struct inode *inode,
 				struct btrfs_ordered_extent *ordered);
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
-int btrfs_wait_on_page_writeback_range(struct address_space *mapping,
-				       pgoff_t start, pgoff_t end);
-int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
-			   loff_t end, int sync_mode);
 int btrfs_wait_ordered_extents(struct btrfs_root *root, int nocow_only);
 int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
 int btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
