about summary refs log tree commit diff stats
path: root/fs/btrfs/extent_io.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--  fs/btrfs/extent_io.c  68
1 files changed, 25 insertions, 43 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1b319df29eee..5c00d6aeae75 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1834,7 +1834,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
1834 */ 1834 */
1835static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) 1835static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1836{ 1836{
1837 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 1837 u64 start = page_offset(page);
1838 u64 end = start + PAGE_CACHE_SIZE - 1; 1838 u64 end = start + PAGE_CACHE_SIZE - 1;
1839 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) 1839 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
1840 SetPageUptodate(page); 1840 SetPageUptodate(page);
@@ -1846,7 +1846,7 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1846 */ 1846 */
1847static void check_page_locked(struct extent_io_tree *tree, struct page *page) 1847static void check_page_locked(struct extent_io_tree *tree, struct page *page)
1848{ 1848{
1849 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 1849 u64 start = page_offset(page);
1850 u64 end = start + PAGE_CACHE_SIZE - 1; 1850 u64 end = start + PAGE_CACHE_SIZE - 1;
1851 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) 1851 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1852 unlock_page(page); 1852 unlock_page(page);
@@ -1960,7 +1960,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
1960 return -EIO; 1960 return -EIO;
1961 } 1961 }
1962 bio->bi_bdev = dev->bdev; 1962 bio->bi_bdev = dev->bdev;
1963 bio_add_page(bio, page, length, start-page_offset(page)); 1963 bio_add_page(bio, page, length, start - page_offset(page));
1964 btrfsic_submit_bio(WRITE_SYNC, bio); 1964 btrfsic_submit_bio(WRITE_SYNC, bio);
1965 wait_for_completion(&compl); 1965 wait_for_completion(&compl);
1966 1966
@@ -2293,8 +2293,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
2293 struct page *page = bvec->bv_page; 2293 struct page *page = bvec->bv_page;
2294 tree = &BTRFS_I(page->mapping->host)->io_tree; 2294 tree = &BTRFS_I(page->mapping->host)->io_tree;
2295 2295
2296 start = ((u64)page->index << PAGE_CACHE_SHIFT) + 2296 start = page_offset(page) + bvec->bv_offset;
2297 bvec->bv_offset;
2298 end = start + bvec->bv_len - 1; 2297 end = start + bvec->bv_len - 1;
2299 2298
2300 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) 2299 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
@@ -2353,8 +2352,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2353 (long int)bio->bi_bdev); 2352 (long int)bio->bi_bdev);
2354 tree = &BTRFS_I(page->mapping->host)->io_tree; 2353 tree = &BTRFS_I(page->mapping->host)->io_tree;
2355 2354
2356 start = ((u64)page->index << PAGE_CACHE_SHIFT) + 2355 start = page_offset(page) + bvec->bv_offset;
2357 bvec->bv_offset;
2358 end = start + bvec->bv_len - 1; 2356 end = start + bvec->bv_len - 1;
2359 2357
2360 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) 2358 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
@@ -2471,7 +2469,7 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
2471 struct extent_io_tree *tree = bio->bi_private; 2469 struct extent_io_tree *tree = bio->bi_private;
2472 u64 start; 2470 u64 start;
2473 2471
2474 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; 2472 start = page_offset(page) + bvec->bv_offset;
2475 2473
2476 bio->bi_private = NULL; 2474 bio->bi_private = NULL;
2477 2475
@@ -2595,7 +2593,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2595 unsigned long *bio_flags) 2593 unsigned long *bio_flags)
2596{ 2594{
2597 struct inode *inode = page->mapping->host; 2595 struct inode *inode = page->mapping->host;
2598 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 2596 u64 start = page_offset(page);
2599 u64 page_end = start + PAGE_CACHE_SIZE - 1; 2597 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2600 u64 end; 2598 u64 end;
2601 u64 cur = start; 2599 u64 cur = start;
@@ -2648,6 +2646,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2648 } 2646 }
2649 } 2647 }
2650 while (cur <= end) { 2648 while (cur <= end) {
2649 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2650
2651 if (cur >= last_byte) { 2651 if (cur >= last_byte) {
2652 char *userpage; 2652 char *userpage;
2653 struct extent_state *cached = NULL; 2653 struct extent_state *cached = NULL;
@@ -2735,26 +2735,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
2735 continue; 2735 continue;
2736 } 2736 }
2737 2737
2738 ret = 0; 2738 pnr -= page->index;
2739 if (tree->ops && tree->ops->readpage_io_hook) { 2739 ret = submit_extent_page(READ, tree, page,
2740 ret = tree->ops->readpage_io_hook(page, cur,
2741 cur + iosize - 1);
2742 }
2743 if (!ret) {
2744 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
2745 pnr -= page->index;
2746 ret = submit_extent_page(READ, tree, page,
2747 sector, disk_io_size, pg_offset, 2740 sector, disk_io_size, pg_offset,
2748 bdev, bio, pnr, 2741 bdev, bio, pnr,
2749 end_bio_extent_readpage, mirror_num, 2742 end_bio_extent_readpage, mirror_num,
2750 *bio_flags, 2743 *bio_flags,
2751 this_bio_flag); 2744 this_bio_flag);
2752 if (!ret) { 2745 if (!ret) {
2753 nr++; 2746 nr++;
2754 *bio_flags = this_bio_flag; 2747 *bio_flags = this_bio_flag;
2755 } 2748 } else {
2756 }
2757 if (ret) {
2758 SetPageError(page); 2749 SetPageError(page);
2759 unlock_extent(tree, cur, cur + iosize - 1); 2750 unlock_extent(tree, cur, cur + iosize - 1);
2760 } 2751 }
@@ -2806,7 +2797,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
2806 struct inode *inode = page->mapping->host; 2797 struct inode *inode = page->mapping->host;
2807 struct extent_page_data *epd = data; 2798 struct extent_page_data *epd = data;
2808 struct extent_io_tree *tree = epd->tree; 2799 struct extent_io_tree *tree = epd->tree;
2809 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 2800 u64 start = page_offset(page);
2810 u64 delalloc_start; 2801 u64 delalloc_start;
2811 u64 page_end = start + PAGE_CACHE_SIZE - 1; 2802 u64 page_end = start + PAGE_CACHE_SIZE - 1;
2812 u64 end; 2803 u64 end;
@@ -3124,12 +3115,9 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
3124 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); 3115 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3125 spin_unlock(&eb->refs_lock); 3116 spin_unlock(&eb->refs_lock);
3126 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); 3117 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3127 spin_lock(&fs_info->delalloc_lock); 3118 __percpu_counter_add(&fs_info->dirty_metadata_bytes,
3128 if (fs_info->dirty_metadata_bytes >= eb->len) 3119 -eb->len,
3129 fs_info->dirty_metadata_bytes -= eb->len; 3120 fs_info->dirty_metadata_batch);
3130 else
3131 WARN_ON(1);
3132 spin_unlock(&fs_info->delalloc_lock);
3133 ret = 1; 3121 ret = 1;
3134 } else { 3122 } else {
3135 spin_unlock(&eb->refs_lock); 3123 spin_unlock(&eb->refs_lock);
@@ -3446,15 +3434,9 @@ retry:
3446 * swizzled back from swapper_space to tmpfs file 3434 * swizzled back from swapper_space to tmpfs file
3447 * mapping 3435 * mapping
3448 */ 3436 */
3449 if (tree->ops && 3437 if (!trylock_page(page)) {
3450 tree->ops->write_cache_pages_lock_hook) { 3438 flush_fn(data);
3451 tree->ops->write_cache_pages_lock_hook(page, 3439 lock_page(page);
3452 data, flush_fn);
3453 } else {
3454 if (!trylock_page(page)) {
3455 flush_fn(data);
3456 lock_page(page);
3457 }
3458 } 3440 }
3459 3441
3460 if (unlikely(page->mapping != mapping)) { 3442 if (unlikely(page->mapping != mapping)) {
@@ -3674,7 +3656,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
3674 struct page *page, unsigned long offset) 3656 struct page *page, unsigned long offset)
3675{ 3657{
3676 struct extent_state *cached_state = NULL; 3658 struct extent_state *cached_state = NULL;
3677 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT); 3659 u64 start = page_offset(page);
3678 u64 end = start + PAGE_CACHE_SIZE - 1; 3660 u64 end = start + PAGE_CACHE_SIZE - 1;
3679 size_t blocksize = page->mapping->host->i_sb->s_blocksize; 3661 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
3680 3662
@@ -3700,7 +3682,7 @@ int try_release_extent_state(struct extent_map_tree *map,
3700 struct extent_io_tree *tree, struct page *page, 3682 struct extent_io_tree *tree, struct page *page,
3701 gfp_t mask) 3683 gfp_t mask)
3702{ 3684{
3703 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 3685 u64 start = page_offset(page);
3704 u64 end = start + PAGE_CACHE_SIZE - 1; 3686 u64 end = start + PAGE_CACHE_SIZE - 1;
3705 int ret = 1; 3687 int ret = 1;
3706 3688
@@ -3739,7 +3721,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
3739 gfp_t mask) 3721 gfp_t mask)
3740{ 3722{
3741 struct extent_map *em; 3723 struct extent_map *em;
3742 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; 3724 u64 start = page_offset(page);
3743 u64 end = start + PAGE_CACHE_SIZE - 1; 3725 u64 end = start + PAGE_CACHE_SIZE - 1;
3744 3726
3745 if ((mask & __GFP_WAIT) && 3727 if ((mask & __GFP_WAIT) &&