Diffstat (limited to 'fs/btrfs/extent_io.c')

-rw-r--r--	fs/btrfs/extent_io.c	266
1 file changed, 132 insertions(+), 134 deletions(-)
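This is the btrfs piece of the tree-wide removal of the PAGE_CACHE_* and page_cache_{get,release} aliases. Every hunk below is a mechanical rename, because by the time of this change the old names were one-to-one aliases for the plain page macros and refcounting helpers. For reference, a sketch of the historical definitions from include/linux/pagemap.h as they stood before their removal (reproduced from memory, so treat the exact layout as illustrative):

/*
 * Historical aliases. The page cache was once intended to allow a unit
 * larger than a single page, but PAGE_CACHE_SIZE was always equal to
 * PAGE_SIZE in practice, so the aliases were dropped and callers were
 * converted to the underlying names, as this diff does for btrfs.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)

Given that equivalence, the conversion changes no behavior; it only renames.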
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 76a0c8597d98..d247fc0eea19 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1363,23 +1363,23 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
 
 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
 {
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	struct page *page;
 
 	while (index <= end_index) {
 		page = find_get_page(inode->i_mapping, index);
 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
 		clear_page_dirty_for_io(page);
-		page_cache_release(page);
+		put_page(page);
 		index++;
 	}
 }
 
 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
 {
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	struct page *page;
 
 	while (index <= end_index) {
@@ -1387,7 +1387,7 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
 		__set_page_dirty_nobuffers(page);
 		account_page_redirty(page);
-		page_cache_release(page);
+		put_page(page);
 		index++;
 	}
 }
@@ -1397,15 +1397,15 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
  */
 static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 {
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	struct page *page;
 
 	while (index <= end_index) {
 		page = find_get_page(tree->mapping, index);
 		BUG_ON(!page); /* Pages should be in the extent_io_tree */
 		set_page_writeback(page);
-		page_cache_release(page);
+		put_page(page);
 		index++;
 	}
 }
@@ -1556,8 +1556,8 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
 {
 	int ret;
 	struct page *pages[16];
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	unsigned long nr_pages = end_index - index + 1;
 	int i;
 
@@ -1571,7 +1571,7 @@ static noinline void __unlock_for_delalloc(struct inode *inode,
 		for (i = 0; i < ret; i++) {
 			if (pages[i] != locked_page)
 				unlock_page(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 		}
 		nr_pages -= ret;
 		index += ret;
@@ -1584,9 +1584,9 @@ static noinline int lock_delalloc_pages(struct inode *inode,
 					u64 delalloc_start,
 					u64 delalloc_end)
 {
-	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
+	unsigned long index = delalloc_start >> PAGE_SHIFT;
 	unsigned long start_index = index;
-	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = delalloc_end >> PAGE_SHIFT;
 	unsigned long pages_locked = 0;
 	struct page *pages[16];
 	unsigned long nrpages;
@@ -1619,11 +1619,11 @@ static noinline int lock_delalloc_pages(struct inode *inode,
 				    pages[i]->mapping != inode->i_mapping) {
 					ret = -EAGAIN;
 					unlock_page(pages[i]);
-					page_cache_release(pages[i]);
+					put_page(pages[i]);
 					goto done;
 				}
 			}
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 			pages_locked++;
 		}
 		nrpages -= ret;
@@ -1636,7 +1636,7 @@ done:
 		__unlock_for_delalloc(inode, locked_page,
 			      delalloc_start,
 			      ((u64)(start_index + pages_locked - 1)) <<
-			      PAGE_CACHE_SHIFT);
+			      PAGE_SHIFT);
 	}
 	return ret;
 }
@@ -1696,7 +1696,7 @@ again:
 		free_extent_state(cached_state);
 		cached_state = NULL;
 		if (!loops) {
-			max_bytes = PAGE_CACHE_SIZE;
+			max_bytes = PAGE_SIZE;
 			loops = 1;
 			goto again;
 		} else {
@@ -1735,8 +1735,8 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
 	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
 	int ret;
 	struct page *pages[16];
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
+	unsigned long end_index = end >> PAGE_SHIFT;
 	unsigned long nr_pages = end_index - index + 1;
 	int i;
 
@@ -1757,7 +1757,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
 				SetPagePrivate2(pages[i]);
 
 			if (pages[i] == locked_page) {
-				page_cache_release(pages[i]);
+				put_page(pages[i]);
 				continue;
 			}
 			if (page_ops & PAGE_CLEAR_DIRTY)
@@ -1770,7 +1770,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
 				end_page_writeback(pages[i]);
 			if (page_ops & PAGE_UNLOCK)
 				unlock_page(pages[i]);
-			page_cache_release(pages[i]);
+			put_page(pages[i]);
 		}
 		nr_pages -= ret;
 		index += ret;
@@ -1961,7 +1961,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 {
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
 		SetPageUptodate(page);
 }
@@ -2071,11 +2071,11 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
 		struct page *p = eb->pages[i];
 
 		ret = repair_io_failure(root->fs_info->btree_inode, start,
-					PAGE_CACHE_SIZE, start, p,
+					PAGE_SIZE, start, p,
 					start - page_offset(p), mirror_num);
 		if (ret)
 			break;
-		start += PAGE_CACHE_SIZE;
+		start += PAGE_SIZE;
 	}
 
 	return ret;
@@ -2466,8 +2466,8 @@ static void end_bio_extent_writepage(struct bio *bio)
 		 * advance bv_offset and adjust bv_len to compensate.
 		 * Print a warning for nonzero offsets, and an error
 		 * if they don't add up to a full page. */
-		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
-			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
+			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
 				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
 					"partial page write in btrfs with offset %u and length %u",
 					bvec->bv_offset, bvec->bv_len);
@@ -2541,8 +2541,8 @@ static void end_bio_extent_readpage(struct bio *bio)
 		 * advance bv_offset and adjust bv_len to compensate.
 		 * Print a warning for nonzero offsets, and an error
 		 * if they don't add up to a full page. */
-		if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) {
-			if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE)
+		if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
+			if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
 				btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
 					"partial page read in btrfs with offset %u and length %u",
 					bvec->bv_offset, bvec->bv_len);
@@ -2598,13 +2598,13 @@
 readpage_ok:
 		if (likely(uptodate)) {
 			loff_t i_size = i_size_read(inode);
-			pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+			pgoff_t end_index = i_size >> PAGE_SHIFT;
 			unsigned off;
 
 			/* Zero out the end if this page straddles i_size */
-			off = i_size & (PAGE_CACHE_SIZE-1);
+			off = i_size & (PAGE_SIZE-1);
 			if (page->index == end_index && off)
-				zero_user_segment(page, off, PAGE_CACHE_SIZE);
+				zero_user_segment(page, off, PAGE_SIZE);
 			SetPageUptodate(page);
 		} else {
 			ClearPageUptodate(page);
@@ -2768,7 +2768,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 	struct bio *bio;
 	int contig = 0;
 	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
-	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
+	size_t page_size = min_t(size_t, size, PAGE_SIZE);
 
 	if (bio_ret && *bio_ret) {
 		bio = *bio_ret;
@@ -2821,7 +2821,7 @@ static void attach_extent_buffer_page(struct extent_buffer *eb,
 {
 	if (!PagePrivate(page)) {
 		SetPagePrivate(page);
-		page_cache_get(page);
+		get_page(page);
 		set_page_private(page, (unsigned long)eb);
 	} else {
 		WARN_ON(page->private != (unsigned long)eb);
@@ -2832,7 +2832,7 @@ void set_page_extent_mapped(struct page *page)
 {
 	if (!PagePrivate(page)) {
 		SetPagePrivate(page);
-		page_cache_get(page);
+		get_page(page);
 		set_page_private(page, EXTENT_PAGE_PRIVATE);
 	}
 }
@@ -2880,7 +2880,7 @@ static int __do_readpage(struct extent_io_tree *tree,
 {
 	struct inode *inode = page->mapping->host;
 	u64 start = page_offset(page);
-	u64 page_end = start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = start + PAGE_SIZE - 1;
 	u64 end;
 	u64 cur = start;
 	u64 extent_offset;
@@ -2909,12 +2909,12 @@ static int __do_readpage(struct extent_io_tree *tree,
 		}
 	}
 
-	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
+	if (page->index == last_byte >> PAGE_SHIFT) {
 		char *userpage;
-		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);
+		size_t zero_offset = last_byte & (PAGE_SIZE - 1);
 
 		if (zero_offset) {
-			iosize = PAGE_CACHE_SIZE - zero_offset;
+			iosize = PAGE_SIZE - zero_offset;
 			userpage = kmap_atomic(page);
 			memset(userpage + zero_offset, 0, iosize);
 			flush_dcache_page(page);
@@ -2922,14 +2922,14 @@
 		}
 	}
 	while (cur <= end) {
-		unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+		unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1;
 		bool force_bio_submit = false;
 
 		if (cur >= last_byte) {
 			char *userpage;
 			struct extent_state *cached = NULL;
 
-			iosize = PAGE_CACHE_SIZE - pg_offset;
+			iosize = PAGE_SIZE - pg_offset;
 			userpage = kmap_atomic(page);
 			memset(userpage + pg_offset, 0, iosize);
 			flush_dcache_page(page);
@@ -3112,7 +3112,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 	for (index = 0; index < nr_pages; index++) {
 		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
 			      mirror_num, bio_flags, rw, prev_em_start);
-		page_cache_release(pages[index]);
+		put_page(pages[index]);
 	}
 }
 
@@ -3134,10 +3134,10 @@ static void __extent_readpages(struct extent_io_tree *tree,
 		page_start = page_offset(pages[index]);
 		if (!end) {
 			start = page_start;
-			end = start + PAGE_CACHE_SIZE - 1;
+			end = start + PAGE_SIZE - 1;
 			first_index = index;
 		} else if (end + 1 == page_start) {
-			end += PAGE_CACHE_SIZE;
+			end += PAGE_SIZE;
 		} else {
 			__do_contiguous_readpages(tree, &pages[first_index],
 						  index - first_index, start,
@@ -3145,7 +3145,7 @@
 						  bio, mirror_num, bio_flags,
 						  rw, prev_em_start);
 			start = page_start;
-			end = start + PAGE_CACHE_SIZE - 1;
+			end = start + PAGE_SIZE - 1;
 			first_index = index;
 		}
 	}
@@ -3167,13 +3167,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	struct inode *inode = page->mapping->host;
 	struct btrfs_ordered_extent *ordered;
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 	int ret;
 
 	while (1) {
 		lock_extent(tree, start, end);
 		ordered = btrfs_lookup_ordered_range(inode, start,
-						PAGE_CACHE_SIZE);
+						PAGE_SIZE);
 		if (!ordered)
 			break;
 		unlock_extent(tree, start, end);
@@ -3227,7 +3227,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
 			       unsigned long *nr_written)
 {
 	struct extent_io_tree *tree = epd->tree;
-	u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = delalloc_start + PAGE_SIZE - 1;
 	u64 nr_delalloc;
 	u64 delalloc_to_write = 0;
 	u64 delalloc_end = 0;
@@ -3264,13 +3264,11 @@
 			goto done;
 		}
 		/*
-		 * delalloc_end is already one less than the total
-		 * length, so we don't subtract one from
-		 * PAGE_CACHE_SIZE
+		 * delalloc_end is already one less than the total length, so
+		 * we don't subtract one from PAGE_SIZE
 		 */
 		delalloc_to_write += (delalloc_end - delalloc_start +
-				      PAGE_CACHE_SIZE) >>
-				      PAGE_CACHE_SHIFT;
+				      PAGE_SIZE) >> PAGE_SHIFT;
 		delalloc_start = delalloc_end + 1;
 	}
 	if (wbc->nr_to_write < delalloc_to_write) {
@@ -3319,7 +3317,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
 {
 	struct extent_io_tree *tree = epd->tree;
 	u64 start = page_offset(page);
-	u64 page_end = start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = start + PAGE_SIZE - 1;
 	u64 end;
 	u64 cur = start;
 	u64 extent_offset;
@@ -3434,7 +3432,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
 		if (ret) {
 			SetPageError(page);
 		} else {
-			unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1;
+			unsigned long max_nr = (i_size >> PAGE_SHIFT) + 1;
 
 			set_range_writeback(tree, cur, cur + iosize - 1);
 			if (!PageWriteback(page)) {
@@ -3477,12 +3475,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	struct inode *inode = page->mapping->host;
 	struct extent_page_data *epd = data;
 	u64 start = page_offset(page);
-	u64 page_end = start + PAGE_CACHE_SIZE - 1;
+	u64 page_end = start + PAGE_SIZE - 1;
 	int ret;
 	int nr = 0;
 	size_t pg_offset = 0;
 	loff_t i_size = i_size_read(inode);
-	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = i_size >> PAGE_SHIFT;
 	int write_flags;
 	unsigned long nr_written = 0;
 
@@ -3497,10 +3495,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
 	ClearPageError(page);
 
-	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
+	pg_offset = i_size & (PAGE_SIZE - 1);
 	if (page->index > end_index ||
 	   (page->index == end_index && !pg_offset)) {
-		page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
+		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
 		unlock_page(page);
 		return 0;
 	}
@@ -3510,7 +3508,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
 		userpage = kmap_atomic(page);
 		memset(userpage + pg_offset, 0,
-		       PAGE_CACHE_SIZE - pg_offset);
+		       PAGE_SIZE - pg_offset);
 		kunmap_atomic(userpage);
 		flush_dcache_page(page);
 	}
@@ -3748,7 +3746,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 		clear_page_dirty_for_io(p);
 		set_page_writeback(p);
 		ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
-					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
+					 PAGE_SIZE, 0, bdev, &epd->bio,
 					 -1, end_bio_extent_buffer_writepage,
 					 0, epd->bio_flags, bio_flags, false);
 		epd->bio_flags = bio_flags;
@@ -3760,7 +3758,7 @@
 			ret = -EIO;
 			break;
 		}
-		offset += PAGE_CACHE_SIZE;
+		offset += PAGE_SIZE;
 		update_nr_written(p, wbc, 1);
 		unlock_page(p);
 	}
@@ -3804,8 +3802,8 @@ int btree_write_cache_pages(struct address_space *mapping,
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		scanned = 1;
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
@@ -3948,8 +3946,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
 		index = mapping->writeback_index; /* Start from prev offset */
 		end = -1;
 	} else {
-		index = wbc->range_start >> PAGE_CACHE_SHIFT;
-		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		index = wbc->range_start >> PAGE_SHIFT;
+		end = wbc->range_end >> PAGE_SHIFT;
 		scanned = 1;
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
@@ -4083,8 +4081,8 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
 	int ret = 0;
 	struct address_space *mapping = inode->i_mapping;
 	struct page *page;
-	unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >>
-		PAGE_CACHE_SHIFT;
+	unsigned long nr_pages = (end - start + PAGE_SIZE) >>
+		PAGE_SHIFT;
 
 	struct extent_page_data epd = {
 		.bio = NULL,
@@ -4102,18 +4100,18 @@
 	};
 
 	while (start <= end) {
-		page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT);
+		page = find_get_page(mapping, start >> PAGE_SHIFT);
 		if (clear_page_dirty_for_io(page))
 			ret = __extent_writepage(page, &wbc_writepages, &epd);
 		else {
 			if (tree->ops && tree->ops->writepage_end_io_hook)
 				tree->ops->writepage_end_io_hook(page, start,
-						 start + PAGE_CACHE_SIZE - 1,
+						 start + PAGE_SIZE - 1,
 						 NULL, 1);
 			unlock_page(page);
 		}
-		page_cache_release(page);
-		start += PAGE_CACHE_SIZE;
+		put_page(page);
+		start += PAGE_SIZE;
 	}
 
 	flush_epd_write_bio(&epd);
@@ -4163,7 +4161,7 @@ int extent_readpages(struct extent_io_tree *tree,
 		list_del(&page->lru);
 		if (add_to_page_cache_lru(page, mapping,
 					page->index, GFP_NOFS)) {
-			page_cache_release(page);
+			put_page(page);
 			continue;
 		}
 
@@ -4197,7 +4195,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 {
 	struct extent_state *cached_state = NULL;
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
 
 	start += ALIGN(offset, blocksize);
@@ -4223,7 +4221,7 @@ static int try_release_extent_state(struct extent_map_tree *map,
 				    struct page *page, gfp_t mask)
 {
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 	int ret = 1;
 
 	if (test_range_bit(tree, start, end,
@@ -4262,7 +4260,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 {
 	struct extent_map *em;
 	u64 start = page_offset(page);
-	u64 end = start + PAGE_CACHE_SIZE - 1;
+	u64 end = start + PAGE_SIZE - 1;
 
 	if (gfpflags_allow_blocking(mask) &&
 	    page->mapping->host->i_size > SZ_16M) {
@@ -4587,14 +4585,14 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
 			ClearPagePrivate(page);
 			set_page_private(page, 0);
 			/* One for the page private */
-			page_cache_release(page);
+			put_page(page);
 		}
 
 		if (mapped)
 			spin_unlock(&page->mapping->private_lock);
 
 		/* One for when we alloced the page */
-		page_cache_release(page);
+		put_page(page);
 	} while (index != 0);
 }
 
@@ -4779,7 +4777,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 
 	rcu_read_lock();
 	eb = radix_tree_lookup(&fs_info->buffer_radix,
-			       start >> PAGE_CACHE_SHIFT);
+			       start >> PAGE_SHIFT);
 	if (eb && atomic_inc_not_zero(&eb->refs)) {
 		rcu_read_unlock();
 		/*
@@ -4829,7 +4827,7 @@ again:
 		goto free_eb;
 	spin_lock(&fs_info->buffer_lock);
 	ret = radix_tree_insert(&fs_info->buffer_radix,
-				start >> PAGE_CACHE_SHIFT, eb);
+				start >> PAGE_SHIFT, eb);
 	spin_unlock(&fs_info->buffer_lock);
 	radix_tree_preload_end();
 	if (ret == -EEXIST) {
@@ -4862,7 +4860,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	unsigned long len = fs_info->tree_root->nodesize;
 	unsigned long num_pages = num_extent_pages(start, len);
 	unsigned long i;
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	unsigned long index = start >> PAGE_SHIFT;
 	struct extent_buffer *eb;
 	struct extent_buffer *exists = NULL;
 	struct page *p;
@@ -4896,7 +4894,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 			if (atomic_inc_not_zero(&exists->refs)) {
 				spin_unlock(&mapping->private_lock);
 				unlock_page(p);
-				page_cache_release(p);
+				put_page(p);
 				mark_extent_buffer_accessed(exists, p);
 				goto free_eb;
 			}
@@ -4908,7 +4906,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 			 */
 			ClearPagePrivate(p);
 			WARN_ON(PageDirty(p));
-			page_cache_release(p);
+			put_page(p);
 		}
 		attach_extent_buffer_page(eb, p);
 		spin_unlock(&mapping->private_lock);
@@ -4931,7 +4929,7 @@ again:
 
 	spin_lock(&fs_info->buffer_lock);
 	ret = radix_tree_insert(&fs_info->buffer_radix,
-				start >> PAGE_CACHE_SHIFT, eb);
+				start >> PAGE_SHIFT, eb);
 	spin_unlock(&fs_info->buffer_lock);
 	radix_tree_preload_end();
 	if (ret == -EEXIST) {
@@ -4994,7 +4992,7 @@ static int release_extent_buffer(struct extent_buffer *eb)
 
 		spin_lock(&fs_info->buffer_lock);
 		radix_tree_delete(&fs_info->buffer_radix,
-				  eb->start >> PAGE_CACHE_SHIFT);
+				  eb->start >> PAGE_SHIFT);
 		spin_unlock(&fs_info->buffer_lock);
 	} else {
 		spin_unlock(&eb->refs_lock);
@@ -5168,8 +5166,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 
 	if (start) {
 		WARN_ON(start < eb->start);
-		start_i = (start >> PAGE_CACHE_SHIFT) -
-			(eb->start >> PAGE_CACHE_SHIFT);
+		start_i = (start >> PAGE_SHIFT) -
+			(eb->start >> PAGE_SHIFT);
 	} else {
 		start_i = 0;
 	}
@@ -5252,18 +5250,18 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
 	struct page *page;
 	char *kaddr;
 	char *dst = (char *)dstv;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 
-		cur = min(len, (PAGE_CACHE_SIZE - offset));
+		cur = min(len, (PAGE_SIZE - offset));
 		kaddr = page_address(page);
 		memcpy(dst, kaddr + offset, cur);
 
@@ -5283,19 +5281,19 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
 	struct page *page;
 	char *kaddr;
 	char __user *dst = (char __user *)dstv;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 	int ret = 0;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 
-		cur = min(len, (PAGE_CACHE_SIZE - offset));
+		cur = min(len, (PAGE_SIZE - offset));
 		kaddr = page_address(page);
 		if (copy_to_user(dst, kaddr + offset, cur)) {
 			ret = -EFAULT;
@@ -5316,13 +5314,13 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 			      unsigned long *map_start,
 			      unsigned long *map_len)
 {
-	size_t offset = start & (PAGE_CACHE_SIZE - 1);
+	size_t offset = start & (PAGE_SIZE - 1);
 	char *kaddr;
 	struct page *p;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 	unsigned long end_i = (start_offset + start + min_len - 1) >>
-		PAGE_CACHE_SHIFT;
+		PAGE_SHIFT;
 
 	if (i != end_i)
 		return -EINVAL;
@@ -5332,7 +5330,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 		*map_start = 0;
 	} else {
 		offset = 0;
-		*map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
+		*map_start = ((u64)i << PAGE_SHIFT) - start_offset;
 	}
 
 	if (start + min_len > eb->len) {
@@ -5345,7 +5343,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 	p = eb->pages[i];
 	kaddr = page_address(p);
 	*map = kaddr + offset;
-	*map_len = PAGE_CACHE_SIZE - offset;
+	*map_len = PAGE_SIZE - offset;
 	return 0;
 }
 
@@ -5358,19 +5356,19 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
 	struct page *page;
 	char *kaddr;
 	char *ptr = (char *)ptrv;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 	int ret = 0;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 
-		cur = min(len, (PAGE_CACHE_SIZE - offset));
+		cur = min(len, (PAGE_SIZE - offset));
 
 		kaddr = page_address(page);
 		ret = memcmp(ptr, kaddr + offset, cur);
@@ -5393,19 +5391,19 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
 	struct page *page;
 	char *kaddr;
 	char *src = (char *)srcv;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 		WARN_ON(!PageUptodate(page));
 
-		cur = min(len, PAGE_CACHE_SIZE - offset);
+		cur = min(len, PAGE_SIZE - offset);
 		kaddr = page_address(page);
 		memcpy(kaddr + offset, src, cur);
 
@@ -5423,19 +5421,19 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
 	size_t offset;
 	struct page *page;
 	char *kaddr;
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + start) >> PAGE_SHIFT;
 
 	WARN_ON(start > eb->len);
 	WARN_ON(start + len > eb->start + eb->len);
 
-	offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1);
+	offset = (start_offset + start) & (PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = eb->pages[i];
 		WARN_ON(!PageUptodate(page));
 
-		cur = min(len, PAGE_CACHE_SIZE - offset);
+		cur = min(len, PAGE_SIZE - offset);
 		kaddr = page_address(page);
 		memset(kaddr + offset, c, cur);
 
@@ -5454,19 +5452,19 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 	size_t offset;
 	struct page *page;
 	char *kaddr;
-	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
-	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
+	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
+	unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
 
 	WARN_ON(src->len != dst_len);
 
 	offset = (start_offset + dst_offset) &
-		(PAGE_CACHE_SIZE - 1);
+		(PAGE_SIZE - 1);
 
 	while (len > 0) {
 		page = dst->pages[i];
 		WARN_ON(!PageUptodate(page));
 
-		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
+		cur = min(len, (unsigned long)(PAGE_SIZE - offset));
 
 		kaddr = page_address(page);
 		read_extent_buffer(src, kaddr + offset, src_offset, cur);
@@ -5508,7 +5506,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
 				    unsigned long *page_index,
 				    size_t *page_offset)
 {
-	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
 	size_t byte_offset = BIT_BYTE(nr);
 	size_t offset;
 
@@ -5519,8 +5517,8 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
 	 */
 	offset = start_offset + start + byte_offset;
 
-	*page_index = offset >> PAGE_CACHE_SHIFT;
-	*page_offset = offset & (PAGE_CACHE_SIZE - 1);
+	*page_index = offset >> PAGE_SHIFT;
+	*page_offset = offset & (PAGE_SIZE - 1);
 }
 
 /**
@@ -5572,7 +5570,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
 		len -= bits_to_set;
 		bits_to_set = BITS_PER_BYTE;
 		mask_to_set = ~0U;
-		if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+		if (++offset >= PAGE_SIZE && len > 0) {
 			offset = 0;
 			page = eb->pages[++i];
 			WARN_ON(!PageUptodate(page));
@@ -5614,7 +5612,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
 		len -= bits_to_clear;
 		bits_to_clear = BITS_PER_BYTE;
 		mask_to_clear = ~0U;
-		if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+		if (++offset >= PAGE_SIZE && len > 0) {
 			offset = 0;
 			page = eb->pages[++i];
 			WARN_ON(!PageUptodate(page));
@@ -5661,7 +5659,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 	size_t cur;
 	size_t dst_off_in_page;
 	size_t src_off_in_page;
-	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
 	unsigned long dst_i;
 	unsigned long src_i;
 
@@ -5680,17 +5678,17 @@
 
 	while (len > 0) {
 		dst_off_in_page = (start_offset + dst_offset) &
-			(PAGE_CACHE_SIZE - 1);
+			(PAGE_SIZE - 1);
 		src_off_in_page = (start_offset + src_offset) &
-			(PAGE_CACHE_SIZE - 1);
+			(PAGE_SIZE - 1);
 
-		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
-		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
+		dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
+		src_i = (start_offset + src_offset) >> PAGE_SHIFT;
 
-		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
+		cur = min(len, (unsigned long)(PAGE_SIZE -
 					       src_off_in_page));
 		cur = min_t(unsigned long, cur,
-			(unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
+			(unsigned long)(PAGE_SIZE - dst_off_in_page));
 
 		copy_pages(dst->pages[dst_i], dst->pages[src_i],
 			   dst_off_in_page, src_off_in_page, cur);
@@ -5709,7 +5707,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 	size_t src_off_in_page;
 	unsigned long dst_end = dst_offset + len - 1;
 	unsigned long src_end = src_offset + len - 1;
-	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
+	size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
 	unsigned long dst_i;
 	unsigned long src_i;
 
@@ -5728,13 +5726,13 @@
 		return;
 	}
 	while (len > 0) {
-		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
-		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
+		dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
+		src_i = (start_offset + src_end) >> PAGE_SHIFT;
 
 		dst_off_in_page = (start_offset + dst_end) &
-			(PAGE_CACHE_SIZE - 1);
+			(PAGE_SIZE - 1);
 		src_off_in_page = (start_offset + src_end) &
-			(PAGE_CACHE_SIZE - 1);
+			(PAGE_SIZE - 1);
 
 		cur = min_t(unsigned long, len, src_off_in_page + 1);
 		cur = min(cur, dst_off_in_page + 1);
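Most hunks above touch the same recurring idiom: convert an inclusive byte range [start, end] to page indexes, then walk the backing pages. A minimal post-conversion sketch of that walk, condensed from extent_range_clear_dirty_for_io() at the top of this diff (the BUG_ON mirrors the file's existing assumption that the pages are present):

/* Walk every page backing the inclusive byte range [start, end]. */
unsigned long index = start >> PAGE_SHIFT;
unsigned long end_index = end >> PAGE_SHIFT;
struct page *page;

while (index <= end_index) {
	page = find_get_page(inode->i_mapping, index);	/* takes a page reference */
	BUG_ON(!page); /* Pages should be in the extent_io_tree */
	clear_page_dirty_for_io(page);
	put_page(page);	/* drops the reference find_get_page() took */
	index++;
}

The put_page() here sits exactly where page_cache_release() used to: it balances the reference taken by find_get_page(), which is why the rename can be applied line by line without changing behavior.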