Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	84
1 file changed, 63 insertions, 21 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 20ddb28602a8..ba41da59e31b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -690,6 +690,15 @@ static void cache_state(struct extent_state *state,
 	}
 }
 
+static void uncache_state(struct extent_state **cached_ptr)
+{
+	if (cached_ptr && (*cached_ptr)) {
+		struct extent_state *state = *cached_ptr;
+		*cached_ptr = NULL;
+		free_extent_state(state);
+	}
+}
+
 /*
  * set some bits on a range in the tree.  This may require allocations or
  * sleeping, so the gfp mask is used to indicate what is allowed.
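
For context, the hunk above pairs with cache_state(), which takes an extra reference on an extent_state and stashes it in *cached_ptr; the new uncache_state() is the counterpart that drops that reference. Below is a minimal userspace sketch of that ref-count pairing only; the struct layout, the plain-int refs field, and the printf output are simplified stand-ins for the kernel's refcounted extent_state, not the real API.

/* Simplified model of the cache/uncache pairing: cache_state() takes an
 * extra reference and remembers the state, uncache_state() drops it.
 * Builds with any C compiler; nothing here is kernel code. */
#include <stdio.h>
#include <stdlib.h>

struct extent_state {
	unsigned long start;
	int refs;			/* the kernel uses atomic_t */
};

static void free_extent_state(struct extent_state *state)
{
	if (--state->refs == 0) {
		printf("state [%lu] freed\n", state->start);
		free(state);
	}
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		state->refs++;		/* reference owned by the cache pointer */
		*cached_ptr = state;
	}
}

static void uncache_state(struct extent_state **cached_ptr)
{
	if (cached_ptr && (*cached_ptr)) {
		struct extent_state *state = *cached_ptr;
		*cached_ptr = NULL;
		free_extent_state(state);	/* drop the cached reference */
	}
}

int main(void)
{
	struct extent_state *state = calloc(1, sizeof(*state));
	struct extent_state *cached = NULL;

	state->start = 4096;
	state->refs = 1;		/* reference held by the "tree" */

	cache_state(state, &cached);	/* refs == 2 */
	uncache_state(&cached);		/* refs == 1, state still live */
	free_extent_state(state);	/* refs == 0, freed */
	return 0;
}
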
@@ -940,10 +949,10 @@ static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-			gfp_t mask)
+			struct extent_state **cached_state, gfp_t mask)
 {
-	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
-			      NULL, mask);
+	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
+			      NULL, cached_state, mask);
 }
 
 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -1012,8 +1021,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
 				mask);
 }
 
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-		  gfp_t mask)
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
 				mask);
@@ -1735,6 +1743,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 
 	do {
 		struct page *page = bvec->bv_page;
+		struct extent_state *cached = NULL;
+		struct extent_state *state;
+
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1749,9 +1760,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		if (++bvec <= bvec_end)
 			prefetchw(&bvec->bv_page->flags);
 
+		spin_lock(&tree->lock);
+		state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
+		if (state && state->start == start) {
+			/*
+			 * take a reference on the state, unlock will drop
+			 * the ref
+			 */
+			cache_state(state, &cached);
+		}
+		spin_unlock(&tree->lock);
+
 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
 			ret = tree->ops->readpage_end_io_hook(page, start, end,
-							      NULL);
+							      state);
 			if (ret)
 				uptodate = 0;
 		}
@@ -1764,15 +1786,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 					test_bit(BIO_UPTODATE, &bio->bi_flags);
 				if (err)
 					uptodate = 0;
+				uncache_state(&cached);
 				continue;
 			}
 		}
 
 		if (uptodate) {
-			set_extent_uptodate(tree, start, end,
+			set_extent_uptodate(tree, start, end, &cached,
 					    GFP_ATOMIC);
 		}
-		unlock_extent(tree, start, end, GFP_ATOMIC);
+		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
 		if (whole_page) {
 			if (uptodate) {
@@ -1811,6 +1834,7 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
 
 	do {
 		struct page *page = bvec->bv_page;
+		struct extent_state *cached = NULL;
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1821,13 +1845,14 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
 			prefetchw(&bvec->bv_page->flags);
 
 		if (uptodate) {
-			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
+			set_extent_uptodate(tree, start, end, &cached,
+					    GFP_ATOMIC);
 		} else {
 			ClearPageUptodate(page);
 			SetPageError(page);
 		}
 
-		unlock_extent(tree, start, end, GFP_ATOMIC);
+		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
 	} while (bvec >= bio->bi_io_vec);
 
@@ -2016,14 +2041,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	while (cur <= end) {
 		if (cur >= last_byte) {
 			char *userpage;
+			struct extent_state *cached = NULL;
+
 			iosize = PAGE_CACHE_SIZE - page_offset;
 			userpage = kmap_atomic(page, KM_USER0);
 			memset(userpage + page_offset, 0, iosize);
 			flush_dcache_page(page);
 			kunmap_atomic(userpage, KM_USER0);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
-					    GFP_NOFS);
-			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+					    &cached, GFP_NOFS);
+			unlock_extent_cached(tree, cur, cur + iosize - 1,
+					     &cached, GFP_NOFS);
 			break;
 		}
 		em = get_extent(inode, page, page_offset, cur,
@@ -2063,14 +2091,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		/* we've found a hole, just zero and go on */
 		if (block_start == EXTENT_MAP_HOLE) {
 			char *userpage;
+			struct extent_state *cached = NULL;
+
 			userpage = kmap_atomic(page, KM_USER0);
 			memset(userpage + page_offset, 0, iosize);
 			flush_dcache_page(page);
 			kunmap_atomic(userpage, KM_USER0);
 
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
-					    GFP_NOFS);
-			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+					    &cached, GFP_NOFS);
+			unlock_extent_cached(tree, cur, cur + iosize - 1,
+					     &cached, GFP_NOFS);
 			cur = cur + iosize;
 			page_offset += iosize;
 			continue;
@@ -2650,7 +2681,7 @@ int extent_readpages(struct extent_io_tree *tree,
 		prefetchw(&page->flags);
 		list_del(&page->lru);
 		if (!add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+					page->index, GFP_NOFS)) {
 			__extent_read_full_page(tree, page, get_extent,
 						&bio, 0, &bio_flags);
 		}
@@ -2789,9 +2820,12 @@ int extent_prepare_write(struct extent_io_tree *tree,
 			iocount++;
 			block_start = block_start + iosize;
 		} else {
-			set_extent_uptodate(tree, block_start, cur_end,
+			struct extent_state *cached = NULL;
+
+			set_extent_uptodate(tree, block_start, cur_end, &cached,
 					    GFP_NOFS);
-			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
+			unlock_extent_cached(tree, block_start, cur_end,
+					     &cached, GFP_NOFS);
 			block_start = cur_end + 1;
 		}
 		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
@@ -3457,7 +3491,7 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
 	num_pages = num_extent_pages(eb->start, eb->len);
 
 	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-			    GFP_NOFS);
+			    NULL, GFP_NOFS);
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
@@ -3885,6 +3919,12 @@ static void move_pages(struct page *dst_page, struct page *src_page,
 	kunmap_atomic(dst_kaddr, KM_USER0);
 }
 
+static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
+{
+	unsigned long distance = (src > dst) ? src - dst : dst - src;
+	return distance < len;
+}
+
 static void copy_pages(struct page *dst_page, struct page *src_page,
 		       unsigned long dst_off, unsigned long src_off,
 		       unsigned long len)
@@ -3892,10 +3932,12 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
 	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
 	char *src_kaddr;
 
-	if (dst_page != src_page)
+	if (dst_page != src_page) {
 		src_kaddr = kmap_atomic(src_page, KM_USER1);
-	else
+	} else {
 		src_kaddr = dst_kaddr;
+		BUG_ON(areas_overlap(src_off, dst_off, len));
+	}
 
 	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
 	kunmap_atomic(dst_kaddr, KM_USER0);
@@ -3970,7 +4012,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 		       "len %lu len %lu\n", dst_offset, len, dst->len);
 		BUG_ON(1);
 	}
-	if (dst_offset < src_offset) {
+	if (!areas_overlap(src_offset, dst_offset, len)) {
 		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
 		return;
 	}
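
The memmove_extent_buffer() change above only takes the memcpy_extent_buffer() fast path when areas_overlap() says the source and destination ranges are disjoint. As a quick sanity check of that predicate outside the kernel, here is a small standalone program with the same body as the helper added in this patch; it assumes nothing beyond standard C.

#include <assert.h>
#include <stdbool.h>

/* Same logic as the helper added above: two ranges of length len overlap
 * exactly when their starting offsets are less than len apart. */
static inline bool areas_overlap(unsigned long src, unsigned long dst,
				 unsigned long len)
{
	unsigned long distance = (src > dst) ? src - dst : dst - src;
	return distance < len;
}

int main(void)
{
	assert(areas_overlap(0, 10, 20));	/* [0,20) and [10,30) overlap */
	assert(!areas_overlap(0, 20, 20));	/* [0,20) and [20,40) only touch */
	assert(areas_overlap(30, 10, 25));	/* symmetric in src/dst */
	return 0;
}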
