Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	267
1 file changed, 19 insertions, 248 deletions
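
Note: read as a whole, the hunks below make four kinds of change: they delete helpers
with no remaining callers (clear_extent_new, wait_on_extent_writeback, set_range_dirty,
end_bio_extent_preparewrite, extent_commit_write, extent_prepare_write, extent_bmap,
wait_on_extent_buffer_writeback), rename the local variable page_offset to pg_offset in
__extent_read_full_page, replace open-coded "IS_ERR(em) || !em" checks with
IS_ERR_OR_NULL(), and drop gfp_t mask parameters in favor of hard-coded GFP_NOFS.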
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4bf90abea3d6..64c8b361b539 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -101,7 +101,7 @@ void extent_io_exit(void)
 }
 
 void extent_io_tree_init(struct extent_io_tree *tree,
-			 struct address_space *mapping, gfp_t mask)
+			 struct address_space *mapping)
 {
 	tree->state = RB_ROOT;
 	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
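
Note: with the gfp_t argument gone from extent_io_tree_init(), call sites shrink by one
parameter. A sketch of the caller-side change (the real call sites live outside this
file and are not part of this diff, so treat the example as illustrative):

	/* before */
	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping, GFP_NOFS);
	/* after */
	extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);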
@@ -941,13 +941,6 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 			      NULL, mask);
 }
 
-static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
-			    gfp_t mask)
-{
-	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
-				NULL, mask);
-}
-
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 			struct extent_state **cached_state, gfp_t mask)
 {
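
Note: the only user of clear_extent_new() in this file is extent_prepare_write(), which
is itself deleted further down, so the wrapper goes too. If a future caller needs the
same effect, the open-coded equivalent (taken straight from the removed body) is:

	clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, NULL, mask);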
@@ -963,11 +956,6 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
 				cached_state, mask);
 }
 
-int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
-}
-
 /*
  * either insert or lock state struct between start and end use mask to tell
  * us if waiting is desired.
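
Note: this removal pairs with that of wait_on_extent_buffer_writeback() in the final
hunk, which was the wrapper's remaining user. wait_extent_bit() itself survives, so
waiting on writeback for a range is still expressible directly:

	wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);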
@@ -1028,25 +1016,6 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 }
 
 /*
- * helper function to set pages and extents in the tree dirty
- */
-int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
-	struct page *page;
-
-	while (index <= end_index) {
-		page = find_get_page(tree->mapping, index);
-		BUG_ON(!page);
-		__set_page_dirty_nobuffers(page);
-		page_cache_release(page);
-		index++;
-	}
-	return 0;
-}
-
-/*
  * helper function to set both pages and extents in the tree writeback
  */
 static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
@@ -1819,46 +1788,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 	bio_put(bio);
 }
 
-/*
- * IO done from prepare_write is pretty simple, we just unlock
- * the structs in the extent tree when done, and set the uptodate bits
- * as appropriate.
- */
-static void end_bio_extent_preparewrite(struct bio *bio, int err)
-{
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-	struct extent_io_tree *tree;
-	u64 start;
-	u64 end;
-
-	do {
-		struct page *page = bvec->bv_page;
-		struct extent_state *cached = NULL;
-		tree = &BTRFS_I(page->mapping->host)->io_tree;
-
-		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
-			bvec->bv_offset;
-		end = start + bvec->bv_len - 1;
-
-		if (--bvec >= bio->bi_io_vec)
-			prefetchw(&bvec->bv_page->flags);
-
-		if (uptodate) {
-			set_extent_uptodate(tree, start, end, &cached,
-					    GFP_ATOMIC);
-		} else {
-			ClearPageUptodate(page);
-			SetPageError(page);
-		}
-
-		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
-
-	} while (bvec >= bio->bi_io_vec);
-
-	bio_put(bio);
-}
-
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
 		gfp_t gfp_flags)
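
Note: the deleted completion handler used the old reverse bio_vec walk common to end_io
callbacks of this era. A minimal sketch of the idiom, with names as in the removed code:

	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		/* step backwards; prefetch the flags of the entry we touch next */
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		/* ... per-page completion work ... */
	} while (bvec >= bio->bi_io_vec);

With extent_prepare_write() gone (see the large removal below), nothing submits IO that
completes through this handler, so it can be deleted.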
@@ -2007,7 +1936,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	struct btrfs_ordered_extent *ordered;
 	int ret;
 	int nr = 0;
-	size_t page_offset = 0;
+	size_t pg_offset = 0;
 	size_t iosize;
 	size_t disk_io_size;
 	size_t blocksize = inode->i_sb->s_blocksize;
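
Note: the rename from page_offset to pg_offset here and in the following hunks most
likely avoids shadowing the page_offset() helper from include/linux/pagemap.h, which
computes a page's byte offset within its file. Quoted from memory of that kernel era,
so details may differ:

	static inline loff_t page_offset(struct page *page)
	{
		return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
	}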
@@ -2043,9 +1972,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		char *userpage;
 		struct extent_state *cached = NULL;
 
-		iosize = PAGE_CACHE_SIZE - page_offset;
+		iosize = PAGE_CACHE_SIZE - pg_offset;
 		userpage = kmap_atomic(page, KM_USER0);
-		memset(userpage + page_offset, 0, iosize);
+		memset(userpage + pg_offset, 0, iosize);
 		flush_dcache_page(page);
 		kunmap_atomic(userpage, KM_USER0);
 		set_extent_uptodate(tree, cur, cur + iosize - 1,
@@ -2054,9 +1983,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 					     &cached, GFP_NOFS);
 			break;
 		}
-		em = get_extent(inode, page, page_offset, cur,
+		em = get_extent(inode, page, pg_offset, cur,
 				end - cur + 1, 0);
-		if (IS_ERR(em) || !em) {
+		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
 			unlock_extent(tree, cur, end, GFP_NOFS);
 			break;
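
Note: IS_ERR_OR_NULL() lives in include/linux/err.h and folds the two-part test into
one call; approximately (modulo annotations such as __must_check):

	static inline long IS_ERR_OR_NULL(const void *ptr)
	{
		return !ptr || IS_ERR_VALUE((unsigned long)ptr);
	}

so IS_ERR_OR_NULL(em) reads the same as the old "IS_ERR(em) || !em", with the NULL test
done first.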
@@ -2094,7 +2023,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			struct extent_state *cached = NULL;
 
 			userpage = kmap_atomic(page, KM_USER0);
-			memset(userpage + page_offset, 0, iosize);
+			memset(userpage + pg_offset, 0, iosize);
 			flush_dcache_page(page);
 			kunmap_atomic(userpage, KM_USER0);
 
@@ -2103,7 +2032,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			unlock_extent_cached(tree, cur, cur + iosize - 1,
 					     &cached, GFP_NOFS);
 			cur = cur + iosize;
-			page_offset += iosize;
+			pg_offset += iosize;
 			continue;
 		}
 		/* the get_extent function already copied into the page */
@@ -2112,7 +2041,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			check_page_uptodate(tree, page);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			cur = cur + iosize;
-			page_offset += iosize;
+			pg_offset += iosize;
 			continue;
 		}
 		/* we have an inline extent but it didn't get marked up
@@ -2122,7 +2051,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			SetPageError(page);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			cur = cur + iosize;
-			page_offset += iosize;
+			pg_offset += iosize;
 			continue;
 		}
 
@@ -2135,7 +2064,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
 		pnr -= page->index;
 		ret = submit_extent_page(READ, tree, page,
-					 sector, disk_io_size, page_offset,
+					 sector, disk_io_size, pg_offset,
 					 bdev, bio, pnr,
 					 end_bio_extent_readpage, mirror_num,
 					 *bio_flags,
@@ -2146,7 +2075,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		if (ret)
 			SetPageError(page);
 		cur = cur + iosize;
-		page_offset += iosize;
+		pg_offset += iosize;
 	}
 	if (!nr) {
 		if (!PageError(page))
@@ -2341,7 +2270,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		}
 		em = epd->get_extent(inode, page, pg_offset, cur,
 				     end - cur + 1, 1);
-		if (IS_ERR(em) || !em) {
+		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
 			break;
 		}
@@ -2720,128 +2649,6 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 }
 
 /*
- * simple commit_write call, set_range_dirty is used to mark both
- * the pages and the extent records as dirty
- */
-int extent_commit_write(struct extent_io_tree *tree,
-			struct inode *inode, struct page *page,
-			unsigned from, unsigned to)
-{
-	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-
-	set_page_extent_mapped(page);
-	set_page_dirty(page);
-
-	if (pos > inode->i_size) {
-		i_size_write(inode, pos);
-		mark_inode_dirty(inode);
-	}
-	return 0;
-}
-
-int extent_prepare_write(struct extent_io_tree *tree,
-			 struct inode *inode, struct page *page,
-			 unsigned from, unsigned to, get_extent_t *get_extent)
-{
-	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
-	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
-	u64 block_start;
-	u64 orig_block_start;
-	u64 block_end;
-	u64 cur_end;
-	struct extent_map *em;
-	unsigned blocksize = 1 << inode->i_blkbits;
-	size_t page_offset = 0;
-	size_t block_off_start;
-	size_t block_off_end;
-	int err = 0;
-	int iocount = 0;
-	int ret = 0;
-	int isnew;
-
-	set_page_extent_mapped(page);
-
-	block_start = (page_start + from) & ~((u64)blocksize - 1);
-	block_end = (page_start + to - 1) | (blocksize - 1);
-	orig_block_start = block_start;
-
-	lock_extent(tree, page_start, page_end, GFP_NOFS);
-	while (block_start <= block_end) {
-		em = get_extent(inode, page, page_offset, block_start,
-				block_end - block_start + 1, 1);
-		if (IS_ERR(em) || !em)
-			goto err;
-
-		cur_end = min(block_end, extent_map_end(em) - 1);
-		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
-		block_off_end = block_off_start + blocksize;
-		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
-
-		if (!PageUptodate(page) && isnew &&
-		    (block_off_end > to || block_off_start < from)) {
-			void *kaddr;
-
-			kaddr = kmap_atomic(page, KM_USER0);
-			if (block_off_end > to)
-				memset(kaddr + to, 0, block_off_end - to);
-			if (block_off_start < from)
-				memset(kaddr + block_off_start, 0,
-				       from - block_off_start);
-			flush_dcache_page(page);
-			kunmap_atomic(kaddr, KM_USER0);
-		}
-		if ((em->block_start != EXTENT_MAP_HOLE &&
-		     em->block_start != EXTENT_MAP_INLINE) &&
-		    !isnew && !PageUptodate(page) &&
-		    (block_off_end > to || block_off_start < from) &&
-		    !test_range_bit(tree, block_start, cur_end,
-				    EXTENT_UPTODATE, 1, NULL)) {
-			u64 sector;
-			u64 extent_offset = block_start - em->start;
-			size_t iosize;
-			sector = (em->block_start + extent_offset) >> 9;
-			iosize = (cur_end - block_start + blocksize) &
-				~((u64)blocksize - 1);
-			/*
-			 * we've already got the extent locked, but we
-			 * need to split the state such that our end_bio
-			 * handler can clear the lock.
-			 */
-			set_extent_bit(tree, block_start,
-				       block_start + iosize - 1,
-				       EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
-			ret = submit_extent_page(READ, tree, page,
-					 sector, iosize, page_offset, em->bdev,
-					 NULL, 1,
-					 end_bio_extent_preparewrite, 0,
-					 0, 0);
-			if (ret && !err)
-				err = ret;
-			iocount++;
-			block_start = block_start + iosize;
-		} else {
-			struct extent_state *cached = NULL;
-
-			set_extent_uptodate(tree, block_start, cur_end, &cached,
-					    GFP_NOFS);
-			unlock_extent_cached(tree, block_start, cur_end,
-					     &cached, GFP_NOFS);
-			block_start = cur_end + 1;
-		}
-		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
-		free_extent_map(em);
-	}
-	if (iocount) {
-		wait_extent_bit(tree, orig_block_start,
-				block_end, EXTENT_LOCKED);
-	}
-	check_page_uptodate(tree, page);
-err:
-	/* FIXME, zero out newly allocated blocks on error */
-	return err;
-}
-
-/*
  * a helper for releasepage, this tests for areas of the page that
  * are locked or under IO and drops the related state bits if it is safe
  * to drop the page.
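
Note: extent_commit_write() and extent_prepare_write() implement the old
->prepare_write/->commit_write style of buffered-write setup, which the VFS replaced
with ->write_begin/->write_end; by this point btrfs presumably handles the zeroing and
read-before-write logic in its own write path, leaving these helpers without callers.
Their removal is also what lets clear_extent_new() and end_bio_extent_preparewrite()
(deleted above) go. The block-alignment idiom used here deserves a worked example; for
blocksize = 4096, page_start = 0, from = 100, to = 5000:

	block_start = (page_start + from) & ~((u64)blocksize - 1); /* 100 & ~4095 = 0 */
	block_end = (page_start + to - 1) | (blocksize - 1);       /* 4999 | 4095 = 8191 */

i.e. the byte range [100, 4999] is widened to the whole blocks covering [0, 8191].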
@@ -2899,7 +2706,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 			len = end - start + 1;
 			write_lock(&map->lock);
 			em = lookup_extent_mapping(map, start, len);
-			if (!em || IS_ERR(em)) {
+			if (IS_ERR_OR_NULL(em)) {
 				write_unlock(&map->lock);
 				break;
 			}
@@ -2927,33 +2734,6 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 	return try_release_extent_state(map, tree, page, mask);
 }
 
-sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
-		get_extent_t *get_extent)
-{
-	struct inode *inode = mapping->host;
-	struct extent_state *cached_state = NULL;
-	u64 start = iblock << inode->i_blkbits;
-	sector_t sector = 0;
-	size_t blksize = (1 << inode->i_blkbits);
-	struct extent_map *em;
-
-	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
-			 0, &cached_state, GFP_NOFS);
-	em = get_extent(inode, NULL, 0, start, blksize, 0);
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
-			     start + blksize - 1, &cached_state, GFP_NOFS);
-	if (!em || IS_ERR(em))
-		return 0;
-
-	if (em->block_start > EXTENT_MAP_LAST_BYTE)
-		goto out;
-
-	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
-out:
-	free_extent_map(em);
-	return sector;
-}
-
 /*
  * helper function for fiemap, which doesn't want to see any holes.
  * This maps until we find something past 'last'
@@ -2976,7 +2756,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
 			break;
 		len = (len + sectorsize - 1) & ~(sectorsize - 1);
 		em = get_extent(inode, NULL, 0, offset, len, 0);
-		if (!em || IS_ERR(em))
+		if (IS_ERR_OR_NULL(em))
 			return em;
 
 		/* if this isn't a hole return it */
@@ -3266,8 +3046,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 					  u64 start, unsigned long len,
-					  struct page *page0,
-					  gfp_t mask)
+					  struct page *page0)
 {
 	unsigned long num_pages = num_extent_pages(start, len);
 	unsigned long i;
@@ -3288,7 +3067,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 	}
 	rcu_read_unlock();
 
-	eb = __alloc_extent_buffer(tree, start, len, mask);
+	eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
 	if (!eb)
 		return NULL;
 
@@ -3305,7 +3084,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
 		i = 0;
 	}
 	for (; i < num_pages; i++, index++) {
-		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
+		p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
 		if (!p) {
 			WARN_ON(1);
 			goto free_eb;
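
Note: hard-coding GFP_NOFS here (and in __alloc_extent_buffer above) is the safe choice
for allocations made from filesystem context: GFP_NOFS keeps the allocator from
recursing into filesystem reclaim, which could deadlock on locks the caller already
holds. Threading a mask through was pointless once every caller passed the same value,
which is presumably what motivated dropping the parameter.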
@@ -3377,8 +3156,7 @@ free_eb:
 }
 
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-					 u64 start, unsigned long len,
-					 gfp_t mask)
+					 u64 start, unsigned long len)
 {
 	struct extent_buffer *eb;
 
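
Note: on the caller side, both lookup/allocation helpers lose their trailing argument.
A sketch of the change (actual call sites are in other files and not shown here):

	/* before */
	eb = find_extent_buffer(tree, start, len, GFP_NOFS);
	/* after */
	eb = find_extent_buffer(tree, start, len);

alloc_extent_buffer() changes the same way, dropping the mask after page0.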
@@ -3439,13 +3217,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
 	return 0;
 }
 
-int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
-				    struct extent_buffer *eb)
-{
-	return wait_on_extent_writeback(tree, eb->start,
-					eb->start + eb->len - 1);
-}
-
 int set_extent_buffer_dirty(struct extent_io_tree *tree,
 			    struct extent_buffer *eb)
 {