author | Miao Xie <miaox@cn.fujitsu.com> | 2013-07-25 07:22:37 -0400
---|---|---
committer | Chris Mason <chris.mason@fusionio.com> | 2013-09-01 08:04:36 -0400
commit | 125bac016d60e78120e92904a5b2fc3a5ebf0475 (patch) |
tree | d3120749dacb8646f8d2af851c390f79a3c86640 /fs/btrfs/extent_io.c |
parent | 9974090bdd7ac310d99a8ce6da7d6a19b3099ff9 (diff) |
Btrfs: cache the extent map struct when reading several pages
When we read several pages at once, we do not need to look up the extent
map for every page; we can cache the extent map object instead. This cuts
the time spent searching the extent map and also reduces lock contention
on the extent map tree.
Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
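The pattern the patch introduces can be illustrated with a small self-contained sketch. The following is hypothetical userspace C, not the kernel code: the struct, lookup_extent_map() and get_extent_map_cached() are simplified stand-ins (a plain int refcount instead of atomic_inc(&em->refs), a fabricated lookup instead of the real extent map tree). It shows the core idea: reuse the cached mapping while the requested offset still falls inside it, and pay for a fresh lookup only on a miss.

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct extent_map: a refcounted [start, start+len) range. */
struct extent_map {
	unsigned long long start;
	unsigned long long len;
	int refs;
};

static unsigned long long extent_map_end(const struct extent_map *em)
{
	return em->start + em->len;
}

static void free_extent_map(struct extent_map *em)
{
	if (em && --em->refs == 0)
		free(em);
}

/* Pretend "tree lookup": fabricate a 16 KiB mapping covering 'start'. */
static struct extent_map *lookup_extent_map(unsigned long long start)
{
	struct extent_map *em = malloc(sizeof(*em));

	em->start = start & ~0x3fffULL;
	em->len = 0x4000;
	em->refs = 1;
	return em;
}

/*
 * Cached getter, analogous in spirit to __get_extent_map(): reuse *em_cached
 * if it still covers 'start'; otherwise drop it, do a real lookup, and
 * remember the result for the next page.
 */
static struct extent_map *get_extent_map_cached(unsigned long long start,
						struct extent_map **em_cached)
{
	struct extent_map *em;

	if (*em_cached) {
		em = *em_cached;
		if (start >= em->start && start < extent_map_end(em)) {
			em->refs++;		/* extra ref for the caller */
			return em;
		}
		free_extent_map(em);		/* stale: drop the cache's ref */
		*em_cached = NULL;
	}

	em = lookup_extent_map(start);		/* cache miss: one real lookup */
	em->refs++;				/* one ref stays in the cache ... */
	*em_cached = em;			/* ... one ref goes to the caller */
	return em;
}

int main(void)
{
	struct extent_map *em_cached = NULL;
	unsigned long long off;

	/* Walk "pages" 4 KiB apart: only every 4th iteration misses the cache. */
	for (off = 0; off < 0x10000; off += 0x1000) {
		struct extent_map *em = get_extent_map_cached(off, &em_cached);

		printf("offset %#llx -> extent [%#llx, %#llx)\n",
		       off, em->start, extent_map_end(em));
		free_extent_map(em);		/* release the caller's ref */
	}
	if (em_cached)
		free_extent_map(em_cached);	/* release the cache's ref */
	return 0;
}
```

In the patch itself the cache pointer is threaded as struct extent_map **em_cached from extent_readpages() through __extent_readpages() and __do_contiguous_readpages() into __do_readpage(); the single-page path __extent_read_full_page() passes NULL so it behaves exactly as before, and extent_readpages() drops the last cached reference with free_extent_map() when it is done.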
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r-- | fs/btrfs/extent_io.c | 57
1 file changed, 46 insertions(+), 11 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 0d40d082f0c7..daf180dafe39 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2720,6 +2720,33 @@ void set_page_extent_mapped(struct page *page)
 	}
 }
 
+static struct extent_map *
+__get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
+		 u64 start, u64 len, get_extent_t *get_extent,
+		 struct extent_map **em_cached)
+{
+	struct extent_map *em;
+
+	if (em_cached && *em_cached) {
+		em = *em_cached;
+		if (em->in_tree && start >= em->start &&
+		    start < extent_map_end(em)) {
+			atomic_inc(&em->refs);
+			return em;
+		}
+
+		free_extent_map(em);
+		*em_cached = NULL;
+	}
+
+	em = get_extent(inode, page, pg_offset, start, len, 0);
+	if (em_cached && !IS_ERR_OR_NULL(em)) {
+		BUG_ON(*em_cached);
+		atomic_inc(&em->refs);
+		*em_cached = em;
+	}
+	return em;
+}
 /*
  * basic readpage implementation. Locked extent state structs are inserted
  * into the tree that are removed when the IO is done (by the end_io
@@ -2729,6 +2756,7 @@ void set_page_extent_mapped(struct page *page)
 static int __do_readpage(struct extent_io_tree *tree,
 			 struct page *page,
 			 get_extent_t *get_extent,
+			 struct extent_map **em_cached,
 			 struct bio **bio, int mirror_num,
 			 unsigned long *bio_flags, int rw)
 {
@@ -2793,8 +2821,8 @@ static int __do_readpage(struct extent_io_tree *tree,
 					     &cached, GFP_NOFS);
 			break;
 		}
-		em = get_extent(inode, page, pg_offset, cur,
-				end - cur + 1, 0);
+		em = __get_extent_map(inode, page, pg_offset, cur,
+				      end - cur + 1, get_extent, em_cached);
 		if (IS_ERR_OR_NULL(em)) {
 			SetPageError(page);
 			unlock_extent(tree, cur, end);
@@ -2895,6 +2923,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 					     struct page *pages[], int nr_pages,
 					     u64 start, u64 end,
 					     get_extent_t *get_extent,
+					     struct extent_map **em_cached,
 					     struct bio **bio, int mirror_num,
 					     unsigned long *bio_flags, int rw)
 {
@@ -2915,8 +2944,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 	}
 
 	for (index = 0; index < nr_pages; index++) {
-		__do_readpage(tree, pages[index], get_extent, bio, mirror_num,
-			      bio_flags, rw);
+		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
+			      mirror_num, bio_flags, rw);
 		page_cache_release(pages[index]);
 	}
 }
@@ -2924,6 +2953,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 static void __extent_readpages(struct extent_io_tree *tree,
 			       struct page *pages[],
 			       int nr_pages, get_extent_t *get_extent,
+			       struct extent_map **em_cached,
 			       struct bio **bio, int mirror_num,
 			       unsigned long *bio_flags, int rw)
 {
@@ -2944,8 +2974,9 @@ static void __extent_readpages(struct extent_io_tree *tree,
 		} else {
 			__do_contiguous_readpages(tree, &pages[first_index],
 						  index - first_index, start,
-						  end, get_extent, bio,
-						  mirror_num, bio_flags, rw);
+						  end, get_extent, em_cached,
+						  bio, mirror_num, bio_flags,
+						  rw);
 			start = page_start;
 			end = start + PAGE_CACHE_SIZE - 1;
 			first_index = index;
@@ -2955,7 +2986,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
 	if (end)
 		__do_contiguous_readpages(tree, &pages[first_index],
 					  index - first_index, start,
-					  end, get_extent, bio,
+					  end, get_extent, em_cached, bio,
 					  mirror_num, bio_flags, rw);
 }
 
@@ -2981,8 +3012,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		btrfs_put_ordered_extent(ordered);
 	}
 
-	ret = __do_readpage(tree, page, get_extent, bio, mirror_num, bio_flags,
-			    rw);
+	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
+			    bio_flags, rw);
 	return ret;
 }
 
@@ -3836,6 +3867,7 @@ int extent_readpages(struct extent_io_tree *tree,
 	unsigned long bio_flags = 0;
 	struct page *pagepool[16];
 	struct page *page;
+	struct extent_map *em_cached = NULL;
 	int nr = 0;
 
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
@@ -3852,14 +3884,17 @@ int extent_readpages(struct extent_io_tree *tree,
 		pagepool[nr++] = page;
 		if (nr < ARRAY_SIZE(pagepool))
 			continue;
-		__extent_readpages(tree, pagepool, nr, get_extent,
-				   &bio, 0, &bio_flags, READ);
+		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
+				   &bio, 0, &bio_flags, READ);
 		nr = 0;
 	}
 	if (nr)
-		__extent_readpages(tree, pagepool, nr, get_extent,
-				   &bio, 0, &bio_flags, READ);
+		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
+				   &bio, 0, &bio_flags, READ);
 
+	if (em_cached)
+		free_extent_map(em_cached);
+
 	BUG_ON(!list_empty(pages));
 	if (bio)
 		return submit_one_bio(READ, bio, 0, bio_flags);