about summary refs log tree commit diff stats
path: root/fs/btrfs/extent_io.c
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2008-04-09 16:28:12 -0400
committerChris Mason <chris.mason@oracle.com>2008-09-25 11:04:01 -0400
commitce9adaa5a792c2099a83246265eb4055bc38b6b8 (patch)
tree3ac03bb73cd3aad0c0b194d387cbb5b136c92fa7 /fs/btrfs/extent_io.c
parent728131d8e40c2a47c59ca91a806299c4708029f9 (diff)
Btrfs: Do metadata checksums for reads via a workqueue
Before, metadata checksumming was done by the callers of read_tree_block, which would set EXTENT_CSUM bits in the extent tree to show that a given range of pages was already checksummed and didn't need to be verified again. But, those bits could go away via try_to_releasepage, and the end result was bogus checksum failures on pages that never left the cache. The new code validates checksums when the page is read. It is a little tricky because metadata blocks can span pages and a single read may end up going via multiple bios. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--fs/btrfs/extent_io.c100
1 file changed, 78 insertions(+), 22 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 13cc2360e375..cfc383c17a3a 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2592,6 +2592,22 @@ static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2592 return p; 2592 return p;
2593} 2593}
2594 2594
2595int release_extent_buffer_tail_pages(struct extent_buffer *eb)
2596{
2597 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2598 struct page *page;
2599 unsigned long i;
2600
2601 if (num_pages == 1)
2602 return 0;
2603 for (i = 1; i < num_pages; i++) {
2604 page = extent_buffer_page(eb, i);
2605 page_cache_release(page);
2606 }
2607 return 0;
2608}
2609
2610
2595int invalidate_extent_lru(struct extent_io_tree *tree, u64 start, 2611int invalidate_extent_lru(struct extent_io_tree *tree, u64 start,
2596 unsigned long len) 2612 unsigned long len)
2597{ 2613{
@@ -2609,9 +2625,6 @@ int invalidate_extent_lru(struct extent_io_tree *tree, u64 start,
2609 if (eb->start <= start && eb->start + eb->len > start) { 2625 if (eb->start <= start && eb->start + eb->len > start) {
2610 eb->flags &= ~EXTENT_UPTODATE; 2626 eb->flags &= ~EXTENT_UPTODATE;
2611 } 2627 }
2612 if (eb->start == start) {
2613 eb->flags &= ~EXTENT_CSUM;
2614 }
2615 cur = cur->next; 2628 cur = cur->next;
2616 } while (cur != lru); 2629 } while (cur != lru);
2617out: 2630out:
@@ -2682,7 +2695,6 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
2682 page_cache_get(page0); 2695 page_cache_get(page0);
2683 mark_page_accessed(page0); 2696 mark_page_accessed(page0);
2684 set_page_extent_mapped(page0); 2697 set_page_extent_mapped(page0);
2685 WARN_ON(!PageUptodate(page0));
2686 set_page_extent_head(page0, len); 2698 set_page_extent_head(page0, len);
2687 } else { 2699 } else {
2688 i = 0; 2700 i = 0;
@@ -2933,13 +2945,39 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
2933} 2945}
2934EXPORT_SYMBOL(set_extent_buffer_uptodate); 2946EXPORT_SYMBOL(set_extent_buffer_uptodate);
2935 2947
2948int extent_range_uptodate(struct extent_io_tree *tree,
2949 u64 start, u64 end)
2950{
2951 struct page *page;
2952 int ret;
2953 int pg_uptodate = 1;
2954 int uptodate;
2955 unsigned long index;
2956
2957 ret = test_range_bit(tree, start, end, EXTENT_UPTODATE, 1);
2958 if (ret)
2959 return 1;
2960 while(start <= end) {
2961 index = start >> PAGE_CACHE_SHIFT;
2962 page = find_get_page(tree->mapping, index);
2963 uptodate = PageUptodate(page);
2964 page_cache_release(page);
2965 if (!uptodate) {
2966 pg_uptodate = 0;
2967 break;
2968 }
2969 start += PAGE_CACHE_SIZE;
2970 }
2971 return pg_uptodate;
2972}
2973
2936int extent_buffer_uptodate(struct extent_io_tree *tree, 2974int extent_buffer_uptodate(struct extent_io_tree *tree,
2937 struct extent_buffer *eb) 2975 struct extent_buffer *eb)
2938{ 2976{
2939 int ret = 0; 2977 int ret = 0;
2940 int ret2; 2978 int ret2;
2941 int num_pages; 2979 unsigned long num_pages;
2942 int i; 2980 unsigned long i;
2943 struct page *page; 2981 struct page *page;
2944 int pg_uptodate = 1; 2982 int pg_uptodate = 1;
2945 2983
@@ -2975,13 +3013,16 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
2975 struct page *page; 3013 struct page *page;
2976 int err; 3014 int err;
2977 int ret = 0; 3015 int ret = 0;
3016 int locked_pages = 0;
3017 int all_uptodate = 1;
3018 int inc_all_pages = 0;
2978 unsigned long num_pages; 3019 unsigned long num_pages;
2979 struct bio *bio = NULL; 3020 struct bio *bio = NULL;
2980 3021
2981 if (eb->flags & EXTENT_UPTODATE) 3022 if (eb->flags & EXTENT_UPTODATE)
2982 return 0; 3023 return 0;
2983 3024
2984 if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1, 3025 if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2985 EXTENT_UPTODATE, 1)) { 3026 EXTENT_UPTODATE, 1)) {
2986 return 0; 3027 return 0;
2987 } 3028 }
@@ -2997,17 +3038,30 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
2997 num_pages = num_extent_pages(eb->start, eb->len); 3038 num_pages = num_extent_pages(eb->start, eb->len);
2998 for (i = start_i; i < num_pages; i++) { 3039 for (i = start_i; i < num_pages; i++) {
2999 page = extent_buffer_page(eb, i); 3040 page = extent_buffer_page(eb, i);
3000 if (PageUptodate(page)) {
3001 continue;
3002 }
3003 if (!wait) { 3041 if (!wait) {
3004 if (TestSetPageLocked(page)) { 3042 if (TestSetPageLocked(page))
3005 continue; 3043 goto unlock_exit;
3006 }
3007 } else { 3044 } else {
3008 lock_page(page); 3045 lock_page(page);
3009 } 3046 }
3047 locked_pages++;
3048 if (!PageUptodate(page)) {
3049 all_uptodate = 0;
3050 }
3051 }
3052 if (all_uptodate) {
3053 if (start_i == 0)
3054 eb->flags |= EXTENT_UPTODATE;
3055 goto unlock_exit;
3056 }
3057
3058 for (i = start_i; i < num_pages; i++) {
3059 page = extent_buffer_page(eb, i);
3060 if (inc_all_pages)
3061 page_cache_get(page);
3010 if (!PageUptodate(page)) { 3062 if (!PageUptodate(page)) {
3063 if (start_i == 0)
3064 inc_all_pages = 1;
3011 err = __extent_read_full_page(tree, page, 3065 err = __extent_read_full_page(tree, page,
3012 get_extent, &bio); 3066 get_extent, &bio);
3013 if (err) { 3067 if (err) {
@@ -3034,6 +3088,16 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
3034 if (!ret) 3088 if (!ret)
3035 eb->flags |= EXTENT_UPTODATE; 3089 eb->flags |= EXTENT_UPTODATE;
3036 return ret; 3090 return ret;
3091
3092unlock_exit:
3093 i = start_i;
3094 while(locked_pages > 0) {
3095 page = extent_buffer_page(eb, i);
3096 i++;
3097 unlock_page(page);
3098 locked_pages--;
3099 }
3100 return ret;
3037} 3101}
3038EXPORT_SYMBOL(read_extent_buffer_pages); 3102EXPORT_SYMBOL(read_extent_buffer_pages);
3039 3103
@@ -3048,7 +3112,6 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3048 char *dst = (char *)dstv; 3112 char *dst = (char *)dstv;
3049 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); 3113 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3050 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; 3114 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3051 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
3052 3115
3053 WARN_ON(start > eb->len); 3116 WARN_ON(start > eb->len);
3054 WARN_ON(start + len > eb->start + eb->len); 3117 WARN_ON(start + len > eb->start + eb->len);
@@ -3057,11 +3120,6 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
3057 3120
3058 while(len > 0) { 3121 while(len > 0) {
3059 page = extent_buffer_page(eb, i); 3122 page = extent_buffer_page(eb, i);
3060 if (!PageUptodate(page)) {
3061 printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
3062 WARN_ON(1);
3063 }
3064 WARN_ON(!PageUptodate(page));
3065 3123
3066 cur = min(len, (PAGE_CACHE_SIZE - offset)); 3124 cur = min(len, (PAGE_CACHE_SIZE - offset));
3067 kaddr = kmap_atomic(page, KM_USER1); 3125 kaddr = kmap_atomic(page, KM_USER1);
@@ -3105,7 +3163,6 @@ printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len,
3105 } 3163 }
3106 3164
3107 p = extent_buffer_page(eb, i); 3165 p = extent_buffer_page(eb, i);
3108 WARN_ON(!PageUptodate(p));
3109 kaddr = kmap_atomic(p, km); 3166 kaddr = kmap_atomic(p, km);
3110 *token = kaddr; 3167 *token = kaddr;
3111 *map = kaddr + offset; 3168 *map = kaddr + offset;
@@ -3165,7 +3222,6 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
3165 3222
3166 while(len > 0) { 3223 while(len > 0) {
3167 page = extent_buffer_page(eb, i); 3224 page = extent_buffer_page(eb, i);
3168 WARN_ON(!PageUptodate(page));
3169 3225
3170 cur = min(len, (PAGE_CACHE_SIZE - offset)); 3226 cur = min(len, (PAGE_CACHE_SIZE - offset));
3171 3227