Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--  fs/btrfs/extent_io.c  | 138
1 file changed, 65 insertions(+), 73 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1b319df29eee..f173c5af6461 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4,7 +4,6 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/page-flags.h>
-#include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/blkdev.h>
 #include <linux/swap.h>
@@ -1834,7 +1833,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
  */
 static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
 {
-	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 start = page_offset(page);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
 		SetPageUptodate(page);
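(The page_offset() conversions here and repeated below are pure cleanups: the helper from include/linux/pagemap.h is, modulo the loff_t vs u64 return type, exactly the expression being replaced:

	/* pagemap.h helper this patch switches to */
	static inline loff_t page_offset(struct page *page)
	{
		return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
	}

so these hunks change readability only, not behavior.)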
@@ -1846,7 +1845,7 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
  */
 static void check_page_locked(struct extent_io_tree *tree, struct page *page)
 {
-	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 start = page_offset(page);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
 		unlock_page(page);
@@ -1895,13 +1894,11 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
 	if (ret)
 		err = ret;
 
-	if (did_repair) {
-		ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
-					rec->start + rec->len - 1,
-					EXTENT_DAMAGED, GFP_NOFS);
-		if (ret && !err)
-			err = ret;
-	}
+	ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
+				rec->start + rec->len - 1,
+				EXTENT_DAMAGED, GFP_NOFS);
+	if (ret && !err)
+		err = ret;
 
 	kfree(rec);
 	return err;
@@ -1932,10 +1929,15 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
 	u64 map_length = 0;
 	u64 sector;
 	struct btrfs_bio *bbio = NULL;
+	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
 	int ret;
 
 	BUG_ON(!mirror_num);
 
+	/* we can't repair anything in raid56 yet */
+	if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
+		return 0;
+
 	bio = bio_alloc(GFP_NOFS, 1);
 	if (!bio)
 		return -EIO;
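Repair works by rewriting the bad sector in place with data taken from a good copy; on RAID5/6 a non-zero "mirror" is reconstructed from parity rather than stored verbatim on any single device, so there is no sector to rewrite and the function now bails out early. A sketch of the helper this relies on, assuming the definition the raid56 series adds to volumes.c:

	/* sketch: does this logical range live in a parity-protected chunk? */
	int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
				   u64 logical, u64 len, int mirror_num)
	{
		struct extent_map_tree *em_tree = &map_tree->map_tree;
		struct extent_map *em;
		struct map_lookup *map;
		int ret = 0;

		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, logical, len);
		read_unlock(&em_tree->lock);
		BUG_ON(!em);

		map = (struct map_lookup *)em->bdev;
		if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
				 BTRFS_BLOCK_GROUP_RAID6))
			ret = 1;
		free_extent_map(em);
		return ret;
	}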
@@ -1960,7 +1962,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
 		return -EIO;
 	}
 	bio->bi_bdev = dev->bdev;
-	bio_add_page(bio, page, length, start-page_offset(page));
+	bio_add_page(bio, page, length, start - page_offset(page));
 	btrfsic_submit_bio(WRITE_SYNC, bio);
 	wait_for_completion(&compl);
 
@@ -2052,6 +2054,7 @@ static int clean_io_failure(u64 start, struct page *page)
 						failrec->failed_mirror);
 			did_repair = !ret;
 		}
+		ret = 0;
 	}
 
 out:
@@ -2293,8 +2296,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
 		struct page *page = bvec->bv_page;
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
-			 bvec->bv_offset;
+		start = page_offset(page) + bvec->bv_offset;
 		end = start + bvec->bv_len - 1;
 
 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
@@ -2353,8 +2355,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 			 (long int)bio->bi_bdev);
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
-			bvec->bv_offset;
+		start = page_offset(page) + bvec->bv_offset;
 		end = start + bvec->bv_len - 1;
 
 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
@@ -2471,7 +2472,7 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
 	struct extent_io_tree *tree = bio->bi_private;
 	u64 start;
 
-	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
+	start = page_offset(page) + bvec->bv_offset;
 
 	bio->bi_private = NULL;
 
@@ -2489,13 +2490,13 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
 	return ret;
 }
 
-static int merge_bio(struct extent_io_tree *tree, struct page *page,
+static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
 		     unsigned long offset, size_t size, struct bio *bio,
 		     unsigned long bio_flags)
 {
 	int ret = 0;
 	if (tree->ops && tree->ops->merge_bio_hook)
-		ret = tree->ops->merge_bio_hook(page, offset, size, bio,
+		ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
 						bio_flags);
 	BUG_ON(ret < 0);
 	return ret;
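The extra rw argument threads the request direction down to the merge hook so the raid56 code can apply different merging limits to reads and writes (write bios must not be allowed to cross a RAID5/6 stripe). Correspondingly, the merge_bio_hook member of struct extent_io_ops is assumed to have grown the same leading parameter:

	/* sketch of the updated hook signature implied by the call above */
	int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset,
			      size_t size, struct bio *bio,
			      unsigned long bio_flags);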
@@ -2530,7 +2531,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 			sector;
 
 		if (prev_bio_flags != bio_flags || !contig ||
-		    merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
+		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
 		    bio_add_page(bio, page, page_size, offset) < page_size) {
 			ret = submit_one_bio(rw, bio, mirror_num,
 					     prev_bio_flags);
@@ -2595,7 +2596,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 				   unsigned long *bio_flags)
 {
 	struct inode *inode = page->mapping->host;
-	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 start = page_offset(page);
 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
 	u64 end;
 	u64 cur = start;
@@ -2648,6 +2649,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		}
 	}
 	while (cur <= end) {
+		unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+
 		if (cur >= last_byte) {
 			char *userpage;
 			struct extent_state *cached = NULL;
@@ -2682,7 +2685,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 
 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
 		cur_end = min(extent_map_end(em) - 1, end);
-		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
+		iosize = ALIGN(iosize, blocksize);
 		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
 			disk_io_size = em->block_len;
 			sector = em->block_start >> 9;
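ALIGN() from include/linux/kernel.h is the same round-up mask arithmetic, just named; for a power-of-two a it expands (via __ALIGN_KERNEL_MASK) to roughly:

	/* sketch of the expansion: round x up to the next multiple of a */
	#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((a) - 1))
	/* e.g. blocksize 4096: ALIGN(5000, 4096) == 8192,
	 *                      ALIGN(4096, 4096) == 4096 */

The real macro's typeof-based mask also takes care of the explicit (u64) cast the open-coded version needed. The same substitution recurs in __extent_writepage(), extent_invalidatepage(), and get_extent_skip_holes() below.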
@@ -2735,26 +2738,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			continue;
 		}
 
-		ret = 0;
-		if (tree->ops && tree->ops->readpage_io_hook) {
-			ret = tree->ops->readpage_io_hook(page, cur,
-							  cur + iosize - 1);
-		}
-		if (!ret) {
-			unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
-			pnr -= page->index;
-			ret = submit_extent_page(READ, tree, page,
+		pnr -= page->index;
+		ret = submit_extent_page(READ, tree, page,
					 sector, disk_io_size, pg_offset,
					 bdev, bio, pnr,
					 end_bio_extent_readpage, mirror_num,
					 *bio_flags,
					 this_bio_flag);
 		if (!ret) {
 			nr++;
 			*bio_flags = this_bio_flag;
-		}
-		}
-		if (ret) {
+		} else {
 			SetPageError(page);
 			unlock_extent(tree, cur, cur + iosize - 1);
 		}
@@ -2806,7 +2800,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	struct inode *inode = page->mapping->host;
 	struct extent_page_data *epd = data;
 	struct extent_io_tree *tree = epd->tree;
-	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 start = page_offset(page);
 	u64 delalloc_start;
 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
 	u64 end;
@@ -2982,7 +2976,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		BUG_ON(extent_map_end(em) <= cur);
 		BUG_ON(end < cur);
 		iosize = min(extent_map_end(em) - cur, end - cur + 1);
-		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
+		iosize = ALIGN(iosize, blocksize);
 		sector = (em->block_start + extent_offset) >> 9;
 		bdev = em->bdev;
 		block_start = em->block_start;
@@ -3124,12 +3118,9 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
 		set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
 		spin_unlock(&eb->refs_lock);
 		btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
-		spin_lock(&fs_info->delalloc_lock);
-		if (fs_info->dirty_metadata_bytes >= eb->len)
-			fs_info->dirty_metadata_bytes -= eb->len;
-		else
-			WARN_ON(1);
-		spin_unlock(&fs_info->delalloc_lock);
+		__percpu_counter_add(&fs_info->dirty_metadata_bytes,
+				     -eb->len,
+				     fs_info->dirty_metadata_batch);
 		ret = 1;
 	} else {
 		spin_unlock(&eb->refs_lock);
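dirty_metadata_bytes moves from a plain u64 guarded by the global delalloc_lock to a struct percpu_counter, so hot metadata dirtying and writeback no longer serialize on one spinlock. __percpu_counter_add() accumulates per-cpu deltas and only folds them into the shared count, under the counter's own lock, once a CPU's delta reaches the batch. A hedged sketch, close to lib/percpu_counter.c of this era:

	void __percpu_counter_add(struct percpu_counter *fbc, s64 amount,
				  s32 batch)
	{
		s64 count;

		preempt_disable();
		count = __this_cpu_read(*fbc->counters) + amount;
		if (count >= batch || count <= -batch) {
			/* rare: fold this CPU's delta into the shared count */
			raw_spin_lock(&fbc->lock);
			fbc->count += count;
			__this_cpu_write(*fbc->counters, 0);
			raw_spin_unlock(&fbc->lock);
		} else {
			/* common case: stays CPU-local and lock-free */
			__this_cpu_write(*fbc->counters, count);
		}
		preempt_enable();
	}

The old underflow check plus WARN_ON also disappears: per-cpu deltas can legitimately go transiently negative, and only the batched sum is meaningful.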
@@ -3446,15 +3437,9 @@ retry:
 			 * swizzled back from swapper_space to tmpfs file
 			 * mapping
 			 */
-			if (tree->ops &&
-			    tree->ops->write_cache_pages_lock_hook) {
-				tree->ops->write_cache_pages_lock_hook(page,
-							       data, flush_fn);
-			} else {
-				if (!trylock_page(page)) {
-					flush_fn(data);
-					lock_page(page);
-				}
+			if (!trylock_page(page)) {
+				flush_fn(data);
+				lock_page(page);
 			}
 
 			if (unlikely(page->mapping != mapping)) {
@@ -3674,11 +3659,11 @@ int extent_invalidatepage(struct extent_io_tree *tree,
 			  struct page *page, unsigned long offset)
 {
 	struct extent_state *cached_state = NULL;
-	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
+	u64 start = page_offset(page);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
 
-	start += (offset + blocksize - 1) & ~(blocksize - 1);
+	start += ALIGN(offset, blocksize);
 	if (start > end)
 		return 0;
 
@@ -3700,7 +3685,7 @@ int try_release_extent_state(struct extent_map_tree *map,
 			     struct extent_io_tree *tree, struct page *page,
 			     gfp_t mask)
 {
-	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 start = page_offset(page);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 	int ret = 1;
 
@@ -3739,7 +3724,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 			       gfp_t mask)
 {
 	struct extent_map *em;
-	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+	u64 start = page_offset(page);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 
 	if ((mask & __GFP_WAIT) &&
@@ -3797,7 +3782,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
 		len = last - offset;
 		if (len == 0)
 			break;
-		len = (len + sectorsize - 1) & ~(sectorsize - 1);
+		len = ALIGN(len, sectorsize);
 		em = get_extent(inode, NULL, 0, offset, len, 0);
 		if (IS_ERR_OR_NULL(em))
 			return em;
@@ -3995,8 +3980,6 @@ static void __free_extent_buffer(struct extent_buffer *eb)
 	list_del(&eb->leak_list);
 	spin_unlock_irqrestore(&leak_lock, flags);
 #endif
-	if (eb->pages && eb->pages != eb->inline_pages)
-		kfree(eb->pages);
 	kmem_cache_free(extent_buffer_cache, eb);
 }
 
@@ -4037,19 +4020,12 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
 	atomic_set(&eb->refs, 1);
 	atomic_set(&eb->io_pages, 0);
 
-	if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
-		struct page **pages;
-		int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT;
-		pages = kzalloc(num_pages, mask);
-		if (!pages) {
-			__free_extent_buffer(eb);
-			return NULL;
-		}
-		eb->pages = pages;
-	} else {
-		eb->pages = eb->inline_pages;
-	}
+	/*
+	 * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
+	 */
+	BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
+		> MAX_INLINE_EXTENT_BUFFER_SIZE);
+	BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
 
 	return eb;
 }
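With the separately kzalloc'd pages array gone, every extent_buffer must fit the fixed inline_pages array, and that bound is now enforced at build time. BUILD_BUG_ON(cond) breaks compilation when cond is nonzero; one classic sketch of how such a macro works:

	/* sketch: a negative array size is a compile error iff cond != 0 */
	#define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

(As a side effect, the removed branch appears to have under-allocated anyway: kzalloc(num_pages, mask) requests num_pages bytes, not num_pages pointers.)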
@@ -4180,6 +4156,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 
 static void check_buffer_tree_ref(struct extent_buffer *eb)
 {
+	int refs;
 	/* the ref bit is tricky. We have to make sure it is set
 	 * if we have the buffer dirty. Otherwise the
 	 * code to free a buffer can end up dropping a dirty
@@ -4200,6 +4177,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
 	 * So bump the ref count first, then set the bit. If someone
 	 * beat us to it, drop the ref we added.
 	 */
+	refs = atomic_read(&eb->refs);
+	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+		return;
+
 	spin_lock(&eb->refs_lock);
 	if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
 		atomic_inc(&eb->refs);
@@ -4401,9 +4382,20 @@ static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
 
 void free_extent_buffer(struct extent_buffer *eb)
 {
+	int refs;
+	int old;
 	if (!eb)
 		return;
 
+	while (1) {
+		refs = atomic_read(&eb->refs);
+		if (refs <= 3)
+			break;
+		old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
+		if (old == refs)
+			return;
+	}
+
 	spin_lock(&eb->refs_lock);
 	if (atomic_read(&eb->refs) == 2 &&
 	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
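free_extent_buffer() previously always took refs_lock just to drop a reference. The new loop drops refs locklessly while the count stays above 3; at 3 or below the buffer may be nearing release (the == 2 dummy check and the release path run under refs_lock), so it falls through to the locked slow path. atomic_cmpxchg(v, old, new) stores new only if *v still equals old and returns the prior value, which gives the usual retry loop; generalized:

	/* sketch: lockless decrement while the count stays above a floor */
	static int dec_if_above(atomic_t *v, int floor)
	{
		int refs, old;

		do {
			refs = atomic_read(v);
			if (refs <= floor)
				return 0;	/* caller takes the locked slow path */
			old = atomic_cmpxchg(v, refs, refs - 1);
		} while (old != refs);		/* lost a race; re-read and retry */
		return 1;			/* dropped one ref without the lock */
	}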