author     Arne Jansen <sensille@gmx.net>       2011-04-06 06:02:20 -0400
committer  Chris Mason <chris.mason@oracle.com> 2011-04-11 20:45:36 -0400
commit     507903b81840a70cc6a179d4eb03584ad50e8c5b (patch)
tree       ec3ccd0d2dd8feb15f4b01627112c8e2cf74d989 /fs
parent     e15d0542426f063dc53b4c51bdfc11e0bbe4d298 (diff)
btrfs: using cached extent_state in set/unlock combinations
In several places the sequence (set_extent_uptodate, unlock_extent) is used,
which leads to a duplicate lookup of the extent state. This patch lets
set_extent_uptodate return a cached extent_state, which can then be passed
to unlock_extent_cached.
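
The resulting caller pattern, as a minimal sketch using the signatures this
patch introduces (tree, start, end and the gfp flag stand in for whatever
the call site has at hand):

	struct extent_state *cached = NULL;

	/* the set caches the extent_state and takes a reference on it */
	set_extent_uptodate(tree, start, end, &cached, GFP_NOFS);

	/*
	 * the unlock consumes the cached state and drops the reference,
	 * skipping the second tree lookup
	 */
	unlock_extent_cached(tree, start, end, &cached, GFP_NOFS);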
The occurrences of the above sequence are updated to use the cache. Only
end_bio_extent_readpage is handled differently: it first looks up a cached
state, which it passes to readpage_end_io_hook (whose prototype takes an
extent_state) and later reuses for the set/unlock pair.
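
Condensed from the end_bio_extent_readpage hunk below (error paths elided),
the single lookup now feeds both the hook and the final set/unlock:

	struct extent_state *cached = NULL;
	struct extent_state *state;

	spin_lock(&tree->lock);
	state = find_first_extent_bit_state(tree, start, 0);
	if (state)
		cache_state(state, &cached);	/* ref dropped by the unlock */
	spin_unlock(&tree->lock);

	ret = tree->ops->readpage_end_io_hook(page, start, end, state);

	if (uptodate)
		set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC);
	unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);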
Signed-off-by: Arne Jansen <sensille@gmx.net>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/extent_io.c | 70
-rw-r--r--  fs/btrfs/extent_io.h |  2
-rw-r--r--  fs/btrfs/inode.c     |  2
3 files changed, 55 insertions, 19 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 864e0496cc1c..8dcfb77678de 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -690,6 +690,17 @@ static void cache_state(struct extent_state *state,
 	}
 }
 
+static void uncache_state(struct extent_state **cached_ptr)
+{
+	if (cached_ptr && (*cached_ptr)) {
+		struct extent_state *state = *cached_ptr;
+		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
+			*cached_ptr = NULL;
+			free_extent_state(state);
+		}
+	}
+}
+
 /*
  * set some bits on a range in the tree. This may require allocations or
  * sleeping, so the gfp mask is used to indicate what is allowed.
@@ -940,10 +951,10 @@ static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-			gfp_t mask)
+			struct extent_state **cached_state, gfp_t mask)
 {
-	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
-			      NULL, mask);
+	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
+			      NULL, cached_state, mask);
 }
 
 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -1012,8 +1023,7 @@ int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
 				mask);
 }
 
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-		  gfp_t mask)
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
 {
 	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
 				mask);
@@ -1735,6 +1745,9 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 
 	do {
 		struct page *page = bvec->bv_page;
+		struct extent_state *cached = NULL;
+		struct extent_state *state;
+
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1749,9 +1762,20 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		if (++bvec <= bvec_end)
 			prefetchw(&bvec->bv_page->flags);
 
+		spin_lock(&tree->lock);
+		state = find_first_extent_bit_state(tree, start, 0);
+		if (state) {
+			/*
+			 * take a reference on the state, unlock will drop
+			 * the ref
+			 */
+			cache_state(state, &cached);
+		}
+		spin_unlock(&tree->lock);
+
 		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
 			ret = tree->ops->readpage_end_io_hook(page, start, end,
-							      NULL);
+							      state);
 			if (ret)
 				uptodate = 0;
 		}
@@ -1764,15 +1788,16 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 					test_bit(BIO_UPTODATE, &bio->bi_flags);
 				if (err)
 					uptodate = 0;
+				uncache_state(&cached);
 				continue;
 			}
 		}
 
 		if (uptodate) {
-			set_extent_uptodate(tree, start, end,
+			set_extent_uptodate(tree, start, end, &cached,
 					    GFP_ATOMIC);
 		}
-		unlock_extent(tree, start, end, GFP_ATOMIC);
+		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
 		if (whole_page) {
 			if (uptodate) {
@@ -1811,6 +1836,7 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
 
 	do {
 		struct page *page = bvec->bv_page;
+		struct extent_state *cached = NULL;
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1821,13 +1847,14 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
 			prefetchw(&bvec->bv_page->flags);
 
 		if (uptodate) {
-			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
+			set_extent_uptodate(tree, start, end, &cached,
+					    GFP_ATOMIC);
 		} else {
 			ClearPageUptodate(page);
 			SetPageError(page);
 		}
 
-		unlock_extent(tree, start, end, GFP_ATOMIC);
+		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
 
 	} while (bvec >= bio->bi_io_vec);
 
@@ -2016,14 +2043,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 	while (cur <= end) {
 		if (cur >= last_byte) {
 			char *userpage;
+			struct extent_state *cached = NULL;
+
 			iosize = PAGE_CACHE_SIZE - page_offset;
 			userpage = kmap_atomic(page, KM_USER0);
 			memset(userpage + page_offset, 0, iosize);
 			flush_dcache_page(page);
 			kunmap_atomic(userpage, KM_USER0);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
-					    GFP_NOFS);
-			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+					    &cached, GFP_NOFS);
+			unlock_extent_cached(tree, cur, cur + iosize - 1,
+					     &cached, GFP_NOFS);
 			break;
 		}
 		em = get_extent(inode, page, page_offset, cur,
@@ -2063,14 +2093,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		/* we've found a hole, just zero and go on */
 		if (block_start == EXTENT_MAP_HOLE) {
 			char *userpage;
+			struct extent_state *cached = NULL;
+
 			userpage = kmap_atomic(page, KM_USER0);
 			memset(userpage + page_offset, 0, iosize);
 			flush_dcache_page(page);
 			kunmap_atomic(userpage, KM_USER0);
 
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
-					    GFP_NOFS);
-			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
+					    &cached, GFP_NOFS);
+			unlock_extent_cached(tree, cur, cur + iosize - 1,
+					     &cached, GFP_NOFS);
 			cur = cur + iosize;
 			page_offset += iosize;
 			continue;
@@ -2789,9 +2822,12 @@ int extent_prepare_write(struct extent_io_tree *tree,
 			iocount++;
 			block_start = block_start + iosize;
 		} else {
-			set_extent_uptodate(tree, block_start, cur_end,
+			struct extent_state *cached = NULL;
+
+			set_extent_uptodate(tree, block_start, cur_end, &cached,
 					    GFP_NOFS);
-			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
+			unlock_extent_cached(tree, block_start, cur_end,
+					     &cached, GFP_NOFS);
 			block_start = cur_end + 1;
 		}
 		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
@@ -3457,7 +3493,7 @@ int set_extent_buffer_uptodate(struct extent_io_tree *tree,
 	num_pages = num_extent_pages(eb->start, eb->len);
 
 	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
-			    GFP_NOFS);
+			    NULL, GFP_NOFS);
 	for (i = 0; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index f62c5442835d..af2d7179c372 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -208,7 +208,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		   int bits, int exclusive_bits, u64 *failed_start,
 		   struct extent_state **cached_state, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
-			gfp_t mask);
+			struct extent_state **cached_state, gfp_t mask);
 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 		   gfp_t mask);
 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index edafc28883af..5a993e0ec865 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5226,7 +5226,7 @@ again:
 			btrfs_mark_buffer_dirty(leaf);
 		}
 		set_extent_uptodate(io_tree, em->start,
-				extent_map_end(em) - 1, GFP_NOFS);
+				extent_map_end(em) - 1, NULL, GFP_NOFS);
 		goto insert;
 	} else {
 		printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);