author    Chris Mason <chris.mason@oracle.com>  2007-12-18 16:15:09 -0500
committer Chris Mason <chris.mason@oracle.com>  2008-09-25 11:03:58 -0400
commit    6da6abae027e2dbc59bca5f4168b0760f25068c7
tree      a1e9c70c930e8b01baead984b706042067148788
parent    c59f8951d48c5eb000926935f3ab063d8181d1ee
Btrfs: Back port to 2.6.18-el kernels
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_map.c')
-rw-r--r--   fs/btrfs/extent_map.c   168
1 file changed, 160 insertions(+), 8 deletions(-)
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 8326a18413ff..0d1e59a86e49 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -1585,8 +1585,18 @@ extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
 
 static int submit_one_bio(int rw, struct bio *bio)
 {
+	u64 maxsector;
 	int ret = 0;
+
 	bio_get(bio);
+
+	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+	if (maxsector < bio->bi_sector) {
+		printk("sector too large max %Lu got %llu\n", maxsector,
+			(unsigned long long)bio->bi_sector);
+		WARN_ON(1);
+	}
+
 	submit_bio(rw, bio);
 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
 		ret = -EOPNOTSUPP;
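The hunk above adds a cheap sanity check before the bio is handed to the block layer: the backing device's size (taken from the block device inode) is converted to 512-byte sectors and compared against the bio's starting sector. Note that the check only warns and dumps a stack trace; the bio is still submitted, so this is a debugging aid rather than a guard. A standalone sketch of the same check, with the two printk format specifiers made consistent (the patch itself mixes %Lu and %llu), assuming the 2.6.18-era bio layout:

	/*
	 * Sketch of the bounds check added above, as a helper (hypothetical
	 * name).  Assumes bi_sector is the starting sector and
	 * bi_bdev->bd_inode->i_size is the device size in bytes.
	 */
	static void check_bio_in_range(struct bio *bio)
	{
		/* device size in 512-byte sectors */
		u64 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;

		if (maxsector < bio->bi_sector) {
			printk(KERN_WARNING "sector too large max %llu got %llu\n",
			       (unsigned long long)maxsector,
			       (unsigned long long)bio->bi_sector);
			WARN_ON(1);	/* stack trace only; submission proceeds */
		}
	}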
@@ -1678,8 +1688,12 @@ static int __extent_read_full_page(struct extent_map_tree *tree,
 
 	while (cur <= end) {
 		if (cur >= last_byte) {
+			char *userpage;
 			iosize = PAGE_CACHE_SIZE - page_offset;
-			zero_user_page(page, page_offset, iosize, KM_USER0);
+			userpage = kmap_atomic(page, KM_USER0);
+			memset(userpage + page_offset, 0, iosize);
+			flush_dcache_page(page);
+			kunmap_atomic(userpage, KM_USER0);
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    GFP_NOFS);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
@@ -1707,7 +1721,12 @@ static int __extent_read_full_page(struct extent_map_tree *tree,
 
 		/* we've found a hole, just zero and go on */
 		if (block_start == EXTENT_MAP_HOLE) {
-			zero_user_page(page, page_offset, iosize, KM_USER0);
+			char *userpage;
+			userpage = kmap_atomic(page, KM_USER0);
+			memset(userpage + page_offset, 0, iosize);
+			flush_dcache_page(page);
+			kunmap_atomic(userpage, KM_USER0);
+
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    GFP_NOFS);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
@@ -1804,9 +1823,14 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	}
 
 	if (page->index == end_index) {
+		char *userpage;
+
 		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
-		zero_user_page(page, offset,
-			       PAGE_CACHE_SIZE - offset, KM_USER0);
+
+		userpage = kmap_atomic(page, KM_USER0);
+		memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
+		flush_dcache_page(page);
+		kunmap_atomic(userpage, KM_USER0);
 	}
 
 	set_page_extent_mapped(page);
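All three conversions above replace zero_user_page(), which does not exist on 2.6.18, with its open-coded equivalent: map the page with kmap_atomic(), memset() the target range, flush the data cache, and unmap. flush_dcache_page() matters because the kernel mapping just dirtied data that user space may read through a different virtual address. Collected into one helper (hypothetical name, sketched against the 2.6.18 API), the pattern is:

	/*
	 * Open-coded equivalent of the newer zero_user_page(), for kernels
	 * that lack it (hypothetical helper; KM_USER0 is the atomic kmap
	 * slot used throughout this file).
	 */
	static void zero_page_range(struct page *page, unsigned int offset,
				    unsigned int len)
	{
		char *kaddr = kmap_atomic(page, KM_USER0);

		memset(kaddr + offset, 0, len);
		flush_dcache_page(page);	/* keep d-cache coherent */
		kunmap_atomic(kaddr, KM_USER0);
	}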
@@ -1921,6 +1945,129 @@ done:
 	return 0;
 }
 
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
+
+/* Taken directly from 2.6.23 for 2.6.18 back port */
+typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
+			   void *data);
+
+/**
+ * write_cache_pages - walk the list of dirty pages of the given address space
+ * and write all of them.
+ * @mapping: address space structure to write
+ * @wbc: subtract the number of written pages from *@wbc->nr_to_write
+ * @writepage: function called for each page
+ * @data: data passed to writepage function
+ *
+ * If a page is already under I/O, write_cache_pages() skips it, even
+ * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
+ * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
+ * and msync() need to guarantee that all the data which was dirty at the time
+ * the call was made get new I/O started against them.  If wbc->sync_mode is
+ * WB_SYNC_ALL then we were called for data integrity and we must wait for
+ * existing IO to complete.
+ */
+static int write_cache_pages(struct address_space *mapping,
+			     struct writeback_control *wbc,
+			     writepage_t writepage, void *data)
+{
+	struct backing_dev_info *bdi = mapping->backing_dev_info;
+	int ret = 0;
+	int done = 0;
+	struct pagevec pvec;
+	int nr_pages;
+	pgoff_t index;
+	pgoff_t end;		/* Inclusive */
+	int scanned = 0;
+	int range_whole = 0;
+
+	if (wbc->nonblocking && bdi_write_congested(bdi)) {
+		wbc->encountered_congestion = 1;
+		return 0;
+	}
+
+	pagevec_init(&pvec, 0);
+	if (wbc->range_cyclic) {
+		index = mapping->writeback_index; /* Start from prev offset */
+		end = -1;
+	} else {
+		index = wbc->range_start >> PAGE_CACHE_SHIFT;
+		end = wbc->range_end >> PAGE_CACHE_SHIFT;
+		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+			range_whole = 1;
+		scanned = 1;
+	}
+retry:
+	while (!done && (index <= end) &&
+	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+					      PAGECACHE_TAG_DIRTY,
+					      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
+		unsigned i;
+
+		scanned = 1;
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			/*
+			 * At this point we hold neither mapping->tree_lock nor
+			 * lock on the page itself: the page may be truncated or
+			 * invalidated (changing page->mapping to NULL), or even
+			 * swizzled back from swapper_space to tmpfs file
+			 * mapping
+			 */
+			lock_page(page);
+
+			if (unlikely(page->mapping != mapping)) {
+				unlock_page(page);
+				continue;
+			}
+
+			if (!wbc->range_cyclic && page->index > end) {
+				done = 1;
+				unlock_page(page);
+				continue;
+			}
+
+			if (wbc->sync_mode != WB_SYNC_NONE)
+				wait_on_page_writeback(page);
+
+			if (PageWriteback(page) ||
+			    !clear_page_dirty_for_io(page)) {
+				unlock_page(page);
+				continue;
+			}
+
+			ret = (*writepage)(page, wbc, data);
+
+			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
+				unlock_page(page);
+				ret = 0;
+			}
+			if (ret || (--(wbc->nr_to_write) <= 0))
+				done = 1;
+			if (wbc->nonblocking && bdi_write_congested(bdi)) {
+				wbc->encountered_congestion = 1;
+				done = 1;
+			}
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+	if (!scanned && !done) {
+		/*
+		 * We hit the last page and there is more work to be done: wrap
+		 * back to the start of the file
+		 */
+		scanned = 1;
+		index = 0;
+		goto retry;
+	}
+	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+		mapping->writeback_index = index;
+	return ret;
+}
+#endif
+
 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
 			   get_extent_t *get_extent,
 			   struct writeback_control *wbc)
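The backported write_cache_pages() is generic: it walks the mapping's dirty tag in pagevec-sized batches, locks and revalidates each page (it may have been truncated or migrated between lookup and lock), and hands each one to the caller's writepage_t callback along with an opaque data pointer. A minimal caller, modelled on extent_writepages() below but with illustrative names (my_ctx, my_writepage, my_writepages are not part of the patch), might look like:

	/* per-writeback state threaded through the callback */
	struct my_ctx {
		struct bio *bio;	/* bio being assembled across pages */
	};

	static int my_writepage(struct page *page,
				struct writeback_control *wbc, void *data)
	{
		struct my_ctx *ctx = data;

		/* ... add this page to ctx->bio, submitting when full ... */
		unlock_page(page);
		return 0;
	}

	static int my_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
	{
		struct my_ctx ctx = { .bio = NULL };
		int ret;

		ret = write_cache_pages(mapping, wbc, my_writepage, &ctx);
		if (ctx.bio)		/* flush whatever is still pending */
			submit_one_bio(WRITE, ctx.bio);
		return ret;
	}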
@@ -1945,18 +2092,20 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
 	ret = __extent_writepage(page, wbc, &epd);
 
 	write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
-	if (epd.bio)
+	if (epd.bio) {
 		submit_one_bio(WRITE, epd.bio);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(extent_write_full_page);
 
+
 int extent_writepages(struct extent_map_tree *tree,
 		      struct address_space *mapping,
 		      get_extent_t *get_extent,
 		      struct writeback_control *wbc)
 {
-	int ret;
+	int ret = 0;
 	struct extent_page_data epd = {
 		.bio = NULL,
 		.tree = tree,
@@ -1964,8 +2113,9 @@ int extent_writepages(struct extent_map_tree *tree,
 	};
 
 	ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
-	if (epd.bio)
+	if (epd.bio) {
 		submit_one_bio(WRITE, epd.bio);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(extent_writepages);
@@ -2106,7 +2256,9 @@ int extent_prepare_write(struct extent_map_tree *tree,
 		flush_dcache_page(page);
 		kunmap_atomic(kaddr, KM_USER0);
 	}
-	if (!isnew && !PageUptodate(page) &&
+	if ((em->block_start != EXTENT_MAP_HOLE &&
+	     em->block_start != EXTENT_MAP_INLINE) &&
+	    !isnew && !PageUptodate(page) &&
 	    (block_off_end > to || block_off_start < from) &&
 	    !test_range_bit(tree, block_start, cur_end,
 			    EXTENT_UPTODATE, 1)) {
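This last hunk tightens the "do we need to read before a partial write?" test in extent_prepare_write(): holes and inline extents have no on-disk blocks behind them, so the read path must be skipped even when the page is not uptodate and the write does not cover the whole block. Expressed as a predicate (hypothetical helper; field and flag names match the hunk, argument types are assumptions, and the EXTENT_UPTODATE range test from the full condition is elided for brevity):

	/*
	 * Sketch of the strengthened condition: a pre-write read is only
	 * worthwhile when the extent is backed by real disk blocks.
	 */
	static int need_read_before_write(struct extent_map *em, int isnew,
					  struct page *page,
					  u64 block_off_start,
					  u64 block_off_end,
					  unsigned from, unsigned to)
	{
		if (em->block_start == EXTENT_MAP_HOLE ||
		    em->block_start == EXTENT_MAP_INLINE)
			return 0;	/* nothing on disk to read */
		return !isnew && !PageUptodate(page) &&
		       (block_off_end > to || block_off_start < from);
	}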