author	Chris Mason <chris.mason@oracle.com>	2008-07-18 12:01:11 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 11:04:05 -0400
commit	7f3c74fb831fa19bafe087e817c0a5ff3883f1ea
tree	416e95db10e408240916d5061caf1e5d1182b330 /fs/btrfs/extent_io.c
parent	211f90e68b679d27fe23c5505f86d6ce62c98bae
Btrfs: Keep extent mappings in ram until pending ordered extents are done
It was possible for stale mappings from disk to be used instead of the new
pending ordered extent. This adds a flag to the extent map struct to keep it
pinned until the pending ordered extent is actually on disk.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
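For readers skimming the diff below: the behavioral core of this change is the new EXTENT_FLAG_PINNED test in try_release_extent_mapping() (last hunk); the rest is a page_offset -> pg_offset rename in __extent_writepage(). A minimal sketch of the release-side check, with the helper name and surrounding structure assumed for illustration (only the test_bit() condition is taken from this patch):

	/*
	 * Sketch only -- not the patch's literal code.  EXTENT_FLAG_PINNED is
	 * the bit this series adds to struct extent_map; test_bit() is the
	 * standard kernel atomic bitop.
	 */
	static int em_releasable(struct extent_map *em, u64 start)	/* name assumed */
	{
		/* A mapping backing a pending ordered extent must stay in ram,
		 * or a stale mapping read from disk could replace it. */
		if (test_bit(EXTENT_FLAG_PINNED, &em->flags))
			return 0;

		/* Not the mapping we were asked to release. */
		if (em->start != start)
			return 0;

		return 1;
	}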
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	27
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 3153b4fdd0b3..d4a63ae7ed1b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2000,7 +2000,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	struct block_device *bdev;
 	int ret;
 	int nr = 0;
-	size_t page_offset = 0;
+	size_t pg_offset = 0;
 	size_t blocksize;
 	loff_t i_size = i_size_read(inode);
 	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
@@ -2008,9 +2008,9 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	u64 delalloc_end;
 
 	WARN_ON(!PageLocked(page));
-	page_offset = i_size & (PAGE_CACHE_SIZE - 1);
+	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
 	if (page->index > end_index ||
-	   (page->index == end_index && !page_offset)) {
+	   (page->index == end_index && !pg_offset)) {
 		page->mapping->a_ops->invalidatepage(page, 0);
 		unlock_page(page);
 		return 0;
@@ -2020,12 +2020,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		char *userpage;
 
 		userpage = kmap_atomic(page, KM_USER0);
-		memset(userpage + page_offset, 0,
-		       PAGE_CACHE_SIZE - page_offset);
+		memset(userpage + pg_offset, 0,
+		       PAGE_CACHE_SIZE - pg_offset);
 		kunmap_atomic(userpage, KM_USER0);
 		flush_dcache_page(page);
 	}
-	page_offset = 0;
+	pg_offset = 0;
 
 	set_page_extent_mapped(page);
 
@@ -2088,7 +2088,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 			unlock_start = page_end + 1;
 			break;
 		}
-		em = epd->get_extent(inode, page, page_offset, cur,
+		em = epd->get_extent(inode, page, pg_offset, cur,
 				     end - cur + 1, 1);
 		if (IS_ERR(em) || !em) {
 			SetPageError(page);
@@ -2113,12 +2113,13 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 
 			unlock_extent(tree, unlock_start, cur + iosize -1,
 				      GFP_NOFS);
+
 			if (tree->ops && tree->ops->writepage_end_io_hook)
 				tree->ops->writepage_end_io_hook(page, cur,
 							 cur + iosize - 1,
 							 NULL, 1);
 			cur = cur + iosize;
-			page_offset += iosize;
+			pg_offset += iosize;
 			unlock_start = cur;
 			continue;
 		}
@@ -2127,7 +2128,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
 					 EXTENT_DIRTY, 0)) {
 			cur = cur + iosize;
-			page_offset += iosize;
+			pg_offset += iosize;
 			continue;
 		}
 		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
@@ -2141,6 +2142,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 			SetPageError(page);
 		} else {
 			unsigned long max_nr = end_index + 1;
+
 			set_range_writeback(tree, cur, cur + iosize - 1);
 			if (!PageWriteback(page)) {
 				printk("warning page %lu not writeback, "
@@ -2150,14 +2152,14 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 			}
 
 			ret = submit_extent_page(WRITE, tree, page, sector,
-						 iosize, page_offset, bdev,
+						 iosize, pg_offset, bdev,
 						 &epd->bio, max_nr,
 						 end_bio_extent_writepage, 0);
 			if (ret)
 				SetPageError(page);
 		}
 		cur = cur + iosize;
-		page_offset += iosize;
+		pg_offset += iosize;
 		nr++;
 	}
 done:
@@ -2579,7 +2581,8 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 			spin_unlock(&map->lock);
 			break;
 		}
-		if (em->start != start) {
+		if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
+		    em->start != start) {
 			spin_unlock(&map->lock);
 			free_extent_map(em);
 			break;
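Note that this page is limited to the extent_io.c side of the commit: try_release_extent_mapping() now refuses to drop a pinned mapping, but nothing in this file ever clears the bit. A hedged sketch of what the completion side presumably looks like, with the helper name, lookup, and error handling assumed rather than taken from this diff (the spin_lock pattern mirrors the &map->lock usage visible in the last hunk):

	/* Assumed completion-side helper (not in this file's diff): once the
	 * ordered extent is safely on disk, drop the pin so the mapping can
	 * be released again. */
	int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
	{
		struct extent_map *em;
		int ret = 0;

		spin_lock(&tree->lock);
		em = lookup_extent_mapping(tree, start, len);
		if (em) {
			clear_bit(EXTENT_FLAG_PINNED, &em->flags);
			free_extent_map(em);	/* drop the lookup reference */
		} else {
			ret = -ENOENT;
		}
		spin_unlock(&tree->lock);
		return ret;
	}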