author    Chris Mason <chris.mason@oracle.com>  2008-05-15 09:13:45 -0400
committer Chris Mason <chris.mason@oracle.com>  2008-09-25 11:04:03 -0400
commit    211c17f51f46dc6c308c742098273dd46b5ca59c (patch)
tree      d72de74136af93236f2b37088ff310f08ddeadf5
parent    a0af469b58944f6e8c5c8ecbebb42997baf0cb9e (diff)
Fix corners in writepage and btrfs_truncate_page
The extent_io writepage calls needed an extra check for discarding pages that started on the last byte in the file.

btrfs_truncate_page needed checks to make sure the page was still part of the file after reading it, and most importantly, needed to wait for all IO to the page to finish before freeing the corresponding extents on disk.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
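The writepage corner case comes down to page-index arithmetic. The following is a small user-space sketch of my own, not code from the patch: PAGE_SIZE and PAGE_SHIFT stand in for the kernel's PAGE_CACHE_SIZE and PAGE_CACHE_SHIFT, and the i_size values are arbitrary examples. The point it illustrates is that a page past end_index, or at end_index when i_size is page aligned, holds no file bytes, so the patched __extent_writepage invalidates it instead of writing it out.

/*
 * Standalone illustration only (not kernel code) of the end-of-file
 * page check added to __extent_writepage.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

static int page_holds_file_data(uint64_t index, uint64_t i_size)
{
        uint64_t end_index = i_size >> PAGE_SHIFT;        /* last page index touched by i_size */
        uint64_t page_offset = i_size & (PAGE_SIZE - 1);  /* valid bytes in that last page */

        /*
         * Past end_index the page is entirely beyond EOF.  At end_index
         * with page_offset == 0 the page starts exactly at i_size, so it
         * holds nothing either; the patch invalidates it rather than
         * writing it.
         */
        if (index > end_index || (index == end_index && !page_offset))
                return 0;
        return 1;
}

int main(void)
{
        /* i_size = 8192: page 1 is full of data, page 2 starts exactly at EOF. */
        printf("%d %d %d\n",
               page_holds_file_data(1, 8192),   /* 1: fully inside the file    */
               page_holds_file_data(2, 8192),   /* 0: starts exactly at i_size */
               page_holds_file_data(2, 8193));  /* 1: holds the last byte      */
        return 0;
}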
-rw-r--r--  fs/btrfs/extent_io.c  15
-rw-r--r--  fs/btrfs/file.c        8
-rw-r--r--  fs/btrfs/inode.c       9
3 files changed, 17 insertions(+), 15 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2a3624adc0cf..17c508a941eb 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1988,9 +1988,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         u64 nr_delalloc;
         u64 delalloc_end;
 
+
         WARN_ON(!PageLocked(page));
-        if (page->index > end_index) {
-                clear_extent_dirty(tree, start, page_end, GFP_NOFS);
+        page_offset = i_size & (PAGE_CACHE_SIZE - 1);
+        if (page->index > end_index ||
+           (page->index == end_index && !page_offset)) {
+                page->mapping->a_ops->invalidatepage(page, 0);
                 unlock_page(page);
                 return 0;
         }
@@ -1998,13 +2001,13 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         if (page->index == end_index) {
                 char *userpage;
 
-                size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
-
                 userpage = kmap_atomic(page, KM_USER0);
-                memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
-                flush_dcache_page(page);
+                memset(userpage + page_offset, 0,
+                       PAGE_CACHE_SIZE - page_offset);
                 kunmap_atomic(userpage, KM_USER0);
+                flush_dcache_page(page);
         }
+        page_offset = 0;
 
         set_page_extent_mapped(page);
 
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index df97d470812a..d968900c6973 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -305,7 +305,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
             (inline_size & (root->sectorsize -1)) == 0 ||
             inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
                 u64 last_end;
-                u64 existing_delalloc = 0;
 
                 for (i = 0; i < num_pages; i++) {
                         struct page *p = pages[i];
@@ -315,13 +314,6 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
                 last_end = (u64)(pages[num_pages -1]->index) <<
                                 PAGE_CACHE_SHIFT;
                 last_end += PAGE_CACHE_SIZE - 1;
-                if (start_pos < isize) {
-                        u64 delalloc_start = start_pos;
-                        existing_delalloc = count_range_bits(io_tree,
-                                                &delalloc_start,
-                                                end_of_last_block, (u64)-1,
-                                                EXTENT_DELALLOC);
-                }
                 set_extent_delalloc(io_tree, start_pos, end_of_last_block,
                                     GFP_NOFS);
                 btrfs_add_ordered_inode(inode);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 08760ff9bab7..40b4a8ec17fe 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1180,19 +1180,26 @@ static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
                 goto out;
 
         ret = -ENOMEM;
+again:
         page = grab_cache_page(mapping, index);
         if (!page)
                 goto out;
         if (!PageUptodate(page)) {
                 ret = btrfs_readpage(NULL, page);
                 lock_page(page);
+                if (page->mapping != mapping) {
+                        unlock_page(page);
+                        page_cache_release(page);
+                        goto again;
+                }
                 if (!PageUptodate(page)) {
                         ret = -EIO;
                         goto out;
                 }
         }
-        page_start = (u64)page->index << PAGE_CACHE_SHIFT;
 
+        page_start = (u64)page->index << PAGE_CACHE_SHIFT;
+        wait_on_page_writeback(page);
         ret = btrfs_cow_one_page(inode, page, offset);
 
         unlock_page(page);