-rw-r--r--  fs/btrfs/extent_map.c | 40
-rw-r--r--  fs/btrfs/extent_map.h |  1
-rw-r--r--  fs/btrfs/file.c       |  7
-rw-r--r--  fs/btrfs/inode.c      |  7
4 files changed, 18 insertions, 37 deletions
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index ab5bde31b687..0ab368e091f9 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -1379,6 +1379,16 @@ static int submit_extent_page(int rw, struct extent_map_tree *tree,
 	return ret;
 }
 
+void set_page_extent_mapped(struct page *page)
+{
+	if (!PagePrivate(page)) {
+		SetPagePrivate(page);
+		WARN_ON(!page->mapping->a_ops->invalidatepage);
+		set_page_private(page, 1);
+		page_cache_get(page);
+	}
+}
+
 /*
  * basic readpage implementation. Locked extent state structs are inserted
  * into the tree that are removed when the IO is done (by the end_io
@@ -1405,12 +1415,7 @@ int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
 	size_t iosize;
 	size_t blocksize = inode->i_sb->s_blocksize;
 
-	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
-		WARN_ON(!page->mapping->a_ops->invalidatepage);
-		set_page_private(page, 1);
-		page_cache_get(page);
-	}
+	set_page_extent_mapped(page);
 
 	end = page_end;
 	lock_extent(tree, start, end, GFP_NOFS);
@@ -1531,12 +1536,7 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
 			       PAGE_CACHE_SIZE - offset, KM_USER0);
 	}
 
-	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
-		set_page_private(page, 1);
-		WARN_ON(!page->mapping->a_ops->invalidatepage);
-		page_cache_get(page);
-	}
+	set_page_extent_mapped(page);
 
 	lock_extent(tree, start, page_end, GFP_NOFS);
 	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
@@ -1672,13 +1672,7 @@ int extent_commit_write(struct extent_map_tree *tree,
 {
 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
 
-	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
-		set_page_private(page, 1);
-		WARN_ON(!page->mapping->a_ops->invalidatepage);
-		page_cache_get(page);
-	}
-
+	set_page_extent_mapped(page);
 	set_page_dirty(page);
 
 	if (pos > inode->i_size) {
@@ -1709,12 +1703,8 @@ int extent_prepare_write(struct extent_map_tree *tree,
 	int ret = 0;
 	int isnew;
 
-	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
-		set_page_private(page, 1);
-		WARN_ON(!page->mapping->a_ops->invalidatepage);
-		page_cache_get(page);
-	}
+	set_page_extent_mapped(page);
+
 	block_start = (page_start + from) & ~((u64)blocksize - 1);
 	block_end = (page_start + to - 1) | (blocksize - 1);
 	orig_block_start = block_start;
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 40b53ee274fb..d913ce01248d 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -105,4 +105,5 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end);
 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private);
 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private);
+void set_page_extent_mapped(struct page *page);
 #endif
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 698eaea612f1..4cc459c943ec 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -543,12 +543,7 @@ static int prepare_pages(struct btrfs_root *root,
 		}
 		cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
 		wait_on_page_writeback(pages[i]);
-		if (!PagePrivate(pages[i])) {
-			SetPagePrivate(pages[i]);
-			set_page_private(pages[i], 1);
-			WARN_ON(!pages[i]->mapping->a_ops->invalidatepage);
-			page_cache_get(pages[i]);
-		}
+		set_page_extent_mapped(pages[i]);
 		WARN_ON(!PageLocked(pages[i]));
 	}
 	return 0;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 88b3ef20be69..0df325f89d62 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -652,12 +652,7 @@ static int btrfs_cow_one_page(struct inode *inode, struct page *page,
 	u64 page_start = page->index << PAGE_CACHE_SHIFT;
 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
 
-	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
-		set_page_private(page, 1);
-		WARN_ON(!page->mapping->a_ops->invalidatepage);
-		page_cache_get(page);
-	}
+	set_page_extent_mapped(page);
 
 	lock_extent(em_tree, page_start, page_end, GFP_NOFS);
 	set_extent_delalloc(&BTRFS_I(inode)->extent_tree, page_start,
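
Because set_page_extent_mapped() sets PagePrivate and takes a page reference, that state has to be undone when the page's private data is torn down; the WARN_ON in the helper hints at this by insisting the address_space provides an invalidatepage hook. The teardown itself is not part of this patch; the sketch below only illustrates the usual counterpart, and example_clear_page_extent_mapped is a made-up name.

/* Illustrative teardown (not from this patch): drop the private marker
 * and the reference taken by set_page_extent_mapped(), e.g. from the
 * invalidatepage/releasepage paths of the address_space.
 */
static void example_clear_page_extent_mapped(struct page *page)
{
	if (PagePrivate(page)) {
		ClearPagePrivate(page);		/* page no longer carries private state */
		set_page_private(page, 0);	/* clear the dummy value set at setup */
		page_cache_release(page);	/* balance the page_cache_get() */
	}
}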