author    Chris Mason <chris.mason@oracle.com>    2008-07-24 11:57:52 -0400
committer Chris Mason <chris.mason@oracle.com>    2008-09-25 11:04:05 -0400
commit    3eaa2885276fd6dac7b076a793932428b7168e74 (patch)
tree      b06382bec68bf1755597a74ac8225f3bcddda5e5 /fs/btrfs/extent-tree.c
parent    64f26f745084872b916cd1bef6054e21b15c5784 (diff)
Btrfs: Fix the defragmentation code and the block relocation code for data=ordered
Before setting an extent to delalloc, the code needs to wait for pending ordered extents. Also, the relocation code needs to wait for ordered IO before scanning the block group again. This is because the extents are not removed until the IO for the new extents is finished.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--  fs/btrfs/extent-tree.c  |  39
1 file changed, 28 insertions(+), 11 deletions(-)
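The core of this patch is a wait-and-retry pattern in relocate_inode_pages(): before marking a page range delalloc, look up any ordered extent still covering it, and if one exists drop the locks, wait for that ordered IO, and start over. The following is a minimal sketch of that pattern built only from the helpers visible in the diff below; the function name relocate_one_page() is invented for illustration, and page locking, readahead, and error handling are omitted, so this is not part of the actual patch.

/* hypothetical helper condensing the retry loop this patch adds */
static int relocate_one_page(struct inode *inode, struct page *page)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;

again:
	lock_extent(io_tree, page_start, page_end, GFP_NOFS);

	/* ordered IO still pending on this range: the old extents are
	 * not removed until it completes, so wait and retry
	 */
	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	/* safe now: mark the range delalloc so relocation rewrites it */
	set_page_extent_mapped(page);
	set_extent_delalloc(io_tree, page_start, page_end, GFP_NOFS);
	set_page_dirty(page);

	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
	return 0;
}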
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index febc6295c7a..f92b297e7da 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2640,6 +2640,7 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start,
 	struct file_ra_state *ra;
 	unsigned long total_read = 0;
 	unsigned long ra_pages;
+	struct btrfs_ordered_extent *ordered;
 	struct btrfs_trans_handle *trans;
 
 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
@@ -2658,9 +2659,9 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start,
 				       calc_ra(i, last_index, ra_pages));
 		}
 		total_read++;
-		if (((u64)i << PAGE_CACHE_SHIFT) > inode->i_size)
+again:
+		if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
 			goto truncate_racing;
-
 		page = grab_cache_page(inode->i_mapping, i);
 		if (!page) {
 			goto out_unlock;
@@ -2674,18 +2675,24 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start,
 				goto out_unlock;
 			}
 		}
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
-		ClearPageDirty(page);
-#else
-		cancel_dirty_page(page, PAGE_CACHE_SIZE);
-#endif
 		wait_on_page_writeback(page);
-		set_page_extent_mapped(page);
+
 		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
 		page_end = page_start + PAGE_CACHE_SIZE - 1;
-
 		lock_extent(io_tree, page_start, page_end, GFP_NOFS);
 
+		ordered = btrfs_lookup_ordered_extent(inode, page_start);
+		if (ordered) {
+			unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
+			unlock_page(page);
+			page_cache_release(page);
+			btrfs_start_ordered_extent(inode, ordered, 1);
+			btrfs_put_ordered_extent(ordered);
+			goto again;
+		}
+		set_page_extent_mapped(page);
+
+
 		set_extent_delalloc(io_tree, page_start,
 				    page_end, GFP_NOFS);
 		set_page_dirty(page);
@@ -2694,10 +2701,18 @@ static int noinline relocate_inode_pages(struct inode *inode, u64 start,
 		unlock_page(page);
 		page_cache_release(page);
 	}
-	balance_dirty_pages_ratelimited_nr(inode->i_mapping,
-					   total_read);
 
 out_unlock:
+	/* we have to start the IO in order to get the ordered extents
+	 * instantiated.  This allows the relocation code to wait
+	 * for all the ordered extents to hit the disk.
+	 *
+	 * Otherwise, it would constantly loop over the same extents
+	 * because the old ones don't get deleted until the IO is
+	 * started.
+	 */
+	btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
+			       WB_SYNC_NONE);
 	kfree(ra);
 	trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
 	if (trans) {
@@ -3238,6 +3253,8 @@ next:
 
 	btrfs_clean_old_snapshots(tree_root);
 
+	btrfs_wait_ordered_extents(tree_root);
+
 	trans = btrfs_start_transaction(tree_root, 1);
 	btrfs_commit_transaction(trans, tree_root);
 	mutex_lock(&root->fs_info->alloc_mutex);
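Taken together, the out_unlock hunk and the final hunk above form a flush-then-wait sequence on the relocation side: start writeback for the range that was dirtied so the ordered extents get instantiated, then wait for them to complete before the block group is scanned again. The sketch below condenses that sequence using only the calls from the diff; relocation_flush_and_wait() is a made-up name, and this is illustrative rather than the actual code.

/* hypothetical condensation of the relocation-side change */
static void relocation_flush_and_wait(struct btrfs_root *tree_root,
				      struct inode *inode,
				      u64 start, u64 len)
{
	/* kick off the IO so the ordered extents are instantiated */
	btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
			       WB_SYNC_NONE);

	/* the old extents are only deleted once that IO finishes; wait
	 * here so the next scan of the block group makes progress
	 */
	btrfs_wait_ordered_extents(tree_root);
}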