path: root/fs/btrfs/extent-tree.c
author		Miao Xie <miaox@cn.fujitsu.com>		2012-12-20 06:19:09 -0500
committer	Josef Bacik <jbacik@fusionio.com>	2013-02-20 09:36:42 -0500
commit		da633a42170165cbf20a2d3886c7480ccc832ec3 (patch)
tree		006423477542cea861ad3189645b46209d09cccd /fs/btrfs/extent-tree.c
parent		093486c453a55230ccdad4b48863b872fe68c46e (diff)
Btrfs: flush all dirty inodes if writeback can not start
We may try to flush some dirty pages when there is not enough space to reserve. But it is possible that this operation fails; in order to get enough space to reserve successfully, we sync all the delalloc files. This operation is safe: we need not worry about the case where the filesystem goes from r/w to r/o, because the filesystem should guarantee that all dirty pages have been written to disk once it becomes read-only, so the sync operation does nothing if the filesystem is already read-only. Though it may waste a lot of time, as a corner case we need not care.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
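To make the fallback explicit, here is a condensed sketch of the helper the patch adds (the names are taken from the diff below; this is a simplified illustration, not the verbatim kernel code, and omits the original comments):

	/* Condensed sketch of the new helper (see the diff below for the real
	 * version): try to start background writeback of nr_pages; if that is
	 * not possible, synchronously flush every delalloc inode and wait for
	 * the resulting ordered extents. */
	void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
					  unsigned long nr_pages)
	{
		struct super_block *sb = root->fs_info->sb;

		if (writeback_inodes_sb_nr_if_idle_safe(sb, nr_pages,
							WB_REASON_FS_FREE_SPACE))
			return;		/* async writeback was started */

		/* Fallback: safe even if the fs has gone read-only, because a
		 * read-only fs has no dirty delalloc inodes left to flush. */
		btrfs_start_delalloc_inodes(root, 0);
		btrfs_wait_ordered_extents(root, 0);
	}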
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	40
1 file changed, 31 insertions(+), 9 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b4cb8186035f..d5e60d25ca51 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3695,12 +3695,15 @@ static int can_overcommit(struct btrfs_root *root,
 	return 0;
 }
 
-static int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
-					       unsigned long nr_pages,
-					       enum wb_reason reason)
+static inline int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
+						       unsigned long nr_pages,
+						       enum wb_reason reason)
 {
-	if (!writeback_in_progress(sb->s_bdi) &&
-	    down_read_trylock(&sb->s_umount)) {
+	/* the flusher is dealing with the dirty inodes now. */
+	if (writeback_in_progress(sb->s_bdi))
+		return 1;
+
+	if (down_read_trylock(&sb->s_umount)) {
 		writeback_inodes_sb_nr(sb, nr_pages, reason);
 		up_read(&sb->s_umount);
 		return 1;
@@ -3709,6 +3712,28 @@ static int writeback_inodes_sb_nr_if_idle_safe(struct super_block *sb,
 	return 0;
 }
 
+void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
+				  unsigned long nr_pages)
+{
+	struct super_block *sb = root->fs_info->sb;
+	int started;
+
+	/* If we can not start writeback, just sync all the delalloc file. */
+	started = writeback_inodes_sb_nr_if_idle_safe(sb, nr_pages,
+						      WB_REASON_FS_FREE_SPACE);
+	if (!started) {
+		/*
+		 * We needn't worry the filesystem going from r/w to r/o though
+		 * we don't acquire ->s_umount mutex, because the filesystem
+		 * should guarantee the delalloc inodes list be empty after
+		 * the filesystem is readonly(all dirty pages are written to
+		 * the disk).
+		 */
+		btrfs_start_delalloc_inodes(root, 0);
+		btrfs_wait_ordered_extents(root, 0);
+	}
+}
+
 /*
  * shrink metadata reservation for delalloc
  */
@@ -3741,10 +3766,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 	while (delalloc_bytes && loops < 3) {
 		max_reclaim = min(delalloc_bytes, to_reclaim);
 		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
-		writeback_inodes_sb_nr_if_idle_safe(root->fs_info->sb,
-						    nr_pages,
-						    WB_REASON_FS_FREE_SPACE);
-
+		btrfs_writeback_inodes_sb_nr(root, nr_pages);
 		/*
 		 * We need to wait for the async pages to actually start before
 		 * we do anything.