author    Josef Bacik <josef@redhat.com>        2009-10-07 20:44:34 -0400
committer Chris Mason <chris.mason@oracle.com>  2009-10-08 15:21:23 -0400
commit    e3ccfa989752c083ceb23c823a84f7ce3a081e61
tree      197558cbee7b773b8270cd861f882a37beacd2ed
parent    32c00aff718bb54a214b39146bdd9ac01511cd25
Btrfs: async delalloc flushing under space pressure
This patch moves the delalloc flushing that occurs when we are under space
pressure off to an async thread pool. This helps since we only free up
metadata space when we actually insert the extent item, which means it takes
quite a while for space to be freed up if we wait on all ordered extents.
However, if space is freed up due to inline extents being inserted, we can
wake up the waiters early so they can finish their work.

Signed-off-by: Josef Bacik <jbacik@redhat.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
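The shape of the change is easier to see in isolation. Below is a minimal
user-space sketch of the same pattern, using pthreads in place of the btrfs
worker pool and a condition variable in place of the kernel wait queue; all
names here (space_info, flush_worker, and so on) are illustrative stand-ins,
not the kernel API. The worker broadcasts once after starting writeback (the
point at which inline extents may already have returned space) and again when
the flush fully completes, so a waiter can leave as soon as either enough
space is free or the flush is done.

/*
 * Illustrative user-space analogue of the patch (not kernel code): an async
 * flusher that wakes waiters early, plus a waiter that rechecks space.
 * Build with: cc -o flush flush.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct space_info {			/* stand-in for btrfs_space_info */
	pthread_mutex_t lock;
	pthread_cond_t flush_wait;	/* stand-in for the wait queue */
	bool flushing;
	unsigned long used, total;
};

static void *flush_worker(void *arg)	/* analogue of flush_delalloc_async */
{
	struct space_info *info = arg;

	/* Phase 1: start writeback; pretend inline extents freed space. */
	pthread_mutex_lock(&info->lock);
	info->used -= 10;
	pthread_cond_broadcast(&info->flush_wait);	/* early wakeup */
	pthread_mutex_unlock(&info->lock);

	sleep(1);		/* Phase 2: "wait on ordered extents" */

	pthread_mutex_lock(&info->lock);
	info->flushing = false;
	pthread_cond_broadcast(&info->flush_wait);	/* final wakeup */
	pthread_mutex_unlock(&info->lock);
	return NULL;
}

static void wait_on_flush(struct space_info *info)
{
	pthread_mutex_lock(&info->lock);
	/* Leave as soon as the flush ends OR enough space is free. */
	while (info->flushing && info->used >= info->total)
		pthread_cond_wait(&info->flush_wait, &info->lock);
	pthread_mutex_unlock(&info->lock);
}

int main(void)
{
	struct space_info info = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.flush_wait = PTHREAD_COND_INITIALIZER,
		.flushing = true, .used = 105, .total = 100,
	};
	pthread_t worker;

	pthread_create(&worker, NULL, flush_worker, &info);
	wait_on_flush(&info);	/* returns on the early wakeup */
	printf("woken early; proceeding before the full flush completes\n");
	pthread_join(worker, NULL);
	return 0;
}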
Diffstat (limited to 'fs/btrfs/extent-tree.c')
 fs/btrfs/extent-tree.c | 80 ++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 75 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3d1be0b77f8f..53026806ae9e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2866,9 +2866,66 @@ static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
 		meta_sinfo->force_delalloc = 0;
 }
 
+struct async_flush {
+	struct btrfs_root *root;
+	struct btrfs_space_info *info;
+	struct btrfs_work work;
+};
+
+static noinline void flush_delalloc_async(struct btrfs_work *work)
+{
+	struct async_flush *async;
+	struct btrfs_root *root;
+	struct btrfs_space_info *info;
+
+	async = container_of(work, struct async_flush, work);
+	root = async->root;
+	info = async->info;
+
+	btrfs_start_delalloc_inodes(root);
+	wake_up(&info->flush_wait);
+	btrfs_wait_ordered_extents(root, 0);
+
+	spin_lock(&info->lock);
+	info->flushing = 0;
+	spin_unlock(&info->lock);
+	wake_up(&info->flush_wait);
+
+	kfree(async);
+}
+
+static void wait_on_flush(struct btrfs_space_info *info)
+{
+	DEFINE_WAIT(wait);
+	u64 used;
+
+	while (1) {
+		prepare_to_wait(&info->flush_wait, &wait,
+				TASK_UNINTERRUPTIBLE);
+		spin_lock(&info->lock);
+		if (!info->flushing) {
+			spin_unlock(&info->lock);
+			break;
+		}
+
+		used = info->bytes_used + info->bytes_reserved +
+			info->bytes_pinned + info->bytes_readonly +
+			info->bytes_super + info->bytes_root +
+			info->bytes_may_use + info->bytes_delalloc;
+		if (used < info->total_bytes) {
+			spin_unlock(&info->lock);
+			break;
+		}
+		spin_unlock(&info->lock);
+		schedule();
+	}
+	finish_wait(&info->flush_wait, &wait);
+}
+
 static void flush_delalloc(struct btrfs_root *root,
 			   struct btrfs_space_info *info)
 {
+	struct async_flush *async;
 	bool wait = false;
 
 	spin_lock(&info->lock);
@@ -2883,11 +2940,24 @@ static void flush_delalloc(struct btrfs_root *root,
 	spin_unlock(&info->lock);
 
 	if (wait) {
-		wait_event(info->flush_wait,
-			   !info->flushing);
+		wait_on_flush(info);
 		return;
 	}
 
+	async = kzalloc(sizeof(*async), GFP_NOFS);
+	if (!async)
+		goto flush;
+
+	async->root = root;
+	async->info = info;
+	async->work.func = flush_delalloc_async;
+
+	btrfs_queue_worker(&root->fs_info->enospc_workers,
+			   &async->work);
+	wait_on_flush(info);
+	return;
+
+flush:
 	btrfs_start_delalloc_inodes(root);
 	btrfs_wait_ordered_extents(root, 0);
 
@@ -2927,7 +2997,7 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 	if (!info->allocating_chunk) {
 		info->force_alloc = 1;
 		info->allocating_chunk = 1;
-		init_waitqueue_head(&info->wait);
+		init_waitqueue_head(&info->allocate_wait);
 	} else {
 		wait = true;
 	}
@@ -2935,7 +3005,7 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 	spin_unlock(&info->lock);
 
 	if (wait) {
-		wait_event(info->wait,
+		wait_event(info->allocate_wait,
 			   !info->allocating_chunk);
 		return 1;
 	}
@@ -2956,7 +3026,7 @@ out:
 	spin_lock(&info->lock);
 	info->allocating_chunk = 0;
 	spin_unlock(&info->lock);
-	wake_up(&info->wait);
+	wake_up(&info->allocate_wait);
 
 	if (ret)
 		return 0;