Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/ctree.h        13
-rw-r--r--  fs/btrfs/disk-io.c       6
-rw-r--r--  fs/btrfs/extent-tree.c  80
3 files changed, 88 insertions(+), 11 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index dbdada56950..a362dd617e9 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -691,17 +691,17 @@ struct btrfs_space_info {
 
 	struct list_head list;
 
+	/* for controlling how we free up space for allocations */
+	wait_queue_head_t allocate_wait;
+	wait_queue_head_t flush_wait;
+	int allocating_chunk;
+	int flushing;
+
 	/* for block groups in our same type */
 	struct list_head block_groups;
 	spinlock_t lock;
 	struct rw_semaphore groups_sem;
 	atomic_t caching_threads;
-
-	int allocating_chunk;
-	wait_queue_head_t wait;
-
-	int flushing;
-	wait_queue_head_t flush_wait;
 };
 
 /*
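
The hunk above gives btrfs_space_info a dedicated flag-plus-waitqueue pair for each ENOSPC throttling path: allocating_chunk/allocate_wait for chunk allocation and flushing/flush_wait for delalloc flushing. Both pairs use the same handshake, sketched below as a standalone illustration; claim_or_wait() and do_work() are hypothetical names, not code from this patch (assumes <linux/spinlock.h> and <linux/wait.h>).

/* Sketch only: the flag + waitqueue handshake the new fields implement.
 * claim_or_wait() and do_work() are hypothetical; the real users are
 * maybe_allocate_chunk() and flush_delalloc() in extent-tree.c. */
static void claim_or_wait(struct btrfs_space_info *info)
{
	bool busy = false;

	spin_lock(&info->lock);
	if (!info->allocating_chunk)
		info->allocating_chunk = 1;	/* we own the operation */
	else
		busy = true;			/* another task is already on it */
	spin_unlock(&info->lock);

	if (busy) {
		/* sleep until the owner clears the flag and wakes the queue */
		wait_event(info->allocate_wait, !info->allocating_chunk);
		return;
	}

	do_work();				/* placeholder for the real work */

	spin_lock(&info->lock);
	info->allocating_chunk = 0;
	spin_unlock(&info->lock);
	wake_up(&info->allocate_wait);
}
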
@@ -918,6 +918,7 @@ struct btrfs_fs_info {
 	struct btrfs_workers endio_meta_write_workers;
 	struct btrfs_workers endio_write_workers;
 	struct btrfs_workers submit_workers;
+	struct btrfs_workers enospc_workers;
 	/*
 	 * fixup workers take dirty pages that didn't properly go through
 	 * the cow mechanism and make them safe to write. It happens
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 9903f042765..ac8927bdc33 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1762,6 +1762,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 			   min_t(u64, fs_devices->num_devices,
 			   fs_info->thread_pool_size),
 			   &fs_info->generic_worker);
+	btrfs_init_workers(&fs_info->enospc_workers, "enospc",
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 
 	/* a higher idle thresh on the submit workers makes it much more
 	 * likely that bios will be send down in a sane order to the
@@ -1809,6 +1812,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	btrfs_start_workers(&fs_info->endio_meta_workers, 1);
 	btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
 	btrfs_start_workers(&fs_info->endio_write_workers, 1);
+	btrfs_start_workers(&fs_info->enospc_workers, 1);
 
 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2023,6 +2027,7 @@ fail_sb_buffer:
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
 	btrfs_stop_workers(&fs_info->submit_workers);
+	btrfs_stop_workers(&fs_info->enospc_workers);
 fail_iput:
 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 	iput(fs_info->btree_inode);
@@ -2449,6 +2454,7 @@ int close_ctree(struct btrfs_root *root)
 	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
 	btrfs_stop_workers(&fs_info->endio_write_workers);
 	btrfs_stop_workers(&fs_info->submit_workers);
+	btrfs_stop_workers(&fs_info->enospc_workers);
 
 	btrfs_close_devices(fs_info->fs_devices);
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
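
Taken together with the ctree.h hunk above, these disk-io.c changes give fs_info a dedicated "enospc" worker pool and run it through the same lifecycle as the existing pools. The sequence below is a condensed sketch assembled only from calls that appear in this patch; it is not one contiguous piece of code.

/* Condensed sketch of the enospc_workers lifecycle wired up in this patch. */

/* open_ctree(): size the pool from thread_pool_size and start one thread */
btrfs_init_workers(&fs_info->enospc_workers, "enospc",
		   fs_info->thread_pool_size,
		   &fs_info->generic_worker);
btrfs_start_workers(&fs_info->enospc_workers, 1);

/* extent-tree.c, flush_delalloc(): hand a prepared btrfs_work to the pool */
async->work.func = flush_delalloc_async;
btrfs_queue_worker(&root->fs_info->enospc_workers, &async->work);

/* open_ctree() error unwind and close_ctree(): tear the pool back down */
btrfs_stop_workers(&fs_info->enospc_workers);
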
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 3d1be0b77f8..53026806ae9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2866,9 +2866,66 @@ static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
 	meta_sinfo->force_delalloc = 0;
 }
 
+struct async_flush {
+	struct btrfs_root *root;
+	struct btrfs_space_info *info;
+	struct btrfs_work work;
+};
+
+static noinline void flush_delalloc_async(struct btrfs_work *work)
+{
+	struct async_flush *async;
+	struct btrfs_root *root;
+	struct btrfs_space_info *info;
+
+	async = container_of(work, struct async_flush, work);
+	root = async->root;
+	info = async->info;
+
+	btrfs_start_delalloc_inodes(root);
+	wake_up(&info->flush_wait);
+	btrfs_wait_ordered_extents(root, 0);
+
+	spin_lock(&info->lock);
+	info->flushing = 0;
+	spin_unlock(&info->lock);
+	wake_up(&info->flush_wait);
+
+	kfree(async);
+}
+
+static void wait_on_flush(struct btrfs_space_info *info)
+{
+	DEFINE_WAIT(wait);
+	u64 used;
+
+	while (1) {
+		prepare_to_wait(&info->flush_wait, &wait,
+				TASK_UNINTERRUPTIBLE);
+		spin_lock(&info->lock);
+		if (!info->flushing) {
+			spin_unlock(&info->lock);
+			break;
+		}
+
+		used = info->bytes_used + info->bytes_reserved +
+			info->bytes_pinned + info->bytes_readonly +
+			info->bytes_super + info->bytes_root +
+			info->bytes_may_use + info->bytes_delalloc;
+		if (used < info->total_bytes) {
+			spin_unlock(&info->lock);
+			break;
+		}
+		spin_unlock(&info->lock);
+		schedule();
+	}
+	finish_wait(&info->flush_wait, &wait);
+}
+
 static void flush_delalloc(struct btrfs_root *root,
 			   struct btrfs_space_info *info)
 {
+	struct async_flush *async;
 	bool wait = false;
 
 	spin_lock(&info->lock);
@@ -2883,11 +2940,24 @@ static void flush_delalloc(struct btrfs_root *root,
 	spin_unlock(&info->lock);
 
 	if (wait) {
-		wait_event(info->flush_wait,
-			   !info->flushing);
+		wait_on_flush(info);
 		return;
 	}
 
+	async = kzalloc(sizeof(*async), GFP_NOFS);
+	if (!async)
+		goto flush;
+
+	async->root = root;
+	async->info = info;
+	async->work.func = flush_delalloc_async;
+
+	btrfs_queue_worker(&root->fs_info->enospc_workers,
+			   &async->work);
+	wait_on_flush(info);
+	return;
+
+flush:
 	btrfs_start_delalloc_inodes(root);
 	btrfs_wait_ordered_extents(root, 0);
 
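
wait_on_flush() is open-coded instead of a plain wait_event() so a waiter can stop early: flush_delalloc_async() wakes flush_wait once right after btrfs_start_delalloc_inodes() and again after clearing info->flushing, and on every wakeup the waiter re-checks both the flag and the space counters. The exit condition it re-evaluates is pulled out below as a hypothetical helper for readability; the helper itself is not part of the patch.

/* Hypothetical helper, illustration only: the condition wait_on_flush()
 * re-checks under info->lock after each wakeup on info->flush_wait. */
static bool flush_wait_done(struct btrfs_space_info *info)
{
	u64 used;
	bool done;

	spin_lock(&info->lock);
	used = info->bytes_used + info->bytes_reserved +
	       info->bytes_pinned + info->bytes_readonly +
	       info->bytes_super + info->bytes_root +
	       info->bytes_may_use + info->bytes_delalloc;
	/* stop waiting once the flush is finished or enough space is free */
	done = !info->flushing || used < info->total_bytes;
	spin_unlock(&info->lock);

	return done;
}
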
@@ -2927,7 +2997,7 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 	if (!info->allocating_chunk) {
 		info->force_alloc = 1;
 		info->allocating_chunk = 1;
-		init_waitqueue_head(&info->wait);
+		init_waitqueue_head(&info->allocate_wait);
 	} else {
 		wait = true;
 	}
@@ -2935,7 +3005,7 @@ static int maybe_allocate_chunk(struct btrfs_root *root,
 	spin_unlock(&info->lock);
 
 	if (wait) {
-		wait_event(info->wait,
+		wait_event(info->allocate_wait,
 			   !info->allocating_chunk);
 		return 1;
 	}
@@ -2956,7 +3026,7 @@ out:
 	spin_lock(&info->lock);
 	info->allocating_chunk = 0;
 	spin_unlock(&info->lock);
-	wake_up(&info->wait);
+	wake_up(&info->allocate_wait);
 
 	if (ret)
 		return 0;
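
The remaining hunks rename the chunk-allocation wait queue from info->wait to info->allocate_wait, so allocation waiters and flush waiters never share a queue. The callers of these two throttles are outside this diff; the loop below is a purely hypothetical sketch of how a metadata reservation path might combine them (reserve_metadata_bytes(), has_room(), the retry policy, the return-value interpretation, and the second argument to maybe_allocate_chunk are all assumptions, not btrfs code).

/* Hypothetical caller, illustration only: combining the two throttles. */
static int reserve_metadata_bytes(struct btrfs_root *root,
				  struct btrfs_space_info *info, u64 bytes)
{
	int retries = 0;

	while (!has_room(info, bytes)) {	/* has_room() is assumed */
		if (retries == 0) {
			/* push dirty delalloc data out to free reserved space */
			flush_delalloc(root, info);
		} else if (retries == 1) {
			/* try to add a chunk; treat 0 as "nothing gained" */
			if (!maybe_allocate_chunk(root, info))
				return -ENOSPC;
		} else {
			return -ENOSPC;
		}
		retries++;
	}
	return 0;
}
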