Diffstat (limited to 'fs/btrfs/extent-tree.c')
 -rw-r--r--  fs/btrfs/extent-tree.c  58
 1 file changed, 43 insertions(+), 15 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 32312e09f0f5..c6b6a6e3e735 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -549,7 +549,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	caching_ctl->block_group = cache;
 	caching_ctl->progress = cache->key.objectid;
 	atomic_set(&caching_ctl->count, 1);
-	caching_ctl->work.func = caching_thread;
+	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
 
 	spin_lock(&cache->lock);
 	/*
@@ -640,7 +640,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 
 	btrfs_get_block_group(cache);
 
-	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);
+	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
 
 	return ret;
 }
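
These two hunks are part of the conversion from the old btrfs_workers framework to the kernel-workqueue-backed btrfs_workqueue: instead of assigning a bare function pointer and queueing on a struct btrfs_workers, callers initialize the work item with one helper and queue it on a struct btrfs_workqueue pointer. A minimal sketch of the new pattern, using only the two calls visible in the diff; the surrounding function and the example_* names are illustrative and assume an in-tree kernel build:

/* Illustrative only: the callback/queue pattern after this patch.
 * Assumes in-tree kernel context; example_* names are hypothetical. */
static void example_caching_thread(struct btrfs_work *work)
{
	/* ... walk the extent tree and populate the free-space cache ... */
}

static void example_start_caching(struct btrfs_fs_info *fs_info,
				  struct btrfs_caching_control *ctl)
{
	/* Old: ctl->work.func = caching_thread;
	 *      btrfs_queue_worker(&fs_info->caching_workers, &ctl->work);
	 * New: one init helper that also takes the ordered-completion and
	 * free callbacks (NULL here, caching needs neither), then queue
	 * on the btrfs_workqueue pointer. */
	btrfs_init_work(&ctl->work, example_caching_thread, NULL, NULL);
	btrfs_queue_work(fs_info->caching_workers, &ctl->work);
}
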
@@ -3971,7 +3971,7 @@ static int can_overcommit(struct btrfs_root *root,
 }
 
 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
-					 unsigned long nr_pages)
+					 unsigned long nr_pages, int nr_items)
 {
 	struct super_block *sb = root->fs_info->sb;
 
@@ -3986,9 +3986,9 @@ static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
 		 * the filesystem is readonly(all dirty pages are written to
 		 * the disk).
 		 */
-		btrfs_start_delalloc_roots(root->fs_info, 0);
+		btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
 		if (!current->journal_info)
-			btrfs_wait_ordered_roots(root->fs_info, -1);
+			btrfs_wait_ordered_roots(root->fs_info, nr_items);
 	}
 }
 
@@ -4045,7 +4045,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 	while (delalloc_bytes && loops < 3) {
 		max_reclaim = min(delalloc_bytes, to_reclaim);
 		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
-		btrfs_writeback_inodes_sb_nr(root, nr_pages);
+		btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
 		/*
 		 * We need to wait for the async pages to actually start before
 		 * we do anything.
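
These three hunks plumb an nr_items bound through btrfs_writeback_inodes_sb_nr(): instead of waiting on every ordered extent in the filesystem (the old -1 argument to btrfs_wait_ordered_roots()), the flush path now starts delalloc and waits on at most nr_items of them. A hedged sketch of the caller's side; calc_reclaim_items_nr() is an assumed helper name, not confirmed by this diff, and the kernel context is assumed:

/* Sketch only: how shrink_delalloc() is expected to bound the wait
 * after this patch.  calc_reclaim_items_nr() is hypothetical glue. */
static void example_shrink(struct btrfs_root *root, u64 to_reclaim)
{
	int items = calc_reclaim_items_nr(root, to_reclaim);
	unsigned long nr_pages = to_reclaim >> PAGE_CACHE_SHIFT;

	/* Kick writeback on up to nr_pages dirty delalloc pages and wait
	 * for at most 'items' ordered extents, rather than all of them
	 * (-1), so one reservation does not stall behind the whole
	 * filesystem's ordered-extent backlog. */
	btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
}
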
@@ -4112,13 +4112,9 @@ static int may_commit_transaction(struct btrfs_root *root,
 		goto commit;
 
 	/* See if there is enough pinned space to make this reservation */
-	spin_lock(&space_info->lock);
 	if (percpu_counter_compare(&space_info->total_bytes_pinned,
-				   bytes) >= 0) {
-		spin_unlock(&space_info->lock);
+				   bytes) >= 0)
 		goto commit;
-	}
-	spin_unlock(&space_info->lock);
 
 	/*
 	 * See if there is some space in the delayed insertion reservation for
@@ -4127,16 +4123,13 @@ static int may_commit_transaction(struct btrfs_root *root,
 	if (space_info != delayed_rsv->space_info)
 		return -ENOSPC;
 
-	spin_lock(&space_info->lock);
 	spin_lock(&delayed_rsv->lock);
 	if (percpu_counter_compare(&space_info->total_bytes_pinned,
 				   bytes - delayed_rsv->size) >= 0) {
 		spin_unlock(&delayed_rsv->lock);
-		spin_unlock(&space_info->lock);
 		return -ENOSPC;
 	}
 	spin_unlock(&delayed_rsv->lock);
-	spin_unlock(&space_info->lock);
 
 commit:
 	trans = btrfs_join_transaction(root);
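
The two may_commit_transaction() hunks drop space_info->lock around the pinned-byte checks: total_bytes_pinned is a percpu_counter, and percpu_counter_compare() does its own internal serialization when the per-cpu fast path is inconclusive, so the extra spinlock adds contention without adding safety. A minimal sketch of the lock-free comparison, assuming in-tree kernel context:

#include <linux/percpu_counter.h>

/* Sketch: a one-off read-side comparison needs no caller-side lock;
 * percpu_counter_compare() returns <0, 0, or >0, strcmp()-style. */
static bool example_enough_pinned(struct percpu_counter *pinned, s64 bytes)
{
	return percpu_counter_compare(pinned, bytes) >= 0;
}
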
@@ -4181,7 +4174,7 @@ static int flush_space(struct btrfs_root *root,
 		break;
 	case FLUSH_DELALLOC:
 	case FLUSH_DELALLOC_WAIT:
-		shrink_delalloc(root, num_bytes, orig_bytes,
+		shrink_delalloc(root, num_bytes * 2, orig_bytes,
 				state == FLUSH_DELALLOC_WAIT);
 		break;
 	case ALLOC_CHUNK:
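
The flush_space() hunk doubles the reclaim target handed to shrink_delalloc(): reclaiming exactly the deficit tends to come up short once concurrent reservations race in. A toy model of the heuristic; the helper name and the 2x factor's rationale are illustrative, not kernel API:

/* Toy model only; not kernel API. */
static inline u64 example_flush_target(u64 deficit)
{
	/* Over-reclaim by 2x so a racing reservation does not force an
	 * immediate second flush cycle. */
	return deficit * 2;
}
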
@@ -8938,3 +8931,38 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 	range->len = trimmed;
 	return ret;
 }
+
+/*
+ * btrfs_{start,end}_write() is similar to mnt_{want,drop}_write():
+ * they are used to prevent some tasks from writing data into the page
+ * cache by nocow before the subvolume is snapshotted, but to flush the
+ * data onto the disk after the snapshot creation.
+ */
+void btrfs_end_nocow_write(struct btrfs_root *root)
+{
+	percpu_counter_dec(&root->subv_writers->counter);
+	/*
+	 * Make sure counter is updated before we wake up waiters.
+	 */
+	smp_mb();
+	if (waitqueue_active(&root->subv_writers->wait))
+		wake_up(&root->subv_writers->wait);
+}
+
+int btrfs_start_nocow_write(struct btrfs_root *root)
+{
+	if (unlikely(atomic_read(&root->will_be_snapshoted)))
+		return 0;
+
+	percpu_counter_inc(&root->subv_writers->counter);
+	/*
+	 * Make sure counter is updated before we check for snapshot creation.
+	 */
+	smp_mb();
+	if (unlikely(atomic_read(&root->will_be_snapshoted))) {
+		btrfs_end_nocow_write(root);
+		return 0;
+	}
+	return 1;
+}
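
The new btrfs_start_nocow_write()/btrfs_end_nocow_write() pair forms a revocable reference count: snapshot creation sets root->will_be_snapshoted and waits for subv_writers->counter to drain, new nocow writers are turned away while the flag is set, and the smp_mb() on each side orders the counter update against the flag check. A hedged usage sketch, not from this patch; the caller and example_do_nocow_write() are hypothetical:

/* Usage sketch.  On failure the caller must fall back to the ordinary
 * COW write path. */
static ssize_t example_write(struct btrfs_root *root)
{
	ssize_t ret;

	if (!btrfs_start_nocow_write(root))
		return -EAGAIN;	/* snapshot pending: write via COW instead */

	/* Inside the bracket, a pending snapshot waits for us, so the
	 * data written by nocow is flushed before the snapshot is taken. */
	ret = example_do_nocow_write(root);

	btrfs_end_nocow_write(root);
	return ret;
}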