Diffstat (limited to 'fs/btrfs/transaction.c'):

 fs/btrfs/transaction.c | 137 +-----------------------------------------------
 1 file changed, 1 insertion(+), 136 deletions(-)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8e7e72341555..33679fc710c6 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -81,8 +81,7 @@ static noinline int join_transaction(struct btrfs_root *root)
 	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
 	list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
 	extent_io_tree_init(&cur_trans->dirty_pages,
-			     root->fs_info->btree_inode->i_mapping,
-			     GFP_NOFS);
+			     root->fs_info->btree_inode->i_mapping);
 	spin_lock(&root->fs_info->new_trans_lock);
 	root->fs_info->running_transaction = cur_trans;
 	spin_unlock(&root->fs_info->new_trans_lock);
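The one-line insertion above is the whole of this patch's additions: the GFP_NOFS argument to extent_io_tree_init() is gone, presumably because the mask is now fixed inside the helper rather than supplied by every caller. Inferred purely from the call site in this hunk (a sketch, not copied from extent_io.h), the prototype change would look like:

/* before: the caller supplied an allocation mask (always GFP_NOFS here) */
void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping, gfp_t mask);

/* after: two parameters; the mask is handled internally */
void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping);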
@@ -348,49 +347,6 @@ out_unlock:
 	return ret;
 }
 
-#if 0
-/*
- * rate limit against the drop_snapshot code. This helps to slow down new
- * operations if the drop_snapshot code isn't able to keep up.
- */
-static void throttle_on_drops(struct btrfs_root *root)
-{
-	struct btrfs_fs_info *info = root->fs_info;
-	int harder_count = 0;
-
-harder:
-	if (atomic_read(&info->throttles)) {
-		DEFINE_WAIT(wait);
-		int thr;
-		thr = atomic_read(&info->throttle_gen);
-
-		do {
-			prepare_to_wait(&info->transaction_throttle,
-					&wait, TASK_UNINTERRUPTIBLE);
-			if (!atomic_read(&info->throttles)) {
-				finish_wait(&info->transaction_throttle, &wait);
-				break;
-			}
-			schedule();
-			finish_wait(&info->transaction_throttle, &wait);
-		} while (thr == atomic_read(&info->throttle_gen));
-		harder_count++;
-
-		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
-		    harder_count < 2)
-			goto harder;
-
-		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
-		    harder_count < 10)
-			goto harder;
-
-		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
-		    harder_count < 20)
-			goto harder;
-	}
-}
-#endif
-
 void btrfs_throttle(struct btrfs_root *root)
 {
 	mutex_lock(&root->fs_info->trans_mutex);
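The throttle_on_drops() helper deleted above was the classic kernel wait-queue idiom: sample a generation counter, then prepare_to_wait()/schedule()/finish_wait() until either throttling stops or the generation advances, escalating the retry count with the size of the ref cache. As a rough userspace analogue of the same "wait until the condition clears or the generation moves" loop (POSIX condition variables, hypothetical names, not btrfs code):

#include <pthread.h>

/* Hypothetical throttle state mirroring info->throttles and
 * info->throttle_gen from the deleted helper. */
struct throttle {
	pthread_mutex_t lock;
	pthread_cond_t  wake;      /* plays the role of info->transaction_throttle */
	int             throttles; /* nonzero while the dropper wants us slowed */
	unsigned long   gen;       /* bumped whenever the throttler makes progress */
};

/* Sleep until throttling ends or the generation advances, i.e. the
 * other side has made at least one unit of progress -- the same exit
 * condition as the do/while loop in the deleted code. */
static void throttle_wait(struct throttle *t)
{
	pthread_mutex_lock(&t->lock);
	unsigned long start_gen = t->gen;

	while (t->throttles && t->gen == start_gen)
		pthread_cond_wait(&t->wake, &t->lock);
	pthread_mutex_unlock(&t->lock);
}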
@@ -837,97 +793,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
 	return ret;
 }
 
-#if 0
-/*
- * when dropping snapshots, we generate a ton of delayed refs, and it makes
- * sense not to join the transaction while it is trying to flush the current
- * queue of delayed refs out.
- *
- * This is used by the drop snapshot code only
- */
-static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
-{
-	DEFINE_WAIT(wait);
-
-	mutex_lock(&info->trans_mutex);
-	while (info->running_transaction &&
-	       info->running_transaction->delayed_refs.flushing) {
-		prepare_to_wait(&info->transaction_wait, &wait,
-				TASK_UNINTERRUPTIBLE);
-		mutex_unlock(&info->trans_mutex);
-
-		schedule();
-
-		mutex_lock(&info->trans_mutex);
-		finish_wait(&info->transaction_wait, &wait);
-	}
-	mutex_unlock(&info->trans_mutex);
-	return 0;
-}
-
-/*
- * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
- * all of them
- */
-int btrfs_drop_dead_root(struct btrfs_root *root)
-{
-	struct btrfs_trans_handle *trans;
-	struct btrfs_root *tree_root = root->fs_info->tree_root;
-	unsigned long nr;
-	int ret;
-
-	while (1) {
-		/*
-		 * we don't want to jump in and create a bunch of
-		 * delayed refs if the transaction is starting to close
-		 */
-		wait_transaction_pre_flush(tree_root->fs_info);
-		trans = btrfs_start_transaction(tree_root, 1);
-
-		/*
-		 * we've joined a transaction, make sure it isn't
-		 * closing right now
-		 */
-		if (trans->transaction->delayed_refs.flushing) {
-			btrfs_end_transaction(trans, tree_root);
-			continue;
-		}
-
-		ret = btrfs_drop_snapshot(trans, root);
-		if (ret != -EAGAIN)
-			break;
-
-		ret = btrfs_update_root(trans, tree_root,
-					&root->root_key,
-					&root->root_item);
-		if (ret)
-			break;
-
-		nr = trans->blocks_used;
-		ret = btrfs_end_transaction(trans, tree_root);
-		BUG_ON(ret);
-
-		btrfs_btree_balance_dirty(tree_root, nr);
-		cond_resched();
-	}
-	BUG_ON(ret);
-
-	ret = btrfs_del_root(trans, tree_root, &root->root_key);
-	BUG_ON(ret);
-
-	nr = trans->blocks_used;
-	ret = btrfs_end_transaction(trans, tree_root);
-	BUG_ON(ret);
-
-	free_extent_buffer(root->node);
-	free_extent_buffer(root->commit_root);
-	kfree(root);
-
-	btrfs_btree_balance_dirty(tree_root, nr);
-	return ret;
-}
-#endif
-
 /*
  * new snapshots need to be created at a very specific time in the
  * transaction commit. This does the actual creation
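Both helpers deleted in the final hunk follow recognizable patterns. wait_transaction_pre_flush() is the same wait-queue idiom as above, just gated on delayed_refs.flushing under trans_mutex. btrfs_drop_dead_root() is a chunked-work loop: as the deleted code shows, btrfs_drop_snapshot() returned -EAGAIN while more of the tree remained, so the caller committed the transaction to flush that chunk's delayed refs and then started a new one. A minimal, runnable sketch of that -EAGAIN chunking loop (all names hypothetical, the transaction machinery reduced to comments):

#include <errno.h>
#include <stdio.h>

static int items = 10;	/* pretend there are 10 items left to drop */

/* Stand-in for btrfs_drop_snapshot(): drops a bounded chunk per call
 * and reports -EAGAIN while anything remains. */
static int drop_some(void)
{
	items -= 3;
	return items > 0 ? -EAGAIN : 0;
}

int main(void)
{
	int ret;

	for (;;) {
		/* btrfs_start_transaction() would go here */
		ret = drop_some();
		/* btrfs_end_transaction() here commits this chunk and
		 * keeps the delayed-ref backlog bounded */
		if (ret != -EAGAIN)
			break;	/* 0 = all gone, <0 = real error */
	}
	printf("done, ret = %d\n", ret);
	return 0;
}

The design point, visible in the deleted code through wait_transaction_pre_flush() and the delayed_refs.flushing checks, is to avoid piling fresh delayed refs onto a transaction that is already trying to flush its queue out.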