author		David Sterba <dsterba@suse.cz>		2011-05-05 07:13:16 -0400
committer	David Sterba <dsterba@suse.cz>		2011-05-06 06:34:10 -0400
commit		182608c8294b5fe90d7bbd4b026c82bf0a24b736 (patch)
tree		5705e84960e66caa84ac059a3528a31493e35d16 /fs/btrfs/transaction.c
parent		f2a97a9dbd86eb1ef956bdf20e05c507b32beb96 (diff)
btrfs: remove old unused commented out code
Remove code which has been #if 0-ed out for a very long time and no
longer seems to be related to the current codebase.
Signed-off-by: David Sterba <dsterba@suse.cz>
Diffstat (limited to 'fs/btrfs/transaction.c')
-rw-r--r--	fs/btrfs/transaction.c	134
1 file changed, 0 insertions, 134 deletions
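
[Editor's note] Both functions deleted below open-code the kernel's low-level
waitqueue sleep/wake idiom (DEFINE_WAIT, prepare_to_wait, schedule,
finish_wait). For readers studying the removed code, here is a minimal sketch
of that idiom in isolation; the waitqueue head (demo_wq) and the condition
(pending_drops) are hypothetical stand-ins, not btrfs symbols:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);	/* hypothetical waitqueue */
static atomic_t pending_drops = ATOMIC_INIT(0);	/* hypothetical condition */

/* Sleep until pending_drops reaches zero. */
static void wait_for_pending_drops(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* queue this task and mark it TASK_UNINTERRUPTIBLE */
		prepare_to_wait(&demo_wq, &wait, TASK_UNINTERRUPTIBLE);
		/*
		 * re-check the condition only after queueing, so that a
		 * concurrent atomic_dec() + wake_up(&demo_wq) on the waker
		 * side cannot be lost between the check and the sleep
		 */
		if (!atomic_read(&pending_drops))
			break;
		schedule();
	}
	/* dequeue and restore TASK_RUNNING */
	finish_wait(&demo_wq, &wait);
}

The check-after-queueing ordering is what makes the pattern safe against lost
wakeups; the removed throttle_on_drops() and wait_transaction_pre_flush()
follow the same shape around btrfs-specific conditions.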
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 955f76eb0fa8..211aceeb9ea0 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -346,49 +346,6 @@ out_unlock:
 	return ret;
 }
 
-#if 0
-/*
- * rate limit against the drop_snapshot code. This helps to slow down new
- * operations if the drop_snapshot code isn't able to keep up.
- */
-static void throttle_on_drops(struct btrfs_root *root)
-{
-	struct btrfs_fs_info *info = root->fs_info;
-	int harder_count = 0;
-
-harder:
-	if (atomic_read(&info->throttles)) {
-		DEFINE_WAIT(wait);
-		int thr;
-		thr = atomic_read(&info->throttle_gen);
-
-		do {
-			prepare_to_wait(&info->transaction_throttle,
-					&wait, TASK_UNINTERRUPTIBLE);
-			if (!atomic_read(&info->throttles)) {
-				finish_wait(&info->transaction_throttle, &wait);
-				break;
-			}
-			schedule();
-			finish_wait(&info->transaction_throttle, &wait);
-		} while (thr == atomic_read(&info->throttle_gen));
-		harder_count++;
-
-		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
-		    harder_count < 2)
-			goto harder;
-
-		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
-		    harder_count < 10)
-			goto harder;
-
-		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
-		    harder_count < 20)
-			goto harder;
-	}
-}
-#endif
-
 void btrfs_throttle(struct btrfs_root *root)
 {
 	mutex_lock(&root->fs_info->trans_mutex);
@@ -808,97 +765,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
 	return ret;
 }
 
-#if 0
-/*
- * when dropping snapshots, we generate a ton of delayed refs, and it makes
- * sense not to join the transaction while it is trying to flush the current
- * queue of delayed refs out.
- *
- * This is used by the drop snapshot code only
- */
-static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
-{
-	DEFINE_WAIT(wait);
-
-	mutex_lock(&info->trans_mutex);
-	while (info->running_transaction &&
-	       info->running_transaction->delayed_refs.flushing) {
-		prepare_to_wait(&info->transaction_wait, &wait,
-				TASK_UNINTERRUPTIBLE);
-		mutex_unlock(&info->trans_mutex);
-
-		schedule();
-
-		mutex_lock(&info->trans_mutex);
-		finish_wait(&info->transaction_wait, &wait);
-	}
-	mutex_unlock(&info->trans_mutex);
-	return 0;
-}
-
-/*
- * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
- * all of them
- */
-int btrfs_drop_dead_root(struct btrfs_root *root)
-{
-	struct btrfs_trans_handle *trans;
-	struct btrfs_root *tree_root = root->fs_info->tree_root;
-	unsigned long nr;
-	int ret;
-
-	while (1) {
-		/*
-		 * we don't want to jump in and create a bunch of
-		 * delayed refs if the transaction is starting to close
-		 */
-		wait_transaction_pre_flush(tree_root->fs_info);
-		trans = btrfs_start_transaction(tree_root, 1);
-
-		/*
-		 * we've joined a transaction, make sure it isn't
-		 * closing right now
-		 */
-		if (trans->transaction->delayed_refs.flushing) {
-			btrfs_end_transaction(trans, tree_root);
-			continue;
-		}
-
-		ret = btrfs_drop_snapshot(trans, root);
-		if (ret != -EAGAIN)
-			break;
-
-		ret = btrfs_update_root(trans, tree_root,
-					&root->root_key,
-					&root->root_item);
-		if (ret)
-			break;
-
-		nr = trans->blocks_used;
-		ret = btrfs_end_transaction(trans, tree_root);
-		BUG_ON(ret);
-
-		btrfs_btree_balance_dirty(tree_root, nr);
-		cond_resched();
-	}
-	BUG_ON(ret);
-
-	ret = btrfs_del_root(trans, tree_root, &root->root_key);
-	BUG_ON(ret);
-
-	nr = trans->blocks_used;
-	ret = btrfs_end_transaction(trans, tree_root);
-	BUG_ON(ret);
-
-	free_extent_buffer(root->node);
-	free_extent_buffer(root->commit_root);
-	kfree(root);
-
-	btrfs_btree_balance_dirty(tree_root, nr);
-	return ret;
-}
-#endif
-
 /*
  * new snapshots need to be created at a very specific time in the
  * transaction commit. This does the actual creation
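
[Editor's note] The loop in the removed wait_transaction_pre_flush() is
open-coded only because trans_mutex must be dropped around schedule(); when no
lock needs to be released across the sleep, the kernel's wait_event() macro
expresses the same wait in one line. A sketch, reusing the hypothetical
demo_wq and pending_drops names from the earlier example:

/*
 * Equivalent to wait_for_pending_drops() above for the simple case:
 * wait_event() queues the task, re-checks the condition, and sleeps
 * uninterruptibly until the condition becomes true.
 */
static void wait_for_pending_drops_simple(void)
{
	wait_event(demo_wq, atomic_read(&pending_drops) == 0);
}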