Diffstat (limited to 'fs/btrfs/ctree.h'):

 fs/btrfs/ctree.h | 35 +++++++++++++++++++++++++++++++++++
 1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2737facbd341..f48905ee5240 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -45,6 +45,13 @@ struct btrfs_ordered_sum;
 
 #define BTRFS_MAX_LEVEL 8
 
+/*
+ * files bigger than this get some pre-flushing when they are added
+ * to the ordered operations list.  That way we limit the total
+ * work done by the commit
+ */
+#define BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT (8 * 1024 * 1024)
+
 /* holds pointers to all of the tree roots */
 #define BTRFS_ROOT_TREE_OBJECTID 1ULL
 
@@ -727,6 +734,15 @@ struct btrfs_fs_info {
 	struct mutex volume_mutex;
 	struct mutex tree_reloc_mutex;
 
+	/*
+	 * this protects the ordered operations list only while we are
+	 * processing all of the entries on it.  This way we make
+	 * sure the commit code doesn't find the list temporarily empty
+	 * because another function happens to be doing non-waiting preflush
+	 * before jumping into the main commit.
+	 */
+	struct mutex ordered_operations_mutex;
+
 	struct list_head trans_list;
 	struct list_head hashers;
 	struct list_head dead_roots;
@@ -741,10 +757,29 @@ struct btrfs_fs_info {
 	 * ordered extents
 	 */
 	spinlock_t ordered_extent_lock;
+
+	/*
+	 * all of the data=ordered extents pending writeback
+	 * these can span multiple transactions and basically include
+	 * every dirty data page that isn't from nodatacow
+	 */
 	struct list_head ordered_extents;
+
+	/*
+	 * all of the inodes that have delalloc bytes.  It is possible for
+	 * this list to be empty even when there is still dirty data=ordered
+	 * extents waiting to finish IO.
+	 */
 	struct list_head delalloc_inodes;
 
 	/*
+	 * special rename and truncate targets that must be on disk before
+	 * we're allowed to commit.  This is basically the ext3 style
+	 * data=ordered list.
+	 */
+	struct list_head ordered_operations;
+
+	/*
 	 * there is a pool of worker threads for checksumming during writes
 	 * and a pool for checksumming after reads.  This is because readers
 	 * can run with FS locks held, and the writers may be waiting for
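The comment on BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT says files larger than 8MB get
some pre-flushing when they are added to the ordered operations list. A minimal sketch
of that idea follows, assuming a queueing helper named btrfs_add_ordered_operation()
and a caller in the write/close path; neither appears in this header change, so the
names and call site are illustrative only.

/*
 * Illustrative sketch, not code from this patch: queue an inode onto
 * fs_info->ordered_operations and pre-flush it if it is larger than
 * BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT, so the eventual transaction
 * commit has less writeback left to do.  Assumes the usual btrfs and
 * linux/fs.h context; btrfs_add_ordered_operation() is an assumed helper.
 */
static void queue_for_ordered_commit(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *inode)
{
	btrfs_add_ordered_operation(trans, root, inode);

	/* big files: start background writeback now, not at commit time */
	if (i_size_read(inode) > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
		filemap_flush(inode->i_mapping);
}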
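The ordered_operations_mutex comment describes serializing whoever is walking the
list, so a non-waiting preflush that temporarily pulls entries off it cannot make the
list look empty to the commit. A rough sketch of that processing pattern is below; the
per-inode list linkage (an ordered_operations member in struct btrfs_inode), the use of
ordered_extent_lock to guard the list, and the splice-to-a-private-list approach are all
assumptions for illustration, not part of this diff.

/*
 * Sketch of the processing pattern the ordered_operations_mutex comment
 * describes.  Field names and locking details beyond what this header
 * adds are assumptions.
 */
static void run_ordered_operations(struct btrfs_fs_info *fs_info)
{
	LIST_HEAD(splice);
	struct btrfs_inode *entry;

	/*
	 * Hold the mutex for the whole walk: both the commit and the
	 * non-waiting preflush empty the list temporarily, and without
	 * the mutex one of them could see an empty list and skip work
	 * that is still in flight.
	 */
	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_extent_lock);
	list_splice_init(&fs_info->ordered_operations, &splice);

	while (!list_empty(&splice)) {
		entry = list_entry(splice.next, struct btrfs_inode,
				   ordered_operations);
		list_del_init(&entry->ordered_operations);
		spin_unlock(&fs_info->ordered_extent_lock);

		/* flush this inode's dirty data here ... */

		spin_lock(&fs_info->ordered_extent_lock);
	}
	spin_unlock(&fs_info->ordered_extent_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}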
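The ordered_operations list itself is described as holding rename and truncate targets
that must reach disk before a commit, in the style of ext3 data=ordered. As a usage
sketch, a rename path might queue the inode being renamed over so the commit flushes
its data before the new name becomes visible; the helper and its call site are again
assumptions, not shown in this hunk.

/*
 * Usage sketch (not from this patch): during a rename, put the source
 * inode on fs_info->ordered_operations so the transaction commit writes
 * its data back before the rename's metadata hits disk, mirroring ext3
 * data=ordered behaviour for the "write tmp file, rename over target"
 * pattern.  A truncate-to-zero path would queue the inode the same way.
 */
static void order_rename_data(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct inode *old_inode)
{
	/* assumed helper; it would link the inode into ordered_operations */
	btrfs_add_ordered_operation(trans, root, old_inode);
}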