aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/delayed-ref.h
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2009-03-13 10:10:06 -0400
committerChris Mason <chris.mason@oracle.com>2009-03-24 16:14:25 -0400
commit56bec294dea971335d4466b30f2d959f28f6e36d (patch)
treefc0b5bbf4bb6ab35582a4c7f58f5ac88f71c38bf /fs/btrfs/delayed-ref.h
parent9fa8cfe706f9c20067c042a064999d5825a35330 (diff)
Btrfs: do extent allocation and reference count updates in the background
The extent allocation tree maintains a reference count and full back reference information for every extent allocated in the filesystem. For subvolume and snapshot trees, every time a block goes through COW, the new copy of the block adds a reference on every block it points to. If a btree node points to 150 leaves, then the COW code needs to go and add backrefs on 150 different extents, which might be spread all over the extent allocation tree. These updates currently happen during btrfs_cow_block, and most COWs happen during btrfs_search_slot. btrfs_search_slot has locks held on both the parent and the node we are COWing, and so we really want to avoid IO during the COW if we can. This commit adds an rbtree of pending reference count updates and extent allocations. The tree is ordered by byte number of the extent and byte number of the parent for the back reference. The tree allows us to: 1) Modify back references in something close to disk order, reducing seeks 2) Significantly reduce the number of modifications made as block pointers are balanced around 3) Do all of the extent insertion and back reference modifications outside of the performance critical btrfs_search_slot code. #3 has the added benefit of greatly reducing the btrfs stack footprint. The extent allocation tree modifications are done without the deep (and somewhat recursive) call chains used in the past. These delayed back reference updates must be done before the transaction commits, and so the rbtree is tied to the transaction. Throttling is implemented to help keep the queue of backrefs at a reasonable size. Since there was a similar mechanism in place for the extent tree extents, that is removed and replaced by the delayed reference tree. Yan Zheng <yan.zheng@oracle.com> helped review and fixup this code. Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/delayed-ref.h')
-rw-r--r--fs/btrfs/delayed-ref.h182
1 files changed, 182 insertions, 0 deletions
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
new file mode 100644
index 000000000000..37919e5c007f
--- /dev/null
+++ b/fs/btrfs/delayed-ref.h
@@ -0,0 +1,182 @@
1/*
2 * Copyright (C) 2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 16 * Boston, MA 02111-1307, USA.
17 */
18#ifndef __DELAYED_REF__
19#define __DELAYED_REF__
20
/*
 * these are the possible values of struct btrfs_delayed_ref->action,
 * i.e. what processing a queued entry will do to the extent tree.
 */
#define BTRFS_ADD_DELAYED_REF    1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF   2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
25
/*
 * one node in the rbtree of pending delayed ref updates.  This struct is
 * embedded in both btrfs_delayed_ref_head and btrfs_delayed_ref and is
 * never allocated on its own.
 */
struct btrfs_delayed_ref_node {
	/* links this node into btrfs_delayed_ref_root->root */
	struct rb_node rb_node;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the parent our backref will point to */
	u64 parent;

	/* the size of the extent */
	u64 num_bytes;

	/* ref count on this data structure (not on the extent) */
	atomic_t refs;

	/*
	 * how many refs is this entry adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent
	 */
	int ref_mod;

	/* is this node still in the rbtree? */
	unsigned int in_tree:1;
};
55
/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 *
 * A node is a head ref when node.parent == (u64)-1; see
 * btrfs_delayed_ref_is_head() below.
 */
struct btrfs_delayed_ref_head {
	struct btrfs_delayed_ref_node node;

	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	/*
	 * when a new extent is allocated, it is just reserved in memory
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in ram accounting to properly reflect
	 * the free has happened.
	 */
	unsigned int must_insert_reserved:1;
};
85
/*
 * a single queued backref modification: where the backref points and
 * whether we are adding or dropping it (see the action field).
 */
struct btrfs_delayed_ref {
	struct btrfs_delayed_ref_node node;

	/* the root objectid our ref will point to */
	u64 root;

	/* the generation for the backref */
	u64 generation;

	/* owner_objectid of the backref */
	u64 owner_objectid;

	/* operation done by this entry in the rbtree: one of the
	 * BTRFS_*_DELAYED_* action values defined above
	 */
	u8 action;

	/* if pin == 1, when the extent is freed it will be pinned until
	 * transaction commit
	 */
	unsigned int pin:1;
};
106
/*
 * per-transaction root of the delayed ref rbtree, plus the bookkeeping
 * the throttling code uses to keep the queue at a reasonable size.
 */
struct btrfs_delayed_ref_root {
	struct rb_root root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/* how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	unsigned long num_entries;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;
};
125
126static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
127{
128 WARN_ON(atomic_read(&ref->refs) == 0);
129 if (atomic_dec_and_test(&ref->refs)) {
130 WARN_ON(ref->in_tree);
131 kfree(ref);
132 }
133}
134
/*
 * queue a reference count modification for the given extent; action is one
 * of the BTRFS_*_DELAYED_* values above.  Implemented in delayed-ref.c;
 * presumably returns 0 on success -- confirm against the .c file.
 */
int btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
			  u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root,
			  u64 ref_generation, u64 owner_objectid, int action,
			  int pin);

/*
 * look up a queued delayed ref by extent bytenr and backref parent
 * (the rbtree's sort keys).  NOTE(review): likely returns NULL when no
 * entry is pending -- verify in delayed-ref.c.
 */
struct btrfs_delayed_ref *
btrfs_find_delayed_ref(struct btrfs_trans_handle *trans, u64 bytenr,
		       u64 parent);
/* test whether any delayed ref update is queued for this extent */
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr);
/*
 * take the per-extent head mutex for the head ref that owns this node;
 * next_ret semantics are defined by the implementation in delayed-ref.c.
 */
int btrfs_lock_delayed_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_node *ref,
			   struct btrfs_delayed_ref_head **next_ret);
/* read the current reference count of an extent into *refs */
int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 bytenr,
			    u64 num_bytes, u32 *refs);
/*
 * replace a queued backref (orig_parent/orig_ref_root/orig_ref_generation)
 * with an updated one, e.g. after a block is relocated during balancing.
 */
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, u64 orig_parent,
			     u64 parent, u64 orig_ref_root, u64 ref_root,
			     u64 orig_ref_generation, u64 ref_generation,
			     u64 owner_objectid, int pin);
155/*
156 * a node might live in a head or a regular ref, this lets you
157 * test for the proper type to use.
158 */
159static int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
160{
161 return node->parent == (u64)-1;
162}
163
164/*
165 * helper functions to cast a node into its container
166 */
167static inline struct btrfs_delayed_ref *
168btrfs_delayed_node_to_ref(struct btrfs_delayed_ref_node *node)
169{
170 WARN_ON(btrfs_delayed_ref_is_head(node));
171 return container_of(node, struct btrfs_delayed_ref, node);
172
173}
174
175static inline struct btrfs_delayed_ref_head *
176btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
177{
178 WARN_ON(!btrfs_delayed_ref_is_head(node));
179 return container_of(node, struct btrfs_delayed_ref_head, node);
180
181}
182#endif