path: root/fs/btrfs/inode-map.c
author    Li Zefan <lizf@cn.fujitsu.com>  2011-04-19 22:06:11 -0400
committer Li Zefan <lizf@cn.fujitsu.com>  2011-04-25 04:46:04 -0400
commit    581bb050941b4f220f84d3e5ed6dace3d42dd382 (patch)
tree      5ebd56af5eb3612f508419b188dfc18e959e7c94 /fs/btrfs/inode-map.c
parent    34d52cb6c50b5a43901709998f59fb1c5a43dc4a (diff)
Btrfs: Cache free inode numbers in memory
Currently btrfs stores the highest objectid of the fs tree, and it always
returns (highest+1) as the inode number when we create a file. Inode
numbers are never reclaimed when we delete files, so we will run out of
inode numbers as we keep creating and deleting files on 32-bit machines.

This fixes it, and it works similarly to how we cache free space in block
groups.

We start a kernel thread to read the file tree. By scanning inode items,
we know which chunks of inode numbers are free, and we cache them in an
rb-tree.

Because we are searching the commit root, we have to carefully handle the
cross-transaction case.

The rb-tree is a hybrid extent+bitmap tree, so if we have too many small
chunks of inode numbers, we'll use bitmaps. Initially we allow 16K of RAM
for extents, and a bitmap will be used if we exceed this threshold. The
extents threshold is adjusted at runtime.

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
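To make the scan concrete, here is a minimal userspace sketch of the
gap-scan idea described above (illustrative only, not the kernel code):
walking the objectids of the inode items in key order, every hole between
consecutive ids becomes a free extent, mirroring the "last + 1 !=
key.objectid" check in caching_kthread() in the patch below. The two
constants and the add_free_extent() helper are stand-ins.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define FIRST_FREE_OBJECTID 256ULL	/* stand-in for BTRFS_FIRST_FREE_OBJECTID */
#define LAST_FREE_OBJECTID 1000ULL	/* tiny stand-in for BTRFS_LAST_FREE_OBJECTID */

/* stand-in for __btrfs_add_free_space(): record one free extent */
static void add_free_extent(uint64_t start, uint64_t count)
{
	printf("free extent: [%llu, %llu]\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + count - 1));
}

int main(void)
{
	/* objectids of existing inode items, in key order as a scan sees them */
	uint64_t used[] = { 256, 257, 260, 300 };
	uint64_t last = FIRST_FREE_OBJECTID - 1;
	size_t i;

	for (i = 0; i < sizeof(used) / sizeof(used[0]); i++) {
		if (used[i] != last + 1)
			add_free_extent(last + 1, used[i] - last - 1);
		last = used[i];
	}
	/* everything above the highest used id is free too */
	if (last < LAST_FREE_OBJECTID - 1)
		add_free_extent(last + 1, LAST_FREE_OBJECTID - last - 1);
	return 0;
}

Run against the four ids above, this prints [258, 259], [261, 299] and
[301, 999]; in the kernel those chunks land in the free_ino rb-tree
instead of stdout.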
Diffstat (limited to 'fs/btrfs/inode-map.c')
-rw-r--r--   fs/btrfs/inode-map.c   341
1 file changed, 336 insertions(+), 5 deletions(-)
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index c05a08f4c411..5be62df90c4f 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -16,11 +16,343 @@
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+
 #include "ctree.h"
 #include "disk-io.h"
+#include "free-space-cache.h"
+#include "inode-map.h"
 #include "transaction.h"
 
-int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid)
+static int caching_kthread(void *data)
+{
+	struct btrfs_root *root = data;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_key key;
+	struct btrfs_path *path;
+	struct extent_buffer *leaf;
+	u64 last = (u64)-1;
+	int slot;
+	int ret;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	/* Since the commit root is read-only, we can safely skip locking. */
+	path->skip_locking = 1;
+	path->search_commit_root = 1;
+	path->reada = 2;
+
+	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
+	key.offset = 0;
+	key.type = BTRFS_INODE_ITEM_KEY;
+again:
+	/* need to make sure the commit_root doesn't disappear */
+	mutex_lock(&root->fs_commit_mutex);
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+
+	while (1) {
+		smp_mb();
+		if (fs_info->closing > 1)
+			goto out;
+
+		leaf = path->nodes[0];
+		slot = path->slots[0];
+		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+			ret = btrfs_next_leaf(root, path);
+			if (ret < 0)
+				goto out;
+			else if (ret > 0)
+				break;
+
+			if (need_resched() ||
+			    btrfs_transaction_in_commit(fs_info)) {
+				leaf = path->nodes[0];
+
+				if (btrfs_header_nritems(leaf) == 0) {
+					WARN_ON(1);
+					break;
+				}
+
+				/*
+				 * Save the key so we can advance forward
+				 * in the next search.
+				 */
+				btrfs_item_key_to_cpu(leaf, &key, 0);
+				btrfs_release_path(root, path);
+				root->cache_progress = last;
+				mutex_unlock(&root->fs_commit_mutex);
+				schedule_timeout(1);
+				goto again;
+			} else
+				continue;
+		}
+
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+
+		if (key.type != BTRFS_INODE_ITEM_KEY)
+			goto next;
+
+		if (key.objectid >= BTRFS_LAST_FREE_OBJECTID)
+			break;
+
+		if (last != (u64)-1 && last + 1 != key.objectid) {
+			__btrfs_add_free_space(ctl, last + 1,
+					       key.objectid - last - 1);
+			wake_up(&root->cache_wait);
+		}
+
+		last = key.objectid;
+next:
+		path->slots[0]++;
+	}
+
+	if (last < BTRFS_LAST_FREE_OBJECTID - 1) {
+		__btrfs_add_free_space(ctl, last + 1,
+				       BTRFS_LAST_FREE_OBJECTID - last - 1);
+	}
+
+	spin_lock(&root->cache_lock);
+	root->cached = BTRFS_CACHE_FINISHED;
+	spin_unlock(&root->cache_lock);
+
+	root->cache_progress = (u64)-1;
+	btrfs_unpin_free_ino(root);
+out:
+	wake_up(&root->cache_wait);
+	mutex_unlock(&root->fs_commit_mutex);
+
+	btrfs_free_path(path);
+
+	return ret;
+}
+
+static void start_caching(struct btrfs_root *root)
+{
+	struct task_struct *tsk;
+
+	spin_lock(&root->cache_lock);
+	if (root->cached != BTRFS_CACHE_NO) {
+		spin_unlock(&root->cache_lock);
+		return;
+	}
+
+	root->cached = BTRFS_CACHE_STARTED;
+	spin_unlock(&root->cache_lock);
+
+	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
+			  root->root_key.objectid);
+	BUG_ON(IS_ERR(tsk));
+}
+
+int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
+{
+again:
+	*objectid = btrfs_find_ino_for_alloc(root);
+
+	if (*objectid != 0)
+		return 0;
+
+	start_caching(root);
+
+	wait_event(root->cache_wait,
+		   root->cached == BTRFS_CACHE_FINISHED ||
+		   root->free_ino_ctl->free_space > 0);
+
+	if (root->cached == BTRFS_CACHE_FINISHED &&
+	    root->free_ino_ctl->free_space == 0)
+		return -ENOSPC;
+	else
+		goto again;
+}
+
+void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+again:
+	if (root->cached == BTRFS_CACHE_FINISHED) {
+		__btrfs_add_free_space(ctl, objectid, 1);
+	} else {
+		/*
+		 * If we are in the process of caching free ino chunks,
+		 * we leave it in the pinned tree until a transaction is
+		 * committed or the caching work is done, to avoid adding
+		 * the same inode number to the free_ino tree twice across
+		 * transactions.
+		 */
+
+		mutex_lock(&root->fs_commit_mutex);
+		spin_lock(&root->cache_lock);
+		if (root->cached == BTRFS_CACHE_FINISHED) {
+			spin_unlock(&root->cache_lock);
+			mutex_unlock(&root->fs_commit_mutex);
+			goto again;
+		}
+		spin_unlock(&root->cache_lock);
+
+		start_caching(root);
+
+		if (objectid <= root->cache_progress)
+			__btrfs_add_free_space(ctl, objectid, 1);
+		else
+			__btrfs_add_free_space(pinned, objectid, 1);
+
+		mutex_unlock(&root->fs_commit_mutex);
+	}
+}
+
+/*
+ * When a transaction is committed, we'll move those inode numbers which
+ * are smaller than root->cache_progress from the pinned tree to the
+ * free_ino tree, and drop the others, because the commit root we were
+ * searching has changed.
+ *
+ * Must be called with root->fs_commit_mutex held.
+ */
+void btrfs_unpin_free_ino(struct btrfs_root *root)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
+	struct btrfs_free_space *info;
+	struct rb_node *n;
+	u64 count;
+
+	while (1) {
+		n = rb_first(rbroot);
+		if (!n)
+			break;
+
+		info = rb_entry(n, struct btrfs_free_space, offset_index);
+		BUG_ON(info->bitmap);
+
+		if (info->offset > root->cache_progress)
+			goto free;
+		else if (info->offset + info->bytes > root->cache_progress)
+			count = root->cache_progress - info->offset + 1;
+		else
+			count = info->bytes;
+
+		__btrfs_add_free_space(ctl, info->offset, count);
+free:
+		rb_erase(&info->offset_index, rbroot);
+		kfree(info);
+	}
+}
+
+#define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
+#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+
+/*
+ * The goal is to keep the memory used by the free_ino tree from
+ * exceeding what it would use if we stored everything in bitmaps.
+ */
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
+{
+	struct btrfs_free_space *info;
+	struct rb_node *n;
+	int max_ino;
+	int max_bitmaps;
+
+	n = rb_last(&ctl->free_space_offset);
+	if (!n) {
+		ctl->extents_thresh = INIT_THRESHOLD;
+		return;
+	}
+	info = rb_entry(n, struct btrfs_free_space, offset_index);
+
+	/*
+	 * Find the maximum inode number in the filesystem. Note that
+	 * this entry can be a bitmap; we ignore that, because we are
+	 * not doing a precise calculation.
+	 */
+	max_ino = info->bytes - 1;
+
+	max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
+	if (max_bitmaps <= ctl->total_bitmaps) {
+		ctl->extents_thresh = 0;
+		return;
+	}
+
+	ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
+				PAGE_CACHE_SIZE / sizeof(*info);
+}
+
+/*
+ * We don't fall back to a bitmap if we are below the extents threshold
+ * or this chunk of inode numbers is a big one.
+ */
+static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
+		       struct btrfs_free_space *info)
+{
+	if (ctl->free_extents < ctl->extents_thresh ||
+	    info->bytes > INODES_PER_BITMAP / 10)
+		return false;
+
+	return true;
+}
+
+static struct btrfs_free_space_op free_ino_op = {
+	.recalc_thresholds	= recalculate_thresholds,
+	.use_bitmap		= use_bitmap,
+};
+
+static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
+{
+}
+
+static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
+			      struct btrfs_free_space *info)
+{
+	/*
+	 * We always use extents for two reasons:
+	 *
+	 * - The pinned tree is only used while the caching work is
+	 *   in progress.
+	 * - It keeps the code simpler; see btrfs_unpin_free_ino().
+	 */
+	return false;
+}
+
+static struct btrfs_free_space_op pinned_free_ino_op = {
+	.recalc_thresholds	= pinned_recalc_thresholds,
+	.use_bitmap		= pinned_use_bitmap,
+};
+
+void btrfs_init_free_ino_ctl(struct btrfs_root *root)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+
+	spin_lock_init(&ctl->tree_lock);
+	ctl->unit = 1;
+	ctl->start = 0;
+	ctl->private = NULL;
+	ctl->op = &free_ino_op;
+
+	/*
+	 * Initially we allow 16K of RAM to cache chunks of inode
+	 * numbers before we resort to bitmaps. This is somewhat
+	 * arbitrary, and it will be adjusted at runtime.
+	 */
+	ctl->extents_thresh = INIT_THRESHOLD;
+
+	spin_lock_init(&pinned->tree_lock);
+	pinned->unit = 1;
+	pinned->start = 0;
+	pinned->private = NULL;
+	pinned->extents_thresh = 0;
+	pinned->op = &pinned_free_ino_op;
+}
+
+static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
 {
 	struct btrfs_path *path;
 	int ret;
@@ -55,15 +387,14 @@ error:
 	return ret;
 }
 
-int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root,
-			     u64 dirid, u64 *objectid)
+int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
 {
 	int ret;
 	mutex_lock(&root->objectid_mutex);
 
 	if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
-		ret = btrfs_find_highest_inode(root, &root->highest_objectid);
+		ret = btrfs_find_highest_objectid(root,
+						  &root->highest_objectid);
 		if (ret)
 			goto out;
 	}
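For reference, the extent-vs-bitmap tradeoff that recalculate_thresholds()
and use_bitmap() implement above boils down to a small amount of
arithmetic: an extent entry costs a fixed number of bytes no matter how
many inode numbers it spans, while a bitmap covers PAGE_SIZE * 8 numbers
at one bit each, so small chunks go to bitmaps once the extent budget is
spent and big chunks always stay as extents. Below is a standalone sketch
with stand-in types and constants (not the kernel structs).

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096
#define INODES_PER_BITMAP (PAGE_SIZE * 8)

/* stand-in for struct btrfs_free_space */
struct free_chunk {
	unsigned long long start;
	unsigned long long count;
};

/* 16K of RAM for extent entries, the same budget as INIT_THRESHOLD */
#define EXTENT_BUDGET ((16 * 1024) / sizeof(struct free_chunk))

static bool chunk_should_use_bitmap(size_t nr_extents,
				    const struct free_chunk *chunk)
{
	/*
	 * Keep using extents while we are under budget; also keep big
	 * chunks (more than a tenth of a bitmap's range) as extents,
	 * since one entry then replaces many bits.
	 */
	if (nr_extents < EXTENT_BUDGET ||
	    chunk->count > INODES_PER_BITMAP / 10)
		return false;
	return true;
}

With a 4K page a bitmap covers 32768 inode numbers, so under this rule any
chunk longer than 3276 numbers stays an extent regardless of the budget.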