author	Li Zefan <lizf@cn.fujitsu.com>	2011-04-19 22:33:24 -0400
committer	Li Zefan <lizf@cn.fujitsu.com>	2011-04-25 04:46:11 -0400
commit	82d5902d9c681be37ffa9d70482907f9f0b7ec1f (patch)
tree	c9c99f0b60004ac14d09d277d3216667df09c32d /fs/btrfs/inode-map.c
parent	33345d01522f8152f99dc84a3e7a1a45707f387f (diff)
Btrfs: Support reading/writing on disk free ino cache
This is similar to block group caching.
We dedicate a special inode in the fs tree to store the free ino cache.
The first time we create/delete a file after mount, the free ino cache
will be loaded from disk into memory. When the fs tree is committed,
the cache will be written back to disk.
To keep compatibility, we check the root generation against the
generation of the special inode when loading the cache, so loading
will fail if the filesystem was last mounted by an older kernel.
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
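The generation check described above happens inside load_free_ino_cache(), which the diff below only calls. The following is a minimal sketch of that check, not the patch's actual code; the helper names (lookup_free_ino_inode(), __load_free_space_cache()) and the return convention (1 means the cache was loaded) are assumptions based on how this patch series uses them.

/*
 * Sketch only (assumed helpers, not the patch's exact code): load the free
 * ino cache from the special inode, but only trust it when the generation
 * recorded in that inode matches the root's current generation.
 */
static int load_free_ino_cache_sketch(struct btrfs_fs_info *fs_info,
				      struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	u64 root_gen = btrfs_root_generation(&root->root_item);
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	/* The special inode may not exist yet, e.g. on a fs last written by an old kernel. */
	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	/*
	 * An older kernel never updates the special inode's generation, so a
	 * mismatch means the on-disk cache is stale; returning 0 makes the
	 * caller fall back to rebuilding the cache with the caching kthread.
	 */
	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}

In start_caching() below, a return value of 1 marks the root as BTRFS_CACHE_FINISHED, and anything else falls through to the scanning kthread.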
Diffstat (limited to 'fs/btrfs/inode-map.c')
-rw-r--r--	fs/btrfs/inode-map.c	87
1 file changed, 87 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 5be62df90c4f..7967e85c72f5 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -137,6 +137,7 @@ out:
 static void start_caching(struct btrfs_root *root)
 {
 	struct task_struct *tsk;
+	int ret;
 
 	spin_lock(&root->cache_lock);
 	if (root->cached != BTRFS_CACHE_NO) {
@@ -147,6 +148,14 @@ static void start_caching(struct btrfs_root *root)
 	root->cached = BTRFS_CACHE_STARTED;
 	spin_unlock(&root->cache_lock);
 
+	ret = load_free_ino_cache(root->fs_info, root);
+	if (ret == 1) {
+		spin_lock(&root->cache_lock);
+		root->cached = BTRFS_CACHE_FINISHED;
+		spin_unlock(&root->cache_lock);
+		return;
+	}
+
 	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
 			  root->root_key.objectid);
 	BUG_ON(IS_ERR(tsk));
@@ -352,6 +361,84 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root)
 	pinned->op = &pinned_free_ino_op;
 }
 
+int btrfs_save_ino_cache(struct btrfs_root *root,
+			 struct btrfs_trans_handle *trans)
+{
+	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+	struct btrfs_path *path;
+	struct inode *inode;
+	u64 alloc_hint = 0;
+	int ret;
+	int prealloc;
+	bool retry = false;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+again:
+	inode = lookup_free_ino_inode(root, path);
+	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
+		ret = PTR_ERR(inode);
+		goto out;
+	}
+
+	if (IS_ERR(inode)) {
+		BUG_ON(retry);
+		retry = true;
+
+		ret = create_free_ino_inode(root, trans, path);
+		if (ret)
+			goto out;
+		goto again;
+	}
+
+	BTRFS_I(inode)->generation = 0;
+	ret = btrfs_update_inode(trans, root, inode);
+	WARN_ON(ret);
+
+	if (i_size_read(inode) > 0) {
+		ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
+		if (ret)
+			goto out_put;
+	}
+
+	spin_lock(&root->cache_lock);
+	if (root->cached != BTRFS_CACHE_FINISHED) {
+		ret = -1;
+		spin_unlock(&root->cache_lock);
+		goto out_put;
+	}
+	spin_unlock(&root->cache_lock);
+
+	spin_lock(&ctl->tree_lock);
+	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
+	prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
+	prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
+	spin_unlock(&ctl->tree_lock);
+
+	/* Just to make sure we have enough space */
+	prealloc += 8 * PAGE_CACHE_SIZE;
+
+	ret = btrfs_check_data_free_space(inode, prealloc);
+	if (ret)
+		goto out_put;
+
+	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
+					      prealloc, prealloc, &alloc_hint);
+	if (ret)
+		goto out_put;
+	btrfs_free_reserved_data_space(inode, prealloc);
+
+out_put:
+	iput(inode);
+out:
+	if (ret == 0)
+		ret = btrfs_write_out_ino_cache(root, trans, path);
+
+	btrfs_free_path(path);
+	return ret;
+}
+
 static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
 {
 	struct btrfs_path *path;