about summary refs log tree commit diff stats
path: root/fs/btrfs
diff options
context:
space:
mode:
authorYan Zheng <zheng.yan@oracle.com>2009-07-22 10:07:05 -0400
committerChris Mason <chris.mason@oracle.com>2009-07-22 10:07:05 -0400
commit4a8c9a62d7f7f058eed4b8a6f2c890a887778093 (patch)
treed3d099197e5ddc94f737dbad273810b902b05307 /fs/btrfs
parent33c66f430bfa3a033e70470e4c93f967156b696d (diff)
Btrfs: make sure all dirty blocks are written at commit time
Writing dirty block groups may allocate new blocks, and so may add new delayed back refs. btrfs_run_delayed_refs may make some block groups dirty. commit_cowonly_roots does not handle the recursion properly, so some dirty blocks can be left unwritten at commit time. This patch moves btrfs_run_delayed_refs into the loop that writes dirty block groups, and keeps the code looping until there are no dirty block groups or delayed back refs left. Signed-off-by: Yan Zheng <zheng.yan@oracle.com> Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs')
-rw-r--r--fs/btrfs/extent-tree.c70
-rw-r--r--fs/btrfs/transaction.c9
2 files changed, 42 insertions, 37 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a5aca3997d42..62a332d34fdb 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2387,13 +2387,29 @@ fail:
2387 2387
2388} 2388}
2389 2389
2390static struct btrfs_block_group_cache *
2391next_block_group(struct btrfs_root *root,
2392 struct btrfs_block_group_cache *cache)
2393{
2394 struct rb_node *node;
2395 spin_lock(&root->fs_info->block_group_cache_lock);
2396 node = rb_next(&cache->cache_node);
2397 btrfs_put_block_group(cache);
2398 if (node) {
2399 cache = rb_entry(node, struct btrfs_block_group_cache,
2400 cache_node);
2401 atomic_inc(&cache->count);
2402 } else
2403 cache = NULL;
2404 spin_unlock(&root->fs_info->block_group_cache_lock);
2405 return cache;
2406}
2407
2390int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, 2408int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2391 struct btrfs_root *root) 2409 struct btrfs_root *root)
2392{ 2410{
2393 struct btrfs_block_group_cache *cache, *entry; 2411 struct btrfs_block_group_cache *cache;
2394 struct rb_node *n;
2395 int err = 0; 2412 int err = 0;
2396 int werr = 0;
2397 struct btrfs_path *path; 2413 struct btrfs_path *path;
2398 u64 last = 0; 2414 u64 last = 0;
2399 2415
@@ -2402,39 +2418,35 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2402 return -ENOMEM; 2418 return -ENOMEM;
2403 2419
2404 while (1) { 2420 while (1) {
2405 cache = NULL; 2421 if (last == 0) {
2406 spin_lock(&root->fs_info->block_group_cache_lock); 2422 err = btrfs_run_delayed_refs(trans, root,
2407 for (n = rb_first(&root->fs_info->block_group_cache_tree); 2423 (unsigned long)-1);
2408 n; n = rb_next(n)) { 2424 BUG_ON(err);
2409 entry = rb_entry(n, struct btrfs_block_group_cache,
2410 cache_node);
2411 if (entry->dirty) {
2412 cache = entry;
2413 break;
2414 }
2415 } 2425 }
2416 spin_unlock(&root->fs_info->block_group_cache_lock);
2417 2426
2418 if (!cache) 2427 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2419 break; 2428 while (cache) {
2429 if (cache->dirty)
2430 break;
2431 cache = next_block_group(root, cache);
2432 }
2433 if (!cache) {
2434 if (last == 0)
2435 break;
2436 last = 0;
2437 continue;
2438 }
2420 2439
2421 cache->dirty = 0; 2440 cache->dirty = 0;
2422 last += cache->key.offset; 2441 last = cache->key.objectid + cache->key.offset;
2423 2442
2424 err = write_one_cache_group(trans, root, 2443 err = write_one_cache_group(trans, root, path, cache);
2425 path, cache); 2444 BUG_ON(err);
2426 /* 2445 btrfs_put_block_group(cache);
2427 * if we fail to write the cache group, we want
2428 * to keep it marked dirty in hopes that a later
2429 * write will work
2430 */
2431 if (err) {
2432 werr = err;
2433 continue;
2434 }
2435 } 2446 }
2447
2436 btrfs_free_path(path); 2448 btrfs_free_path(path);
2437 return werr; 2449 return 0;
2438} 2450}
2439 2451
2440int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) 2452int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 2dbf1c1f56ee..81f7124c3051 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -444,9 +444,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
444 444
445 btrfs_write_dirty_block_groups(trans, root); 445 btrfs_write_dirty_block_groups(trans, root);
446 446
447 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
448 BUG_ON(ret);
449
450 while (1) { 447 while (1) {
451 old_root_bytenr = btrfs_root_bytenr(&root->root_item); 448 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
452 if (old_root_bytenr == root->node->start) 449 if (old_root_bytenr == root->node->start)
@@ -457,9 +454,8 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
457 &root->root_key, 454 &root->root_key,
458 &root->root_item); 455 &root->root_item);
459 BUG_ON(ret); 456 BUG_ON(ret);
460 btrfs_write_dirty_block_groups(trans, root);
461 457
462 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 458 ret = btrfs_write_dirty_block_groups(trans, root);
463 BUG_ON(ret); 459 BUG_ON(ret);
464 } 460 }
465 free_extent_buffer(root->commit_root); 461 free_extent_buffer(root->commit_root);
@@ -495,9 +491,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
495 root = list_entry(next, struct btrfs_root, dirty_list); 491 root = list_entry(next, struct btrfs_root, dirty_list);
496 492
497 update_cowonly_root(trans, root); 493 update_cowonly_root(trans, root);
498
499 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
500 BUG_ON(ret);
501 } 494 }
502 return 0; 495 return 0;
503} 496}