Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	70
1 file changed, 41 insertions(+), 29 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a5aca3997d42..62a332d34fdb 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2387,13 +2387,29 @@ fail:
 
 }
 
+static struct btrfs_block_group_cache *
+next_block_group(struct btrfs_root *root,
+		 struct btrfs_block_group_cache *cache)
+{
+	struct rb_node *node;
+	spin_lock(&root->fs_info->block_group_cache_lock);
+	node = rb_next(&cache->cache_node);
+	btrfs_put_block_group(cache);
+	if (node) {
+		cache = rb_entry(node, struct btrfs_block_group_cache,
+				 cache_node);
+		atomic_inc(&cache->count);
+	} else
+		cache = NULL;
+	spin_unlock(&root->fs_info->block_group_cache_lock);
+	return cache;
+}
+
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 				   struct btrfs_root *root)
 {
-	struct btrfs_block_group_cache *cache, *entry;
-	struct rb_node *n;
+	struct btrfs_block_group_cache *cache;
 	int err = 0;
-	int werr = 0;
 	struct btrfs_path *path;
 	u64 last = 0;
 
@@ -2402,39 +2418,35 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
 		return -ENOMEM;
 
 	while (1) {
-		cache = NULL;
-		spin_lock(&root->fs_info->block_group_cache_lock);
-		for (n = rb_first(&root->fs_info->block_group_cache_tree);
-		     n; n = rb_next(n)) {
-			entry = rb_entry(n, struct btrfs_block_group_cache,
-					 cache_node);
-			if (entry->dirty) {
-				cache = entry;
-				break;
-			}
+		if (last == 0) {
+			err = btrfs_run_delayed_refs(trans, root,
+						     (unsigned long)-1);
+			BUG_ON(err);
 		}
-		spin_unlock(&root->fs_info->block_group_cache_lock);
 
-		if (!cache)
-			break;
+		cache = btrfs_lookup_first_block_group(root->fs_info, last);
+		while (cache) {
+			if (cache->dirty)
+				break;
+			cache = next_block_group(root, cache);
+		}
+		if (!cache) {
+			if (last == 0)
+				break;
+			last = 0;
+			continue;
+		}
 
 		cache->dirty = 0;
-		last += cache->key.offset;
+		last = cache->key.objectid + cache->key.offset;
 
-		err = write_one_cache_group(trans, root,
-					    path, cache);
-		/*
-		 * if we fail to write the cache group, we want
-		 * to keep it marked dirty in hopes that a later
-		 * write will work
-		 */
-		if (err) {
-			werr = err;
-			continue;
-		}
+		err = write_one_cache_group(trans, root, path, cache);
+		BUG_ON(err);
+		btrfs_put_block_group(cache);
 	}
+
 	btrfs_free_path(path);
-	return werr;
+	return 0;
 }
 
 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
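
The next_block_group() helper added above replaces the old pattern of rescanning the whole block_group_cache_tree under the lock on every pass: while block_group_cache_lock is held it steps to the next rb node, drops the caller's reference on the current group and takes one on the next, so btrfs_write_dirty_block_groups() can walk every block group without keeping the lock held between steps. Below is a minimal standalone sketch of that reference hand-off pattern; it uses an invented refcounted singly linked list protected by a pthread mutex purely for illustration and is not the btrfs or kernel API.

/*
 * Illustrative sketch only: invented node type and helpers, not kernel code.
 * The point is the hand-off in next_node(): the lock is held while we drop
 * the reference on the node we are leaving and pin the one we move to, so
 * the walker never holds an unreferenced pointer outside the lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int refs;		/* protected by list_lock in this sketch */
	int dirty;
	int id;
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void put_node(struct node *n)	/* caller holds list_lock */
{
	if (--n->refs == 0)
		free(n);
}

/* drop the caller's reference on 'cur', return the next node with a new ref */
static struct node *next_node(struct node *cur)
{
	struct node *next;

	pthread_mutex_lock(&list_lock);
	next = cur->next;
	put_node(cur);
	if (next)
		next->refs++;
	pthread_mutex_unlock(&list_lock);
	return next;
}

int main(void)
{
	struct node *n;

	/* build a small list; the list itself owns one reference per node */
	for (int i = 3; i >= 0; i--) {
		n = calloc(1, sizeof(*n));
		n->id = i;
		n->dirty = i & 1;
		n->refs = 1;
		n->next = head;
		head = n;
	}

	pthread_mutex_lock(&list_lock);
	n = head;
	n->refs++;		/* the walker's own reference */
	pthread_mutex_unlock(&list_lock);

	while (n) {
		if (n->dirty)
			printf("would write back dirty node %d\n", n->id);
		n = next_node(n);
	}
	return 0;
}

Because both the put and the get happen inside the locked section, a concurrent remover cannot free the next node between the two steps.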