aboutsummaryrefslogtreecommitdiffstats
path: root/fs
diff options
context:
space:
mode:
authorLiu Bo <bo.li.liu@oracle.com>2013-04-26 22:56:57 -0400
committerJosef Bacik <jbacik@fusionio.com>2013-05-06 15:55:26 -0400
commit625f1c8dc66d77878e1a563d6dd5722404968fbf (patch)
tree76277ca036ae9910b5ec94a5263907c4c5ffd48b /fs
parent55793c0d0381176e727389325d9a47f7f0b5387f (diff)
Btrfs: improve the loop of scrub_stripe
1) Right now scrub_stripe() is looping in some unnecessary cases: * when the found extent item's objectid has been out of the dev extent's range but we haven't finished scanning all the range within the dev extent * when all the items have been processed but we haven't finished scanning all the range within the dev extent In both cases, we can just finish the loop to save costs. 2) Besides, when the found extent item's length is larger than the stripe len (64k), we don't have to release the path and search again as it'll get at the same key used in the last loop, we can instead increase the logical cursor in place till all space of the extent is scanned. 3) And we use 0 as the key's offset to search btree, then get to previous item to find a smaller item, and again have to move to the next one to get the right item. Setting offset=-1 and previous_item() is the correct way. 4) As we won't find any checksum at offset unless this 'offset' is in a data extent, we can just find checksum when we're really going to scrub an extent. Signed-off-by: Liu Bo <bo.li.liu@oracle.com> Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Diffstat (limited to 'fs')
-rw-r--r--fs/btrfs/scrub.c83
1 files changed, 57 insertions, 26 deletions
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 47500c25262e..f489e24659a4 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2233,12 +2233,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2233 u64 flags; 2233 u64 flags;
2234 int ret; 2234 int ret;
2235 int slot; 2235 int slot;
2236 int i;
2237 u64 nstripes; 2236 u64 nstripes;
2238 struct extent_buffer *l; 2237 struct extent_buffer *l;
2239 struct btrfs_key key; 2238 struct btrfs_key key;
2240 u64 physical; 2239 u64 physical;
2241 u64 logical; 2240 u64 logical;
2241 u64 logic_end;
2242 u64 generation; 2242 u64 generation;
2243 int mirror_num; 2243 int mirror_num;
2244 struct reada_control *reada1; 2244 struct reada_control *reada1;
@@ -2252,6 +2252,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2252 u64 extent_len; 2252 u64 extent_len;
2253 struct btrfs_device *extent_dev; 2253 struct btrfs_device *extent_dev;
2254 int extent_mirror_num; 2254 int extent_mirror_num;
2255 int stop_loop;
2255 2256
2256 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | 2257 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2257 BTRFS_BLOCK_GROUP_RAID6)) { 2258 BTRFS_BLOCK_GROUP_RAID6)) {
@@ -2351,8 +2352,9 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2351 */ 2352 */
2352 logical = base + offset; 2353 logical = base + offset;
2353 physical = map->stripes[num].physical; 2354 physical = map->stripes[num].physical;
2355 logic_end = logical + increment * nstripes;
2354 ret = 0; 2356 ret = 0;
2355 for (i = 0; i < nstripes; ++i) { 2357 while (logical < logic_end) {
2356 /* 2358 /*
2357 * canceled? 2359 * canceled?
2358 */ 2360 */
@@ -2388,15 +2390,9 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2388 wake_up(&fs_info->scrub_pause_wait); 2390 wake_up(&fs_info->scrub_pause_wait);
2389 } 2391 }
2390 2392
2391 ret = btrfs_lookup_csums_range(csum_root, logical,
2392 logical + map->stripe_len - 1,
2393 &sctx->csum_list, 1);
2394 if (ret)
2395 goto out;
2396
2397 key.objectid = logical; 2393 key.objectid = logical;
2398 key.type = BTRFS_EXTENT_ITEM_KEY; 2394 key.type = BTRFS_EXTENT_ITEM_KEY;
2399 key.offset = (u64)0; 2395 key.offset = (u64)-1;
2400 2396
2401 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2397 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2402 if (ret < 0) 2398 if (ret < 0)
@@ -2418,6 +2414,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2418 } 2414 }
2419 } 2415 }
2420 2416
2417 stop_loop = 0;
2421 while (1) { 2418 while (1) {
2422 u64 bytes; 2419 u64 bytes;
2423 2420
@@ -2430,14 +2427,11 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2430 if (ret < 0) 2427 if (ret < 0)
2431 goto out; 2428 goto out;
2432 2429
2430 stop_loop = 1;
2433 break; 2431 break;
2434 } 2432 }
2435 btrfs_item_key_to_cpu(l, &key, slot); 2433 btrfs_item_key_to_cpu(l, &key, slot);
2436 2434
2437 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2438 key.type != BTRFS_METADATA_ITEM_KEY)
2439 goto next;
2440
2441 if (key.type == BTRFS_METADATA_ITEM_KEY) 2435 if (key.type == BTRFS_METADATA_ITEM_KEY)
2442 bytes = root->leafsize; 2436 bytes = root->leafsize;
2443 else 2437 else
@@ -2446,9 +2440,16 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2446 if (key.objectid + bytes <= logical) 2440 if (key.objectid + bytes <= logical)
2447 goto next; 2441 goto next;
2448 2442
2449 if (key.objectid >= logical + map->stripe_len) 2443 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2450 break; 2444 key.type != BTRFS_METADATA_ITEM_KEY)
2445 goto next;
2451 2446
2447 if (key.objectid >= logical + map->stripe_len) {
2448 /* out of this device extent */
2449 if (key.objectid >= logic_end)
2450 stop_loop = 1;
2451 break;
2452 }
2452 2453
2453 extent = btrfs_item_ptr(l, slot, 2454 extent = btrfs_item_ptr(l, slot,
2454 struct btrfs_extent_item); 2455 struct btrfs_extent_item);
@@ -2465,22 +2466,24 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2465 goto next; 2466 goto next;
2466 } 2467 }
2467 2468
2469again:
2470 extent_logical = key.objectid;
2471 extent_len = bytes;
2472
2468 /* 2473 /*
2469 * trim extent to this stripe 2474 * trim extent to this stripe
2470 */ 2475 */
2471 if (key.objectid < logical) { 2476 if (extent_logical < logical) {
2472 bytes -= logical - key.objectid; 2477 extent_len -= logical - extent_logical;
2473 key.objectid = logical; 2478 extent_logical = logical;
2474 } 2479 }
2475 if (key.objectid + bytes > 2480 if (extent_logical + extent_len >
2476 logical + map->stripe_len) { 2481 logical + map->stripe_len) {
2477 bytes = logical + map->stripe_len - 2482 extent_len = logical + map->stripe_len -
2478 key.objectid; 2483 extent_logical;
2479 } 2484 }
2480 2485
2481 extent_logical = key.objectid; 2486 extent_physical = extent_logical - logical + physical;
2482 extent_physical = key.objectid - logical + physical;
2483 extent_len = bytes;
2484 extent_dev = scrub_dev; 2487 extent_dev = scrub_dev;
2485 extent_mirror_num = mirror_num; 2488 extent_mirror_num = mirror_num;
2486 if (is_dev_replace) 2489 if (is_dev_replace)
@@ -2488,13 +2491,35 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2488 extent_len, &extent_physical, 2491 extent_len, &extent_physical,
2489 &extent_dev, 2492 &extent_dev,
2490 &extent_mirror_num); 2493 &extent_mirror_num);
2494
2495 ret = btrfs_lookup_csums_range(csum_root, logical,
2496 logical + map->stripe_len - 1,
2497 &sctx->csum_list, 1);
2498 if (ret)
2499 goto out;
2500
2491 ret = scrub_extent(sctx, extent_logical, extent_len, 2501 ret = scrub_extent(sctx, extent_logical, extent_len,
2492 extent_physical, extent_dev, flags, 2502 extent_physical, extent_dev, flags,
2493 generation, extent_mirror_num, 2503 generation, extent_mirror_num,
2494 key.objectid - logical + physical); 2504 extent_physical);
2495 if (ret) 2505 if (ret)
2496 goto out; 2506 goto out;
2497 2507
2508 if (extent_logical + extent_len <
2509 key.objectid + bytes) {
2510 logical += increment;
2511 physical += map->stripe_len;
2512
2513 if (logical < key.objectid + bytes) {
2514 cond_resched();
2515 goto again;
2516 }
2517
2518 if (logical >= logic_end) {
2519 stop_loop = 1;
2520 break;
2521 }
2522 }
2498next: 2523next:
2499 path->slots[0]++; 2524 path->slots[0]++;
2500 } 2525 }
@@ -2502,8 +2527,14 @@ next:
2502 logical += increment; 2527 logical += increment;
2503 physical += map->stripe_len; 2528 physical += map->stripe_len;
2504 spin_lock(&sctx->stat_lock); 2529 spin_lock(&sctx->stat_lock);
2505 sctx->stat.last_physical = physical; 2530 if (stop_loop)
2531 sctx->stat.last_physical = map->stripes[num].physical +
2532 length;
2533 else
2534 sctx->stat.last_physical = physical;
2506 spin_unlock(&sctx->stat_lock); 2535 spin_unlock(&sctx->stat_lock);
2536 if (stop_loop)
2537 break;
2507 } 2538 }
2508out: 2539out:
2509 /* push queued extents */ 2540 /* push queued extents */