author		Chris Mason <chris.mason@oracle.com>	2012-01-16 15:25:42 -0500
committer	Chris Mason <chris.mason@oracle.com>	2012-01-16 15:25:42 -0500
commit		64e05503ab5c73b2ffb8d55d2f7aab74f34fc691 (patch)
tree		a754d3790b8dd59ecaed54095c6e09e04f0d1dd2
parent		805a6af8dba5dfdd35ec35dc52ec0122400b2610 (diff)
parent		1bb91902dc90e25449893e693ad45605cb08fbe5 (diff)
Merge branch 'allocation-fixes' into integration
-rw-r--r--	fs/btrfs/extent-tree.c		72
-rw-r--r--	fs/btrfs/free-space-cache.c	112
-rw-r--r--	fs/btrfs/transaction.c		9
-rw-r--r--	fs/btrfs/volumes.c		6
4 files changed, 91 insertions, 108 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f5fbe576d2ba..37594e4bf660 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2267,9 +2267,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 				BUG_ON(ret);
 				kfree(extent_op);
 
-				cond_resched();
-				spin_lock(&delayed_refs->lock);
-				continue;
+				goto next;
 			}
 
 			list_del_init(&locked_ref->cluster);
@@ -2289,7 +2287,11 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 		btrfs_put_delayed_ref(ref);
 		kfree(extent_op);
 		count++;
-
+next:
+		do_chunk_alloc(trans, root->fs_info->extent_root,
+			       2 * 1024 * 1024,
+			       btrfs_get_alloc_profile(root, 0),
+			       CHUNK_ALLOC_NO_FORCE);
 		cond_resched();
 		spin_lock(&delayed_refs->lock);
 	}
@@ -2317,6 +2319,10 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 	if (root == root->fs_info->extent_root)
 		root = root->fs_info->tree_root;
 
+	do_chunk_alloc(trans, root->fs_info->extent_root,
+		       2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
+		       CHUNK_ALLOC_NO_FORCE);
+
 	delayed_refs = &trans->transaction->delayed_refs;
 	INIT_LIST_HEAD(&cluster);
 again:
@@ -3257,27 +3263,12 @@ static int should_alloc_chunk(struct btrfs_root *root,
 		if (num_bytes - num_allocated < thresh)
 			return 1;
 	}
-
-	/*
-	 * we have two similar checks here, one based on percentage
-	 * and once based on a hard number of 256MB.  The idea
-	 * is that if we have a good amount of free
-	 * room, don't allocate a chunk.  A good mount is
-	 * less than 80% utilized of the chunks we have allocated,
-	 * or more than 256MB free
-	 */
-	if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
-		return 0;
-
-	if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
-		return 0;
-
 	thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
 
-	/* 256MB or 5% of the FS */
-	thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
+	/* 256MB or 2% of the FS */
+	thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2));
 
-	if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
+	if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8))
 		return 0;
 	return 1;
 }
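
For reference, the rewritten heuristic can be read as a small standalone
function. This is a minimal user-space sketch, assuming btrfs's helpers
div_factor() and div_factor_fine() compute num * factor / 10 and
num * factor / 100 respectively; the names below are illustrative, not
kernel API:

#include <stdbool.h>
#include <stdint.h>

static uint64_t div_factor(uint64_t num, int factor)      /* num * factor / 10 */
{
	return num * factor / 10;
}

static uint64_t div_factor_fine(uint64_t num, int factor) /* num * factor / 100 */
{
	return num * factor / 100;
}

/* Decide whether allocating another chunk is worthwhile. */
static bool should_alloc(uint64_t fs_total_bytes,
			 uint64_t space_info_total,
			 uint64_t space_info_used)
{
	/* 256MB or 2% of the filesystem, whichever is larger */
	uint64_t thresh = 256ULL * 1024 * 1024;

	if (div_factor_fine(fs_total_bytes, 2) > thresh)
		thresh = div_factor_fine(fs_total_bytes, 2);

	/* already plenty of room: a big space_info under 80% used */
	if (space_info_total > thresh &&
	    space_info_used < div_factor(space_info_total, 8))
		return false;
	return true;
}
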
@@ -5295,15 +5286,6 @@ alloc:
 		if (unlikely(block_group->ro))
 			goto loop;
 
-		spin_lock(&block_group->free_space_ctl->tree_lock);
-		if (cached &&
-		    block_group->free_space_ctl->free_space <
-		    num_bytes + empty_cluster + empty_size) {
-			spin_unlock(&block_group->free_space_ctl->tree_lock);
-			goto loop;
-		}
-		spin_unlock(&block_group->free_space_ctl->tree_lock);
-
 		/*
 		 * Ok we want to try and use the cluster allocator, so
 		 * lets look there
@@ -5349,8 +5331,15 @@ refill_cluster:
 			 * plenty of times and not have found
 			 * anything, so we are likely way too
 			 * fragmented for the clustering stuff to find
-			 * anything.  */
-			if (loop >= LOOP_NO_EMPTY_SIZE) {
+			 * anything.
+			 *
+			 * However, if the cluster is taken from the
+			 * current block group, release the cluster
+			 * first, so that we stand a better chance of
+			 * succeeding in the unclustered
+			 * allocation.  */
+			if (loop >= LOOP_NO_EMPTY_SIZE &&
+			    last_ptr->block_group != block_group) {
 				spin_unlock(&last_ptr->refill_lock);
 				goto unclustered_alloc;
 			}
@@ -5361,6 +5350,11 @@ refill_cluster:
 			 */
 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
 
+			if (loop >= LOOP_NO_EMPTY_SIZE) {
+				spin_unlock(&last_ptr->refill_lock);
+				goto unclustered_alloc;
+			}
+
 			/* allocate a cluster in this block group */
 			ret = btrfs_find_space_cluster(trans, root,
 					       block_group, last_ptr,
@@ -5401,6 +5395,15 @@ refill_cluster:
 		}
 
 unclustered_alloc:
+		spin_lock(&block_group->free_space_ctl->tree_lock);
+		if (cached &&
+		    block_group->free_space_ctl->free_space <
+		    num_bytes + empty_cluster + empty_size) {
+			spin_unlock(&block_group->free_space_ctl->tree_lock);
+			goto loop;
+		}
+		spin_unlock(&block_group->free_space_ctl->tree_lock);
+
 		offset = btrfs_find_space_for_alloc(block_group, search_start,
 						    num_bytes, empty_size);
 		/*
@@ -5438,9 +5441,6 @@ checks:
 			goto loop;
 		}
 
-		ins->objectid = search_start;
-		ins->offset = num_bytes;
-
 		if (offset < search_start)
 			btrfs_add_free_space(used_block_group, offset,
 					     search_start - offset);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index ec23d43d0c35..ce40db59c706 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -2283,23 +2283,23 @@ out:
 static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
 				struct btrfs_free_space *entry,
 				struct btrfs_free_cluster *cluster,
-				u64 offset, u64 bytes, u64 min_bytes)
+				u64 offset, u64 bytes,
+				u64 cont1_bytes, u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	unsigned long next_zero;
 	unsigned long i;
-	unsigned long search_bits;
-	unsigned long total_bits;
+	unsigned long want_bits;
+	unsigned long min_bits;
 	unsigned long found_bits;
 	unsigned long start = 0;
 	unsigned long total_found = 0;
 	int ret;
-	bool found = false;
 
 	i = offset_to_bit(entry->offset, block_group->sectorsize,
 			  max_t(u64, offset, entry->offset));
-	search_bits = bytes_to_bits(bytes, block_group->sectorsize);
-	total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
+	want_bits = bytes_to_bits(bytes, block_group->sectorsize);
+	min_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
 
 again:
 	found_bits = 0;
@@ -2308,7 +2308,7 @@ again:
 	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
 		next_zero = find_next_zero_bit(entry->bitmap,
 					       BITS_PER_BITMAP, i);
-		if (next_zero - i >= search_bits) {
+		if (next_zero - i >= min_bits) {
 			found_bits = next_zero - i;
 			break;
 		}
@@ -2318,10 +2318,9 @@ again:
 	if (!found_bits)
 		return -ENOSPC;
 
-	if (!found) {
+	if (!total_found) {
 		start = i;
 		cluster->max_size = 0;
-		found = true;
 	}
 
 	total_found += found_bits;
@@ -2329,13 +2328,8 @@ again:
 	if (cluster->max_size < found_bits * block_group->sectorsize)
 		cluster->max_size = found_bits * block_group->sectorsize;
 
-	if (total_found < total_bits) {
-		i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
-		if (i - start > total_bits * 2) {
-			total_found = 0;
-			cluster->max_size = 0;
-			found = false;
-		}
+	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
+		i = next_zero + 1;
 		goto again;
 	}
 
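
The new loop keeps scanning from next_zero + 1 until it has seen both
enough total free bits and one run long enough for cont1_bytes, instead
of resetting the window on large gaps. A rough user-space sketch of that
search (illustrative only, not the kernel helpers; set bits mean free
space, as in btrfs free-space bitmaps):

#include <stdbool.h>
#include <stddef.h>

static bool bit_free(const unsigned char *bm, size_t i)
{
	return bm[i >> 3] & (1u << (i & 7));
}

/* Returns 0 once want_bits total free bits have been seen, of which at
 * least one contiguous run covers cont1_bits; runs shorter than
 * min_bits are ignored entirely. */
static int scan_bitmap(const unsigned char *bm, size_t nbits,
		       size_t want_bits, size_t min_bits, size_t cont1_bits)
{
	size_t i = 0, total = 0, max_run = 0;

	while (i < nbits) {
		size_t run = 0;

		while (i < nbits && !bit_free(bm, i))	/* skip allocated */
			i++;
		while (i + run < nbits && bit_free(bm, i + run))
			run++;				/* measure free run */
		if (run >= min_bits) {
			total += run;
			if (run > max_run)
				max_run = run;
		}
		if (total >= want_bits && max_run >= cont1_bits)
			return 0;	/* cluster is satisfiable */
		i += run + 1;		/* i = next_zero + 1 */
	}
	return -1;			/* -ENOSPC analogue */
}
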
@@ -2351,23 +2345,23 @@ again:
 
 /*
  * This searches the block group for just extents to fill the cluster with.
+ * Try to find a cluster with at least bytes total bytes, at least one
+ * extent of cont1_bytes, and other clusters of at least min_bytes.
  */
 static noinline int
 setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 			struct btrfs_free_cluster *cluster,
 			struct list_head *bitmaps, u64 offset, u64 bytes,
-			u64 min_bytes)
+			u64 cont1_bytes, u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *first = NULL;
 	struct btrfs_free_space *entry = NULL;
-	struct btrfs_free_space *prev = NULL;
 	struct btrfs_free_space *last;
 	struct rb_node *node;
 	u64 window_start;
 	u64 window_free;
 	u64 max_extent;
-	u64 max_gap = 128 * 1024;
 
 	entry = tree_search_offset(ctl, offset, 0, 1);
 	if (!entry)
@@ -2377,8 +2371,8 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 	 * We don't want bitmaps, so just move along until we find a normal
 	 * extent entry.
 	 */
-	while (entry->bitmap) {
-		if (list_empty(&entry->list))
+	while (entry->bitmap || entry->bytes < min_bytes) {
+		if (entry->bitmap && list_empty(&entry->list))
 			list_add_tail(&entry->list, bitmaps);
 		node = rb_next(&entry->offset_index);
 		if (!node)
@@ -2391,12 +2385,9 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 	max_extent = entry->bytes;
 	first = entry;
 	last = entry;
-	prev = entry;
 
-	while (window_free <= min_bytes) {
-		node = rb_next(&entry->offset_index);
-		if (!node)
-			return -ENOSPC;
+	for (node = rb_next(&entry->offset_index); node;
+	     node = rb_next(&entry->offset_index)) {
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
 
 		if (entry->bitmap) {
@@ -2405,26 +2396,18 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 			continue;
 		}
 
-		/*
-		 * we haven't filled the empty size and the window is
-		 * very large.  reset and try again
-		 */
-		if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
-		    entry->offset - window_start > (min_bytes * 2)) {
-			first = entry;
-			window_start = entry->offset;
-			window_free = entry->bytes;
-			last = entry;
+		if (entry->bytes < min_bytes)
+			continue;
+
+		last = entry;
+		window_free += entry->bytes;
+		if (entry->bytes > max_extent)
 			max_extent = entry->bytes;
-		} else {
-			last = entry;
-			window_free += entry->bytes;
-			if (entry->bytes > max_extent)
-				max_extent = entry->bytes;
-		}
-		prev = entry;
 	}
 
+	if (window_free < bytes || max_extent < cont1_bytes)
+		return -ENOSPC;
+
 	cluster->window_start = first->offset;
 
 	node = &first->offset_index;
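
With the gap-based window reset gone, the extent walk reduces to a
single accumulation pass. A simplified sketch with stand-in types (not
the kernel's rb-tree walk):

#include <stdint.h>

struct free_extent {
	uint64_t bytes;
	int bitmap;			/* stand-in for entry->bitmap */
	struct free_extent *next;	/* rb_next() in the real code */
};

/* Succeeds only if the window holds 'bytes' in total and contains one
 * extent of at least cont1_bytes, counting only extents >= min_bytes. */
static int build_window(struct free_extent *first, uint64_t bytes,
			uint64_t cont1_bytes, uint64_t min_bytes)
{
	uint64_t window_free = 0, max_extent = 0;
	struct free_extent *e;

	for (e = first; e; e = e->next) {
		if (e->bitmap || e->bytes < min_bytes)
			continue;	/* too small to be worth tracking */
		window_free += e->bytes;
		if (e->bytes > max_extent)
			max_extent = e->bytes;
	}
	if (window_free < bytes || max_extent < cont1_bytes)
		return -1;		/* -ENOSPC analogue */
	return 0;
}
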
@@ -2438,7 +2421,7 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
 		node = rb_next(&entry->offset_index);
-		if (entry->bitmap)
+		if (entry->bitmap || entry->bytes < min_bytes)
 			continue;
 
 		rb_erase(&entry->offset_index, &ctl->free_space_offset);
@@ -2460,7 +2443,7 @@ static noinline int
 setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 		     struct btrfs_free_cluster *cluster,
 		     struct list_head *bitmaps, u64 offset, u64 bytes,
-		     u64 min_bytes)
+		     u64 cont1_bytes, u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *entry;
@@ -2485,7 +2468,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 		if (entry->bytes < min_bytes)
 			continue;
 		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
-					   bytes, min_bytes);
+					   bytes, cont1_bytes, min_bytes);
 		if (!ret)
 			return 0;
 	}
@@ -2499,7 +2482,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 
 /*
  * here we try to find a cluster of blocks in a block group.  The goal
- * is to find at least bytes free and up to empty_size + bytes free.
+ * is to find at least bytes+empty_size.
  * We might not find them all in one contiguous area.
  *
  * returns zero and sets up cluster if things worked out, otherwise
@@ -2515,23 +2498,24 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 	struct btrfs_free_space *entry, *tmp;
 	LIST_HEAD(bitmaps);
 	u64 min_bytes;
+	u64 cont1_bytes;
 	int ret;
 
-	/* for metadata, allow allocates with more holes */
+	/*
+	 * Choose the minimum extent size we'll require for this
+	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
+	 * For metadata, allow allocates with smaller extents.  For
+	 * data, keep it dense.
+	 */
 	if (btrfs_test_opt(root, SSD_SPREAD)) {
-		min_bytes = bytes + empty_size;
+		cont1_bytes = min_bytes = bytes + empty_size;
 	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
-		/*
-		 * we want to do larger allocations when we are
-		 * flushing out the delayed refs, it helps prevent
-		 * making more work as we go along.
-		 */
-		if (trans->transaction->delayed_refs.flushing)
-			min_bytes = max(bytes, (bytes + empty_size) >> 1);
-		else
-			min_bytes = max(bytes, (bytes + empty_size) >> 4);
-	} else
-		min_bytes = max(bytes, (bytes + empty_size) >> 2);
+		cont1_bytes = bytes;
+		min_bytes = block_group->sectorsize;
+	} else {
+		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
+		min_bytes = block_group->sectorsize;
+	}
 
 	spin_lock(&ctl->tree_lock);
 
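
The three sizing cases are easy to compare as a standalone helper. A
sketch with illustrative names (ssd_spread and is_metadata stand in for
btrfs_test_opt(root, SSD_SPREAD) and the METADATA block-group flag):

#include <stdint.h>

struct cluster_req {
	uint64_t cont1_bytes;	/* one extent must be at least this big */
	uint64_t min_bytes;	/* smaller free extents are ignored */
};

static struct cluster_req choose_cluster_req(uint64_t bytes,
					     uint64_t empty_size,
					     uint64_t sectorsize,
					     int ssd_spread, int is_metadata)
{
	struct cluster_req r;

	if (ssd_spread) {
		/* no fragmentation at all */
		r.cont1_bytes = r.min_bytes = bytes + empty_size;
	} else if (is_metadata) {
		/* one extent big enough for the allocation itself */
		r.cont1_bytes = bytes;
		r.min_bytes = sectorsize;
	} else {
		/* data: keep the cluster reasonably dense */
		uint64_t quarter = (bytes + empty_size) >> 2;

		r.cont1_bytes = bytes > quarter ? bytes : quarter;
		r.min_bytes = sectorsize;
	}
	return r;
}

For example, a 1MB data allocation with 256K of empty_size on a
4K-sectorsize filesystem yields cont1_bytes = 1MB and min_bytes = 4K.
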
@@ -2539,7 +2523,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 	 * If we know we don't have enough space to make a cluster don't even
 	 * bother doing all the work to try and find one.
 	 */
-	if (ctl->free_space < min_bytes) {
+	if (ctl->free_space < bytes) {
 		spin_unlock(&ctl->tree_lock);
 		return -ENOSPC;
 	}
@@ -2553,10 +2537,12 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 	}
 
 	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
-				      bytes, min_bytes);
+				      bytes + empty_size,
+				      cont1_bytes, min_bytes);
 	if (ret)
 		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
-					   offset, bytes, min_bytes);
+					   offset, bytes + empty_size,
+					   cont1_bytes, min_bytes);
 
 	/* Clear our temporary list */
 	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 81376d94cd3c..360c2dfd1ee6 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -467,19 +467,12 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 
 	btrfs_trans_release_metadata(trans, root);
 	trans->block_rsv = NULL;
-	while (count < 4) {
+	while (count < 2) {
 		unsigned long cur = trans->delayed_ref_updates;
 		trans->delayed_ref_updates = 0;
 		if (cur &&
 		    trans->transaction->delayed_refs.num_heads_ready > 64) {
 			trans->delayed_ref_updates = 0;
-
-			/*
-			 * do a full flush if the transaction is trying
-			 * to close
-			 */
-			if (trans->transaction->delayed_refs.flushing)
-				cur = 0;
 			btrfs_run_delayed_refs(trans, root, cur);
 		} else {
 			break;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f4b839fd3c9d..ac00e3aa80a1 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2441,7 +2441,11 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 		max_stripe_size = 1024 * 1024 * 1024;
 		max_chunk_size = 10 * max_stripe_size;
 	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
-		max_stripe_size = 256 * 1024 * 1024;
+		/* for larger filesystems, use larger metadata chunks */
+		if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
+			max_stripe_size = 1024 * 1024 * 1024;
+		else
+			max_stripe_size = 256 * 1024 * 1024;
 		max_chunk_size = max_stripe_size;
 	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
 		max_stripe_size = 8 * 1024 * 1024;
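
The size cutoff is simple enough to state as a helper. A sketch, with
total_rw_bytes standing in for fs_devices->total_rw_bytes:

#include <stdint.h>

/* Filesystems with more than 50GB of writable space get 1GB metadata
 * stripes; smaller ones keep the old 256MB. */
static uint64_t metadata_stripe_size(uint64_t total_rw_bytes)
{
	const uint64_t gib = 1024ULL * 1024 * 1024;

	return total_rw_bytes > 50 * gib ? gib : 256 * 1024 * 1024;
}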