author	Chris Mason <chris.mason@oracle.com>	2008-07-22 23:06:41 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 11:04:05 -0400
commit	c286ac48ed7aaf53586f575af6053ae2a0f8554a (patch)
tree	794cb0e7269c62568e39c160b0f0f03e181c1d93 /fs
parent	e34a5b4f77b8448cf2863ad0cbac35e2c2a86a0a (diff)
Btrfs: alloc_mutex latency reduction
This releases the alloc_mutex in a few places that hold it across long operations.
btrfs_lookup_block_group is changed so that it doesn't need the mutex at all.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
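
As a rough illustration of the two latency tricks applied here (a per-block-group spinlock so lookups no longer need the big allocation mutex, and dropping alloc_mutex around reschedule points), the following is a minimal userspace sketch using pthreads. It is not kernel code: struct group, group_usage() and long_update() are invented stand-ins for btrfs_block_group_cache and its users.

/*
 * Userspace sketch of the locking pattern, assuming "struct group" plays
 * the role of struct btrfs_block_group_cache.  Invented names, not kernel
 * code.
 */
#define _POSIX_C_SOURCE 200112L
#include <pthread.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>

struct group {
	pthread_spinlock_t lock;	/* plays the role of cache->lock */
	uint64_t used;
	uint64_t pinned;
};

static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Reader that no longer needs alloc_mutex: the group spinlock is enough. */
static uint64_t group_usage(struct group *g)
{
	uint64_t total;

	pthread_spin_lock(&g->lock);
	total = g->used + g->pinned;
	pthread_spin_unlock(&g->lock);
	return total;
}

/* Long-running update: drop the big mutex at natural break points. */
static void long_update(struct group *g, int iterations)
{
	pthread_mutex_lock(&alloc_mutex);
	for (int i = 0; i < iterations; i++) {
		pthread_spin_lock(&g->lock);
		g->used += 1;
		pthread_spin_unlock(&g->lock);

		/* analogous to the need_resched()/cond_resched() dance */
		pthread_mutex_unlock(&alloc_mutex);
		sched_yield();
		pthread_mutex_lock(&alloc_mutex);
	}
	pthread_mutex_unlock(&alloc_mutex);
}

int main(void)
{
	struct group g = { .used = 0, .pinned = 0 };

	pthread_spin_init(&g.lock, PTHREAD_PROCESS_PRIVATE);
	long_update(&g, 1000);
	printf("usage: %llu\n", (unsigned long long)group_usage(&g));
	pthread_spin_destroy(&g.lock);
	return 0;
}

Build with cc sketch.c -lpthread. The patch below does the same thing in kernel terms: spin_lock()/spin_unlock() on the new cache->lock field for the per-group counters, and the mutex_unlock()/cond_resched()/mutex_lock() sequence on fs_info->alloc_mutex inside the long loops.
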
Diffstat (limited to 'fs')
-rw-r--r--	fs/btrfs/ctree.h	1
-rw-r--r--	fs/btrfs/extent-tree.c	100
2 files changed, 81 insertions(+), 20 deletions(-)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index eeb5afa6e9b1..90504ba7f838 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -483,6 +483,7 @@ struct btrfs_block_group_cache {
 	struct btrfs_key key;
 	struct btrfs_block_group_item item;
 	struct btrfs_space_info *space_info;
+	spinlock_t lock;
 	u64 pinned;
 	u64 flags;
 	int cached;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 72fa28236e5d..febc6295c7a9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -319,7 +319,7 @@ no_cache:
 		cache = btrfs_lookup_first_block_group(root->fs_info, last);
 	}
 	cache_miss = 0;
-	cache = __btrfs_find_block_group(root, cache, last, data, 0);
+	cache = btrfs_find_block_group(root, cache, last, data, 0);
 	if (!cache)
 		goto no_cache;
 	*cache_ret = cache;
@@ -379,19 +379,25 @@ __btrfs_find_block_group(struct btrfs_root *root,
 		struct btrfs_block_group_cache *shint;
 		shint = btrfs_lookup_first_block_group(info, search_start);
 		if (shint && block_group_bits(shint, data) && !shint->ro) {
+			spin_lock(&shint->lock);
 			used = btrfs_block_group_used(&shint->item);
 			if (used + shint->pinned <
 			    div_factor(shint->key.offset, factor)) {
+				spin_unlock(&shint->lock);
 				return shint;
 			}
+			spin_unlock(&shint->lock);
 		}
 	}
 	if (hint && !hint->ro && block_group_bits(hint, data)) {
+		spin_lock(&hint->lock);
 		used = btrfs_block_group_used(&hint->item);
 		if (used + hint->pinned <
 		    div_factor(hint->key.offset, factor)) {
+			spin_unlock(&hint->lock);
 			return hint;
 		}
+		spin_unlock(&hint->lock);
 		last = hint->key.objectid + hint->key.offset;
 	} else {
 		if (hint)
@@ -413,6 +419,7 @@ again:
 		}
 
 		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
+		spin_lock(&cache->lock);
 		last = cache->key.objectid + cache->key.offset;
 		used = btrfs_block_group_used(&cache->item);
 
@@ -420,9 +427,11 @@ again:
 			free_check = div_factor(cache->key.offset, factor);
 			if (used + cache->pinned < free_check) {
 				found_group = cache;
+				spin_unlock(&cache->lock);
 				goto found;
 			}
 		}
+		spin_unlock(&cache->lock);
 		cond_resched();
 	}
 	if (!wrapped) {
@@ -447,9 +456,7 @@ struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
 {
 
 	struct btrfs_block_group_cache *ret;
-	mutex_lock(&root->fs_info->alloc_mutex);
 	ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
-	mutex_unlock(&root->fs_info->alloc_mutex);
 	return ret;
 }
 static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
@@ -1262,21 +1269,25 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 		set_extent_bits(&info->block_group_cache, start, end,
 				BLOCK_GROUP_DIRTY, GFP_NOFS);
 
+		spin_lock(&cache->lock);
 		old_val = btrfs_block_group_used(&cache->item);
 		num_bytes = min(total, cache->key.offset - byte_in_group);
 		if (alloc) {
 			old_val += num_bytes;
 			cache->space_info->bytes_used += num_bytes;
+			btrfs_set_block_group_used(&cache->item, old_val);
+			spin_unlock(&cache->lock);
 		} else {
 			old_val -= num_bytes;
 			cache->space_info->bytes_used -= num_bytes;
+			btrfs_set_block_group_used(&cache->item, old_val);
+			spin_unlock(&cache->lock);
 			if (mark_free) {
 				set_extent_dirty(&info->free_space_cache,
 						 bytenr, bytenr + num_bytes - 1,
 						 GFP_NOFS);
 			}
 		}
-		btrfs_set_block_group_used(&cache->item, old_val);
 		total -= num_bytes;
 		bytenr += num_bytes;
 	}
@@ -1325,14 +1336,18 @@ static int update_pinned_extents(struct btrfs_root *root,
 		}
 		if (pin) {
 			if (cache) {
+				spin_lock(&cache->lock);
 				cache->pinned += len;
 				cache->space_info->bytes_pinned += len;
+				spin_unlock(&cache->lock);
 			}
 			fs_info->total_pinned += len;
 		} else {
 			if (cache) {
+				spin_lock(&cache->lock);
 				cache->pinned -= len;
 				cache->space_info->bytes_pinned -= len;
+				spin_unlock(&cache->lock);
 			}
 			fs_info->total_pinned -= len;
 		}
@@ -1380,6 +1395,11 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 		update_pinned_extents(root, start, end + 1 - start, 0);
 		clear_extent_dirty(unpin, start, end, GFP_NOFS);
 		set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
+		if (need_resched()) {
+			mutex_unlock(&root->fs_info->alloc_mutex);
+			cond_resched();
+			mutex_lock(&root->fs_info->alloc_mutex);
+		}
 	}
 	mutex_unlock(&root->fs_info->alloc_mutex);
 	return 0;
@@ -1417,8 +1437,16 @@ static int finish_current_insert(struct btrfs_trans_handle *trans,
 					&extent_item, sizeof(extent_item));
 		clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
 				  GFP_NOFS);
-		eb = read_tree_block(extent_root, ins.objectid, ins.offset,
-				     trans->transid);
+
+		eb = btrfs_find_tree_block(extent_root, ins.objectid,
+					   ins.offset);
+
+		if (!btrfs_buffer_uptodate(eb, trans->transid)) {
+			mutex_unlock(&extent_root->fs_info->alloc_mutex);
+			btrfs_read_buffer(eb, trans->transid);
+			mutex_lock(&extent_root->fs_info->alloc_mutex);
+		}
+
 		btrfs_tree_lock(eb);
 		level = btrfs_header_level(eb);
 		if (level == 0) {
@@ -1437,6 +1465,11 @@ static int finish_current_insert(struct btrfs_trans_handle *trans,
 					  0, level,
 					  btrfs_disk_key_objectid(&first));
 		BUG_ON(err);
+		if (need_resched()) {
+			mutex_unlock(&extent_root->fs_info->alloc_mutex);
+			cond_resched();
+			mutex_lock(&extent_root->fs_info->alloc_mutex);
+		}
 	}
 	btrfs_free_path(path);
 	return 0;
@@ -1640,15 +1673,28 @@ static int del_pending_extents(struct btrfs_trans_handle *trans, struct
 					    EXTENT_LOCKED);
 		if (ret)
 			break;
-		update_pinned_extents(extent_root, start, end + 1 - start, 1);
 		clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
 				  GFP_NOFS);
-		ret = __free_extent(trans, extent_root,
-				     start, end + 1 - start,
-				     extent_root->root_key.objectid,
-				     0, 0, 0, 0, 0);
+		if (!test_range_bit(&extent_root->fs_info->extent_ins,
+				    start, end, EXTENT_LOCKED, 0)) {
+			update_pinned_extents(extent_root, start,
+					      end + 1 - start, 1);
+			ret = __free_extent(trans, extent_root,
+					     start, end + 1 - start,
+					     extent_root->root_key.objectid,
+					     0, 0, 0, 0, 0);
+		} else {
+			clear_extent_bits(&extent_root->fs_info->extent_ins,
+					  start, end, EXTENT_LOCKED, GFP_NOFS);
+		}
 		if (ret)
 			err = ret;
+
+		if (need_resched()) {
+			mutex_unlock(&extent_root->fs_info->alloc_mutex);
+			cond_resched();
+			mutex_lock(&extent_root->fs_info->alloc_mutex);
+		}
 	}
 	return err;
 }
@@ -1768,12 +1814,12 @@ static int noinline find_free_extent(struct btrfs_trans_handle *trans,
 		block_group = btrfs_lookup_first_block_group(info, hint_byte);
 		if (!block_group)
 			hint_byte = search_start;
-		block_group = __btrfs_find_block_group(root, block_group,
+		block_group = btrfs_find_block_group(root, block_group,
 						     hint_byte, data, 1);
 		if (last_ptr && *last_ptr == 0 && block_group)
 			hint_byte = block_group->key.objectid;
 	} else {
-		block_group = __btrfs_find_block_group(root,
+		block_group = btrfs_find_block_group(root,
 						     trans->block_group,
 						     search_start, data, 1);
 	}
@@ -1895,7 +1941,7 @@ enospc:
 	}
 	block_group = btrfs_lookup_first_block_group(info, search_start);
 	cond_resched();
-	block_group = __btrfs_find_block_group(root, block_group,
+	block_group = btrfs_find_block_group(root, block_group,
 					       search_start, data, 0);
 	goto check_failed;
 
@@ -3032,11 +3078,14 @@ int __alloc_chunk_for_shrink(struct btrfs_root *root,
 	u64 new_alloc_flags;
 	u64 calc;
 
+	spin_lock(&shrink_block_group->lock);
 	if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
-
+		spin_unlock(&shrink_block_group->lock);
 		mutex_unlock(&root->fs_info->alloc_mutex);
+
 		trans = btrfs_start_transaction(root, 1);
 		mutex_lock(&root->fs_info->alloc_mutex);
+		spin_lock(&shrink_block_group->lock);
 
 		new_alloc_flags = update_block_group_flags(root,
 						   shrink_block_group->flags);
@@ -3046,13 +3095,16 @@ int __alloc_chunk_for_shrink(struct btrfs_root *root,
 		} else {
 			calc = shrink_block_group->key.offset;
 		}
+		spin_unlock(&shrink_block_group->lock);
+
 		do_chunk_alloc(trans, root->fs_info->extent_root,
 			       calc + 2 * 1024 * 1024, new_alloc_flags, force);
 
 		mutex_unlock(&root->fs_info->alloc_mutex);
 		btrfs_end_transaction(trans, root);
 		mutex_lock(&root->fs_info->alloc_mutex);
-	}
+	} else
+		spin_unlock(&shrink_block_group->lock);
 	return 0;
 }
 
@@ -3199,6 +3251,7 @@ next:
 	mutex_unlock(&root->fs_info->alloc_mutex);
 
 	trans = btrfs_start_transaction(root, 1);
+
 	mutex_lock(&root->fs_info->alloc_mutex);
 	memcpy(&key, &shrink_block_group->key, sizeof(key));
 
@@ -3316,6 +3369,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 			break;
 		}
 
+		spin_lock_init(&cache->lock);
 		read_extent_buffer(leaf, &cache->item,
 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
 				   sizeof(cache->item));
@@ -3343,10 +3397,12 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		/* use EXTENT_LOCKED to prevent merging */
 		set_extent_bits(block_group_cache, found_key.objectid,
 				found_key.objectid + found_key.offset - 1,
-				bit | EXTENT_LOCKED, GFP_NOFS);
+				EXTENT_LOCKED, GFP_NOFS);
 		set_state_private(block_group_cache, found_key.objectid,
 				  (unsigned long)cache);
-
+		set_extent_bits(block_group_cache, found_key.objectid,
+				found_key.objectid + found_key.offset - 1,
+				bit | EXTENT_LOCKED, GFP_NOFS);
 		if (key.objectid >=
 		    btrfs_super_total_bytes(&info->super_copy))
 			break;
@@ -3377,6 +3433,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	BUG_ON(!cache);
 	cache->key.objectid = chunk_offset;
 	cache->key.offset = size;
+	spin_lock_init(&cache->lock);
 	btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
 
 	btrfs_set_block_group_used(&cache->item, bytes_used);
@@ -3391,10 +3448,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	bit = block_group_state_bits(type);
 	set_extent_bits(block_group_cache, chunk_offset,
 			chunk_offset + size - 1,
-			bit | EXTENT_LOCKED, GFP_NOFS);
-
+			EXTENT_LOCKED, GFP_NOFS);
 	set_state_private(block_group_cache, chunk_offset,
 			  (unsigned long)cache);
+	set_extent_bits(block_group_cache, chunk_offset,
+			chunk_offset + size - 1,
+			bit | EXTENT_LOCKED, GFP_NOFS);
+
 	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
 				sizeof(cache->item));
 	BUG_ON(ret);