about summary refs log tree commit diff stats
path: root/fs/btrfs/extent-tree.c
diff options
context:
space:
mode:
authorJosef Bacik <jbacik@redhat.com>2009-04-03 10:14:18 -0400
committerChris Mason <chris.mason@oracle.com>2009-04-03 10:14:18 -0400
commit6226cb0a5ea3f6289883753c15d53f48a6c6bbfb (patch)
tree819765cd5a5816017580f638c4b2a3e7f6354aea /fs/btrfs/extent-tree.c
parent2552d17e328044d1811cae733087a1fb9aac2eb6 (diff)
Btrfs: kill the block group alloc mutex
This patch removes the block group alloc mutex used to protect the free space tree for allocations and replaces it with a spin lock which is used only to protect the free space rb tree. This means we only take the lock when we are directly manipulating the tree, which makes us a touch faster with multi-threaded workloads. This patch also gets rid of btrfs_find_free_space and replaces it with btrfs_find_space_for_alloc, which takes the number of bytes you want to allocate, and empty_size, which is used to indicate how much free space should be at the end of the allocation. It will return an offset for the allocator to use. If we don't end up using it we _must_ call btrfs_add_free_space to put it back. This is the tradeoff to kill the alloc_mutex, since we need to make sure nobody else comes along and takes our space. Signed-off-by: Josef Bacik <jbacik@redhat.com>
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--fs/btrfs/extent-tree.c46
1 file changed, 23 insertions, 23 deletions
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index daff751ea6e2..6880a271975a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2554,7 +2554,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
2554{ 2554{
2555 int ret = 0; 2555 int ret = 0;
2556 struct btrfs_root *root = orig_root->fs_info->extent_root; 2556 struct btrfs_root *root = orig_root->fs_info->extent_root;
2557 u64 total_needed = num_bytes;
2558 u64 *last_ptr = NULL; 2557 u64 *last_ptr = NULL;
2559 struct btrfs_block_group_cache *block_group = NULL; 2558 struct btrfs_block_group_cache *block_group = NULL;
2560 int empty_cluster = 2 * 1024 * 1024; 2559 int empty_cluster = 2 * 1024 * 1024;
@@ -2597,7 +2596,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
2597 block_group = btrfs_lookup_block_group(root->fs_info, 2596 block_group = btrfs_lookup_block_group(root->fs_info,
2598 search_start); 2597 search_start);
2599 if (block_group && block_group_bits(block_group, data)) { 2598 if (block_group && block_group_bits(block_group, data)) {
2600 total_needed += empty_size;
2601 down_read(&space_info->groups_sem); 2599 down_read(&space_info->groups_sem);
2602 goto have_block_group; 2600 goto have_block_group;
2603 } else if (block_group) { 2601 } else if (block_group) {
@@ -2611,7 +2609,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
2611search: 2609search:
2612 down_read(&space_info->groups_sem); 2610 down_read(&space_info->groups_sem);
2613 list_for_each_entry(block_group, &space_info->block_groups, list) { 2611 list_for_each_entry(block_group, &space_info->block_groups, list) {
2614 struct btrfs_free_space *free_space; 2612 u64 offset;
2615 2613
2616 atomic_inc(&block_group->count); 2614 atomic_inc(&block_group->count);
2617 search_start = block_group->key.objectid; 2615 search_start = block_group->key.objectid;
@@ -2627,62 +2625,65 @@ have_block_group:
2627 } 2625 }
2628 } 2626 }
2629 2627
2630 mutex_lock(&block_group->alloc_mutex);
2631
2632 if (unlikely(block_group->ro)) 2628 if (unlikely(block_group->ro))
2633 goto loop; 2629 goto loop;
2634 2630
2635 free_space = btrfs_find_free_space(block_group, search_start, 2631 offset = btrfs_find_space_for_alloc(block_group, search_start,
2636 total_needed); 2632 num_bytes, empty_size);
2637 if (!free_space) 2633 if (!offset)
2638 goto loop; 2634 goto loop;
2639 2635
2640 search_start = stripe_align(root, free_space->offset); 2636 search_start = stripe_align(root, offset);
2641 2637
2642 /* move on to the next group */ 2638 /* move on to the next group */
2643 if (search_start + num_bytes >= search_end) 2639 if (search_start + num_bytes >= search_end) {
2640 btrfs_add_free_space(block_group, offset, num_bytes);
2644 goto loop; 2641 goto loop;
2642 }
2645 2643
2646 /* move on to the next group */ 2644 /* move on to the next group */
2647 if (search_start + num_bytes > 2645 if (search_start + num_bytes >
2648 block_group->key.objectid + block_group->key.offset) 2646 block_group->key.objectid + block_group->key.offset) {
2647 btrfs_add_free_space(block_group, offset, num_bytes);
2649 goto loop; 2648 goto loop;
2649 }
2650 2650
2651 if (using_hint && search_start > hint_byte) 2651 if (using_hint && search_start > hint_byte) {
2652 btrfs_add_free_space(block_group, offset, num_bytes);
2652 goto loop; 2653 goto loop;
2654 }
2653 2655
2654 if (exclude_nr > 0 && 2656 if (exclude_nr > 0 &&
2655 (search_start + num_bytes > exclude_start && 2657 (search_start + num_bytes > exclude_start &&
2656 search_start < exclude_start + exclude_nr)) { 2658 search_start < exclude_start + exclude_nr)) {
2657 search_start = exclude_start + exclude_nr; 2659 search_start = exclude_start + exclude_nr;
2658 2660
2661 btrfs_add_free_space(block_group, offset, num_bytes);
2659 /* 2662 /*
2660 * if search_start is still in this block group 2663 * if search_start is still in this block group
2661 * then we just re-search this block group 2664 * then we just re-search this block group
2662 */ 2665 */
2663 if (search_start >= block_group->key.objectid && 2666 if (search_start >= block_group->key.objectid &&
2664 search_start < (block_group->key.objectid + 2667 search_start < (block_group->key.objectid +
2665 block_group->key.offset)) { 2668 block_group->key.offset))
2666 mutex_unlock(&block_group->alloc_mutex);
2667 goto have_block_group; 2669 goto have_block_group;
2668 }
2669 goto loop; 2670 goto loop;
2670 } 2671 }
2671 2672
2672 ins->objectid = search_start; 2673 ins->objectid = search_start;
2673 ins->offset = num_bytes; 2674 ins->offset = num_bytes;
2674 2675
2675 btrfs_remove_free_space_lock(block_group, search_start, 2676 if (offset < search_start)
2676 num_bytes); 2677 btrfs_add_free_space(block_group, offset,
2678 search_start - offset);
2679 BUG_ON(offset > search_start);
2680
2677 /* we are all good, lets return */ 2681 /* we are all good, lets return */
2678 mutex_unlock(&block_group->alloc_mutex);
2679 break; 2682 break;
2680loop: 2683loop:
2681 mutex_unlock(&block_group->alloc_mutex);
2682 put_block_group(block_group); 2684 put_block_group(block_group);
2683 if (using_hint) { 2685 if (using_hint) {
2684 empty_size += empty_cluster; 2686 empty_size += empty_cluster;
2685 total_needed += empty_cluster;
2686 using_hint = 0; 2687 using_hint = 0;
2687 up_read(&space_info->groups_sem); 2688 up_read(&space_info->groups_sem);
2688 goto search; 2689 goto search;
@@ -2693,7 +2694,6 @@ loop:
2693 if (!ins->objectid && (empty_size || allowed_chunk_alloc)) { 2694 if (!ins->objectid && (empty_size || allowed_chunk_alloc)) {
2694 int try_again = empty_size; 2695 int try_again = empty_size;
2695 2696
2696 total_needed -= empty_size;
2697 empty_size = 0; 2697 empty_size = 0;
2698 2698
2699 if (allowed_chunk_alloc) { 2699 if (allowed_chunk_alloc) {
@@ -5782,7 +5782,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
5782 5782
5783 atomic_set(&cache->count, 1); 5783 atomic_set(&cache->count, 1);
5784 spin_lock_init(&cache->lock); 5784 spin_lock_init(&cache->lock);
5785 mutex_init(&cache->alloc_mutex); 5785 spin_lock_init(&cache->tree_lock);
5786 mutex_init(&cache->cache_mutex); 5786 mutex_init(&cache->cache_mutex);
5787 INIT_LIST_HEAD(&cache->list); 5787 INIT_LIST_HEAD(&cache->list);
5788 read_extent_buffer(leaf, &cache->item, 5788 read_extent_buffer(leaf, &cache->item,
@@ -5838,7 +5838,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
5838 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; 5838 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
5839 atomic_set(&cache->count, 1); 5839 atomic_set(&cache->count, 1);
5840 spin_lock_init(&cache->lock); 5840 spin_lock_init(&cache->lock);
5841 mutex_init(&cache->alloc_mutex); 5841 spin_lock_init(&cache->tree_lock);
5842 mutex_init(&cache->cache_mutex); 5842 mutex_init(&cache->cache_mutex);
5843 INIT_LIST_HEAD(&cache->list); 5843 INIT_LIST_HEAD(&cache->list);
5844 5844