author     Josef Bacik <jbacik@redhat.com>        2009-04-03 10:14:18 -0400
committer  Chris Mason <chris.mason@oracle.com>   2009-04-03 10:14:18 -0400
commit     6226cb0a5ea3f6289883753c15d53f48a6c6bbfb (patch)
tree       819765cd5a5816017580f638c4b2a3e7f6354aea /fs
parent     2552d17e328044d1811cae733087a1fb9aac2eb6 (diff)
Btrfs: kill the block group alloc mutex
This patch removes the block group alloc mutex that was used to protect the
free space tree during allocations and replaces it with a spinlock that is
used only to protect the free space rb tree. This means we only take the lock
when we are directly manipulating the tree, which makes us a touch faster with
multi-threaded workloads.
This patch also gets rid of btrfs_find_free_space and replaces it with
btrfs_find_space_for_alloc, which takes the number of bytes you want to
allocate and an empty_size argument that indicates how much free space should
be left at the end of the allocation.

It returns an offset for the allocator to use. If we don't end up using that
space we _must_ call btrfs_add_free_space to put it back. This is the tradeoff
for killing the alloc_mutex, since we need to make sure nobody else comes
along and takes our space.
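
As an illustration of that contract (this sketch is not part of the patch; the
try_reserve() wrapper and its keep flag are made up for the example, while the
two btrfs calls and their signatures come from the hunks below, and both of
them take block_group->tree_lock internally only while touching the rb tree):

    /* hypothetical caller, for illustration only */
    static u64 try_reserve(struct btrfs_block_group_cache *block_group,
                           u64 search_start, u64 num_bytes, u64 empty_size,
                           int keep)
    {
            u64 offset;

            /* on success the space is already carved out of the free space tree */
            offset = btrfs_find_space_for_alloc(block_group, search_start,
                                                num_bytes, empty_size);
            if (!offset)
                    return 0;       /* nothing suitable in this block group */

            if (!keep) {
                    /* an unused reservation must be handed back */
                    btrfs_add_free_space(block_group, offset, num_bytes);
                    return 0;
            }

            return offset;
    }

find_free_extent() in extent-tree.c follows this pattern below: every early
"goto loop" taken after a successful btrfs_find_space_for_alloc() first hands
the offset back with btrfs_add_free_space() before moving on to the next
block group.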
Signed-off-by: Josef Bacik <jbacik@redhat.com>
Diffstat (limited to 'fs')
-rw-r--r--   fs/btrfs/ctree.h            |  11
-rw-r--r--   fs/btrfs/extent-tree.c      |  46
-rw-r--r--   fs/btrfs/free-space-cache.c | 183
3 files changed, 83 insertions, 157 deletions
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index f48905ee5240..527744561f93 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -644,7 +644,6 @@ struct btrfs_block_group_cache {
         struct btrfs_key key;
         struct btrfs_block_group_item item;
         spinlock_t lock;
-        struct mutex alloc_mutex;
         struct mutex cache_mutex;
         u64 pinned;
         u64 reserved;
@@ -656,6 +655,7 @@ struct btrfs_block_group_cache {
         struct btrfs_space_info *space_info;
 
         /* free space cache stuff */
+        spinlock_t tree_lock;
         struct rb_root free_space_bytes;
         struct rb_root free_space_offset;
 
@@ -2177,17 +2177,12 @@ int btrfs_acl_chmod(struct inode *inode);
 /* free-space-cache.c */
 int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                          u64 bytenr, u64 size);
-int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
-                              u64 offset, u64 bytes);
 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                             u64 bytenr, u64 size);
-int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
-                                 u64 offset, u64 bytes);
 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
                                    *block_group);
-struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
-                                               *block_group, u64 offset,
-                                               u64 bytes);
+u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
+                               u64 offset, u64 bytes, u64 empty_size);
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                            u64 bytes);
 u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index daff751ea6e2..6880a271975a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2554,7 +2554,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 {
         int ret = 0;
         struct btrfs_root *root = orig_root->fs_info->extent_root;
-        u64 total_needed = num_bytes;
         u64 *last_ptr = NULL;
         struct btrfs_block_group_cache *block_group = NULL;
         int empty_cluster = 2 * 1024 * 1024;
@@ -2597,7 +2596,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                 block_group = btrfs_lookup_block_group(root->fs_info,
                                                        search_start);
                 if (block_group && block_group_bits(block_group, data)) {
-                        total_needed += empty_size;
                         down_read(&space_info->groups_sem);
                         goto have_block_group;
                 } else if (block_group) {
@@ -2611,7 +2609,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 search:
         down_read(&space_info->groups_sem);
         list_for_each_entry(block_group, &space_info->block_groups, list) {
-                struct btrfs_free_space *free_space;
+                u64 offset;
 
                 atomic_inc(&block_group->count);
                 search_start = block_group->key.objectid;
@@ -2627,62 +2625,65 @@ have_block_group:
                         }
                 }
 
-                mutex_lock(&block_group->alloc_mutex);
-
                 if (unlikely(block_group->ro))
                         goto loop;
 
-                free_space = btrfs_find_free_space(block_group, search_start,
-                                                   total_needed);
-                if (!free_space)
+                offset = btrfs_find_space_for_alloc(block_group, search_start,
+                                                    num_bytes, empty_size);
+                if (!offset)
                         goto loop;
 
-                search_start = stripe_align(root, free_space->offset);
+                search_start = stripe_align(root, offset);
 
                 /* move on to the next group */
-                if (search_start + num_bytes >= search_end)
+                if (search_start + num_bytes >= search_end) {
+                        btrfs_add_free_space(block_group, offset, num_bytes);
                         goto loop;
+                }
 
                 /* move on to the next group */
                 if (search_start + num_bytes >
-                    block_group->key.objectid + block_group->key.offset)
+                    block_group->key.objectid + block_group->key.offset) {
+                        btrfs_add_free_space(block_group, offset, num_bytes);
                         goto loop;
+                }
 
-                if (using_hint && search_start > hint_byte)
+                if (using_hint && search_start > hint_byte) {
+                        btrfs_add_free_space(block_group, offset, num_bytes);
                         goto loop;
+                }
 
                 if (exclude_nr > 0 &&
                     (search_start + num_bytes > exclude_start &&
                      search_start < exclude_start + exclude_nr)) {
                         search_start = exclude_start + exclude_nr;
 
+                        btrfs_add_free_space(block_group, offset, num_bytes);
                         /*
                          * if search_start is still in this block group
                          * then we just re-search this block group
                          */
                         if (search_start >= block_group->key.objectid &&
                             search_start < (block_group->key.objectid +
-                                            block_group->key.offset)) {
-                                mutex_unlock(&block_group->alloc_mutex);
+                                            block_group->key.offset))
                                 goto have_block_group;
-                        }
                         goto loop;
                 }
 
                 ins->objectid = search_start;
                 ins->offset = num_bytes;
 
-                btrfs_remove_free_space_lock(block_group, search_start,
-                                             num_bytes);
+                if (offset < search_start)
+                        btrfs_add_free_space(block_group, offset,
+                                             search_start - offset);
+                BUG_ON(offset > search_start);
+
                 /* we are all good, lets return */
-                mutex_unlock(&block_group->alloc_mutex);
                 break;
 loop:
-                mutex_unlock(&block_group->alloc_mutex);
                 put_block_group(block_group);
                 if (using_hint) {
                         empty_size += empty_cluster;
-                        total_needed += empty_cluster;
                         using_hint = 0;
                         up_read(&space_info->groups_sem);
                         goto search;
@@ -2693,7 +2694,6 @@ loop:
         if (!ins->objectid && (empty_size || allowed_chunk_alloc)) {
                 int try_again = empty_size;
 
-                total_needed -= empty_size;
                 empty_size = 0;
 
                 if (allowed_chunk_alloc) {
@@ -5782,7 +5782,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 
                 atomic_set(&cache->count, 1);
                 spin_lock_init(&cache->lock);
-                mutex_init(&cache->alloc_mutex);
+                spin_lock_init(&cache->tree_lock);
                 mutex_init(&cache->cache_mutex);
                 INIT_LIST_HEAD(&cache->list);
                 read_extent_buffer(leaf, &cache->item,
@@ -5838,7 +5838,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
         atomic_set(&cache->count, 1);
         spin_lock_init(&cache->lock);
-        mutex_init(&cache->alloc_mutex);
+        spin_lock_init(&cache->tree_lock);
         mutex_init(&cache->cache_mutex);
         INIT_LIST_HEAD(&cache->list);
 
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 69b023ff6f72..df19b60eef61 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -182,6 +182,7 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
         int ret = 0;
 
 
+        BUG_ON(!info->bytes);
         ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
                                  &info->offset_index);
         if (ret)
@@ -195,14 +196,23 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
         return ret;
 }
 
-static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-                                  u64 offset, u64 bytes)
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+                         u64 offset, u64 bytes)
 {
         struct btrfs_free_space *right_info;
         struct btrfs_free_space *left_info;
         struct btrfs_free_space *info = NULL;
         int ret = 0;
 
+        info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+        if (!info)
+                return -ENOMEM;
+
+        info->offset = offset;
+        info->bytes = bytes;
+
+        spin_lock(&block_group->tree_lock);
+
         /*
          * first we want to see if there is free space adjacent to the range we
          * are adding, if there is remove that struct and add a new one to
@@ -215,42 +225,23 @@ static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 
         if (right_info) {
                 unlink_free_space(block_group, right_info);
-                info = right_info;
-                info->offset = offset;
-                info->bytes += bytes;
+                info->bytes += right_info->bytes;
+                kfree(right_info);
         }
 
         if (left_info && left_info->offset + left_info->bytes == offset) {
                 unlink_free_space(block_group, left_info);
-
-                if (info) {
-                        info->offset = left_info->offset;
-                        info->bytes += left_info->bytes;
-                        kfree(left_info);
-                } else {
-                        info = left_info;
-                        info->bytes += bytes;
-                }
-        }
-
-        if (info) {
-                ret = link_free_space(block_group, info);
-                if (ret)
-                        kfree(info);
-                goto out;
+                info->offset = left_info->offset;
+                info->bytes += left_info->bytes;
+                kfree(left_info);
         }
 
-        info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-        if (!info)
-                return -ENOMEM;
-
-        info->offset = offset;
-        info->bytes = bytes;
-
         ret = link_free_space(block_group, info);
         if (ret)
                 kfree(info);
-out:
+
+        spin_unlock(&block_group->tree_lock);
+
         if (ret) {
                 printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
                 if (ret == -EEXIST)
@@ -260,17 +251,16 @@ out:
         return ret;
 }
 
-static int
-__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
-                          u64 offset, u64 bytes)
+int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+                            u64 offset, u64 bytes)
 {
         struct btrfs_free_space *info;
         int ret = 0;
 
-        BUG_ON(!block_group->cached);
+        spin_lock(&block_group->tree_lock);
+
         info = tree_search_offset(&block_group->free_space_offset, offset, 0,
                                   1);
-
         if (info && info->offset == offset) {
                 if (info->bytes < bytes) {
                         printk(KERN_ERR "Found free space at %llu, size %llu,"
@@ -280,12 +270,14 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                                (unsigned long long)bytes);
                         WARN_ON(1);
                         ret = -EINVAL;
+                        spin_unlock(&block_group->tree_lock);
                         goto out;
                 }
                 unlink_free_space(block_group, info);
 
                 if (info->bytes == bytes) {
                         kfree(info);
+                        spin_unlock(&block_group->tree_lock);
                         goto out;
                 }
 
@@ -293,6 +285,7 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                 info->bytes -= bytes;
 
                 ret = link_free_space(block_group, info);
+                spin_unlock(&block_group->tree_lock);
                 BUG_ON(ret);
         } else if (info && info->offset < offset &&
                    info->offset + info->bytes >= offset + bytes) {
@@ -318,14 +311,15 @@ __btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                          */
                         kfree(info);
                 }
-
+                spin_unlock(&block_group->tree_lock);
                 /* step two, insert a new info struct to cover anything
                  * before the hole
                  */
-                ret = __btrfs_add_free_space(block_group, old_start,
-                                             offset - old_start);
+                ret = btrfs_add_free_space(block_group, old_start,
+                                           offset - old_start);
                 BUG_ON(ret);
         } else {
+                spin_unlock(&block_group->tree_lock);
                 if (!info) {
                         printk(KERN_ERR "couldn't find space %llu to free\n",
                                (unsigned long long)offset);
@@ -344,50 +338,6 @@ out:
         return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-                         u64 offset, u64 bytes)
-{
-        int ret;
-
-        mutex_lock(&block_group->alloc_mutex);
-        ret = __btrfs_add_free_space(block_group, offset, bytes);
-        mutex_unlock(&block_group->alloc_mutex);
-
-        return ret;
-}
-
-int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
-                              u64 offset, u64 bytes)
-{
-        int ret;
-
-        ret = __btrfs_add_free_space(block_group, offset, bytes);
-
-        return ret;
-}
-
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
-                            u64 offset, u64 bytes)
-{
-        int ret = 0;
-
-        mutex_lock(&block_group->alloc_mutex);
-        ret = __btrfs_remove_free_space(block_group, offset, bytes);
-        mutex_unlock(&block_group->alloc_mutex);
-
-        return ret;
-}
-
-int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
-                                 u64 offset, u64 bytes)
-{
-        int ret;
-
-        ret = __btrfs_remove_free_space(block_group, offset, bytes);
-
-        return ret;
-}
-
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                            u64 bytes)
 {
@@ -426,63 +376,44 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
         struct btrfs_free_space *info;
         struct rb_node *node;
 
-        mutex_lock(&block_group->alloc_mutex);
+        spin_lock(&block_group->tree_lock);
         while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
                 info = rb_entry(node, struct btrfs_free_space, bytes_index);
                 unlink_free_space(block_group, info);
                 kfree(info);
                 if (need_resched()) {
-                        mutex_unlock(&block_group->alloc_mutex);
+                        spin_unlock(&block_group->tree_lock);
                         cond_resched();
-                        mutex_lock(&block_group->alloc_mutex);
+                        spin_lock(&block_group->tree_lock);
                 }
         }
-        mutex_unlock(&block_group->alloc_mutex);
-}
-
-#if 0
-static struct btrfs_free_space *btrfs_find_free_space_offset(struct
-                                                              btrfs_block_group_cache
-                                                              *block_group, u64 offset,
-                                                              u64 bytes)
-{
-        struct btrfs_free_space *ret;
-
-        mutex_lock(&block_group->alloc_mutex);
-        ret = tree_search_offset(&block_group->free_space_offset, offset,
-                                 bytes, 0);
-        mutex_unlock(&block_group->alloc_mutex);
-
-        return ret;
+        spin_unlock(&block_group->tree_lock);
 }
 
-static struct btrfs_free_space *btrfs_find_free_space_bytes(struct
-                                                             btrfs_block_group_cache
-                                                             *block_group, u64 offset,
-                                                             u64 bytes)
+u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
+                               u64 offset, u64 bytes, u64 empty_size)
 {
-        struct btrfs_free_space *ret;
-
-        mutex_lock(&block_group->alloc_mutex);
-
-        ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
-        mutex_unlock(&block_group->alloc_mutex);
-
-        return ret;
-}
-#endif
-
-struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
-                                               *block_group, u64 offset,
-                                               u64 bytes)
-{
-        struct btrfs_free_space *ret = NULL;
+        struct btrfs_free_space *entry = NULL;
+        u64 ret = 0;
 
-        ret = tree_search_offset(&block_group->free_space_offset, offset,
-                                 bytes, 1);
-        if (!ret)
-                ret = tree_search_bytes(&block_group->free_space_bytes,
-                                        offset, bytes);
+        spin_lock(&block_group->tree_lock);
+        entry = tree_search_offset(&block_group->free_space_offset, offset,
+                                   bytes + empty_size, 1);
+        if (!entry)
+                entry = tree_search_bytes(&block_group->free_space_bytes,
+                                          offset, bytes + empty_size);
+        if (entry) {
+                unlink_free_space(block_group, entry);
+                ret = entry->offset;
+                entry->offset += bytes;
+                entry->bytes -= bytes;
+
+                if (!entry->bytes)
+                        kfree(entry);
+                else
+                        link_free_space(block_group, entry);
+        }
+        spin_unlock(&block_group->tree_lock);
 
         return ret;
 }