author		Josef Bacik <jbacik@redhat.com>	2008-10-29 14:49:05 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-10-29 14:49:05 -0400
commit		2517920135b0d29e70453e5b03d70d7b94207df3 (patch)
tree		e0c526faa5c2c7bc3add340e5b7e8df26924dca9 /fs/btrfs/free-space-cache.c
parent		80eb234af09dbe6c97b2e3d60a13ec391e98fbba (diff)
Btrfs: nuke fs wide allocation mutex V2
This patch removes the giant fs_info->alloc_mutex and replaces it with a bunch
of little locks.  There is now a pinned_mutex, which is used when messing with
the pinned_extents extent io tree, and an extent_ins_mutex, which is used with
the pending_del and extent_ins extent io trees.

The locking for the extent tree stuff was inspired by a patch that Yan Zheng
wrote to fix a race condition; I cleaned it up some and changed the locking
around a little bit, but the idea remains the same.  Basically, instead of
holding the extent_ins_mutex throughout the processing of an extent on the
extent_ins or pending_del trees, we hold it only while searching and while
clearing the bits on those trees, and we lock the extent itself for the
duration of the operations on it.

Also, to keep from getting hung up waiting to lock an extent, I've added a
try_lock_extent, so if we cannot lock the extent we move on to the next one
in the tree and come back to that one later.

I have tested this heavily and it does not appear to break anything.  This
has to be applied on top of my find_free_extent redo patch.  I tested this
patch on top of Yan's space rebalancing code and it worked fine.  The only
thing that has changed since the last version is that I pulled out all my
debugging stuff; apparently I forgot to run guilt refresh before I sent the
last patch out.

Thank you,

Signed-off-by: Josef Bacik <jbacik@redhat.com>
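For readers skimming the patch, the try_lock_extent idea in the message above
boils down to the skip-and-revisit pattern sketched here.  This is a minimal
userspace illustration, not btrfs code: struct extent, process_extent(), and
process_all() are hypothetical stand-ins, each extent's lock is assumed
already initialized, and pthread_mutex_trylock() plays the role that
try_lock_extent() plays on the extent io tree in the patch.

/*
 * Minimal sketch of the skip-and-revisit pattern: sweep the set, skip
 * any extent whose lock is contended, and pick it up on a later pass
 * instead of blocking on it.  All names are hypothetical stand-ins.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct extent {
	pthread_mutex_t lock;	/* stand-in for the per-extent range lock */
	bool done;		/* already processed on an earlier pass?  */
};

static void process_extent(struct extent *ex)
{
	/* the real code would update the extent_ins/pending_del trees here */
	ex->done = true;
}

static void process_all(struct extent *extents, size_t n)
{
	size_t remaining = n;

	while (remaining > 0) {
		for (size_t i = 0; i < n; i++) {
			struct extent *ex = &extents[i];

			if (ex->done)
				continue;
			if (pthread_mutex_trylock(&ex->lock) != 0)
				continue;	/* busy: revisit later */
			process_extent(ex);
			remaining--;
			pthread_mutex_unlock(&ex->lock);
		}
	}
}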
Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--	fs/btrfs/free-space-cache.c	| 92
1 file changed, 66 insertions(+), 26 deletions(-)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 96241f01fa0a..f4926c0f3c8c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -184,8 +184,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
 	return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-			 u64 offset, u64 bytes)
+static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+				  u64 offset, u64 bytes)
 {
 	struct btrfs_free_space *right_info;
 	struct btrfs_free_space *left_info;
@@ -202,8 +202,6 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	 * are adding, if there is remove that struct and add a new one to
 	 * cover the entire range
 	 */
-	spin_lock(&block_group->lock);
-
 	right_info = tree_search_offset(&block_group->free_space_offset,
 					offset+bytes, 0, 1);
 	left_info = tree_search_offset(&block_group->free_space_offset,
@@ -261,7 +259,6 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	if (ret)
 		kfree(info);
 out:
-	spin_unlock(&block_group->lock);
 	if (ret) {
 		printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
 		if (ret == -EEXIST)
@@ -274,13 +271,13 @@ out:
 	return ret;
 }
 
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
-			    u64 offset, u64 bytes)
+static int
+__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+			  u64 offset, u64 bytes)
 {
 	struct btrfs_free_space *info;
 	int ret = 0;
 
-	spin_lock(&block_group->lock);
 	info = tree_search_offset(&block_group->free_space_offset, offset, 0,
 				  1);
 
@@ -334,17 +331,63 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 		/* step two, insert a new info struct to cover anything
 		 * before the hole
 		 */
-		spin_unlock(&block_group->lock);
-		ret = btrfs_add_free_space(block_group, old_start,
-					   offset - old_start);
+		ret = __btrfs_add_free_space(block_group, old_start,
+					     offset - old_start);
 		BUG_ON(ret);
-		goto out_nolock;
 	} else {
 		WARN_ON(1);
 	}
 out:
-	spin_unlock(&block_group->lock);
-out_nolock:
+	return ret;
+}
+
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+			 u64 offset, u64 bytes)
+{
+	int ret;
+	struct btrfs_free_space *sp;
+
+	mutex_lock(&block_group->alloc_mutex);
+	ret = __btrfs_add_free_space(block_group, offset, bytes);
+	sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
+	BUG_ON(!sp);
+	mutex_unlock(&block_group->alloc_mutex);
+
+	return ret;
+}
+
+int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
+			      u64 offset, u64 bytes)
+{
+	int ret;
+	struct btrfs_free_space *sp;
+
+	ret = __btrfs_add_free_space(block_group, offset, bytes);
+	sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
+	BUG_ON(!sp);
+
+	return ret;
+}
+
+int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+			    u64 offset, u64 bytes)
+{
+	int ret = 0;
+
+	mutex_lock(&block_group->alloc_mutex);
+	ret = __btrfs_remove_free_space(block_group, offset, bytes);
+	mutex_unlock(&block_group->alloc_mutex);
+
+	return ret;
+}
+
+int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
+				 u64 offset, u64 bytes)
+{
+	int ret;
+
+	ret = __btrfs_remove_free_space(block_group, offset, bytes);
+
 	return ret;
 }
 
@@ -386,18 +429,18 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
 	struct btrfs_free_space *info;
 	struct rb_node *node;
 
-	spin_lock(&block_group->lock);
+	mutex_lock(&block_group->alloc_mutex);
 	while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
 		info = rb_entry(node, struct btrfs_free_space, bytes_index);
 		unlink_free_space(block_group, info);
 		kfree(info);
 		if (need_resched()) {
-			spin_unlock(&block_group->lock);
+			mutex_unlock(&block_group->alloc_mutex);
 			cond_resched();
-			spin_lock(&block_group->lock);
+			mutex_lock(&block_group->alloc_mutex);
 		}
 	}
-	spin_unlock(&block_group->lock);
+	mutex_unlock(&block_group->alloc_mutex);
 }
 
 struct btrfs_free_space *btrfs_find_free_space_offset(struct
@@ -407,10 +450,10 @@ struct btrfs_free_space *btrfs_find_free_space_offset(struct
 {
 	struct btrfs_free_space *ret;
 
-	spin_lock(&block_group->lock);
+	mutex_lock(&block_group->alloc_mutex);
 	ret = tree_search_offset(&block_group->free_space_offset, offset,
 				 bytes, 0);
-	spin_unlock(&block_group->lock);
+	mutex_unlock(&block_group->alloc_mutex);
 
 	return ret;
 }
@@ -422,10 +465,10 @@ struct btrfs_free_space *btrfs_find_free_space_bytes(struct
 {
 	struct btrfs_free_space *ret;
 
-	spin_lock(&block_group->lock);
+	mutex_lock(&block_group->alloc_mutex);
 
 	ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
-	spin_unlock(&block_group->lock);
+	mutex_unlock(&block_group->alloc_mutex);
 
 	return ret;
 }
@@ -434,16 +477,13 @@ struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
 					       *block_group, u64 offset,
 					       u64 bytes)
 {
-	struct btrfs_free_space *ret;
+	struct btrfs_free_space *ret = NULL;
 
-	spin_lock(&block_group->lock);
 	ret = tree_search_offset(&block_group->free_space_offset, offset,
 				 bytes, 0);
 	if (!ret)
 		ret = tree_search_bytes(&block_group->free_space_bytes,
 					offset, bytes);
 
-	spin_unlock(&block_group->lock);
-
 	return ret;
 }
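One convention worth noting in the new wrappers above: btrfs_add_free_space()
and btrfs_remove_free_space() take block_group->alloc_mutex themselves, while
the *_lock variants take no lock at all and rely on the caller already holding
alloc_mutex.  A minimal sketch of that split, with hypothetical names (struct
group, add_free_space, ...) standing in for the btrfs ones:

/*
 * Sketch of the wrapper split the patch introduces: foo() acquires the
 * mutex itself; foo_lock() assumes the caller already holds it.  All
 * names are hypothetical stand-ins for the btrfs functions.
 */
#include <pthread.h>

struct group {
	pthread_mutex_t alloc_mutex;
	long free_bytes;
};

/* Core helper: caller must hold group->alloc_mutex. */
static int __add_free_space(struct group *g, long bytes)
{
	g->free_bytes += bytes;
	return 0;
}

/* Unlocked callers use this; it takes the mutex around the core helper. */
int add_free_space(struct group *g, long bytes)
{
	int ret;

	pthread_mutex_lock(&g->alloc_mutex);
	ret = __add_free_space(g, bytes);
	pthread_mutex_unlock(&g->alloc_mutex);
	return ret;
}

/*
 * "_lock" variant, mirroring btrfs_add_free_space_lock() in the diff:
 * for call sites that already hold alloc_mutex, so taking it again
 * would deadlock.
 */
int add_free_space_lock(struct group *g, long bytes)
{
	return __add_free_space(g, bytes);
}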