Diffstat (limited to 'fs/btrfs/free-space-cache.c')
-rw-r--r--  fs/btrfs/free-space-cache.c  |  92
1 file changed, 66 insertions, 26 deletions
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 96241f01fa0a..f4926c0f3c8c 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -184,8 +184,8 @@ static int link_free_space(struct btrfs_block_group_cache *block_group,
 	return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-			 u64 offset, u64 bytes)
+static int __btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+				  u64 offset, u64 bytes)
 {
 	struct btrfs_free_space *right_info;
 	struct btrfs_free_space *left_info;
@@ -202,8 +202,6 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	 * are adding, if there is remove that struct and add a new one to
 	 * cover the entire range
 	 */
-	spin_lock(&block_group->lock);
-
 	right_info = tree_search_offset(&block_group->free_space_offset,
 					offset+bytes, 0, 1);
 	left_info = tree_search_offset(&block_group->free_space_offset,
@@ -261,7 +259,6 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	if (ret)
 		kfree(info);
 out:
-	spin_unlock(&block_group->lock);
 	if (ret) {
 		printk(KERN_ERR "btrfs: unable to add free space :%d\n", ret);
 		if (ret == -EEXIST)
@@ -274,13 +271,13 @@ out:
 	return ret;
 }
 
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
-			    u64 offset, u64 bytes)
+static int
+__btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+			  u64 offset, u64 bytes)
 {
 	struct btrfs_free_space *info;
 	int ret = 0;
 
-	spin_lock(&block_group->lock);
 	info = tree_search_offset(&block_group->free_space_offset, offset, 0,
 				  1);
 
@@ -334,17 +331,63 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
 		/* step two, insert a new info struct to cover anything
 		 * before the hole
 		 */
-		spin_unlock(&block_group->lock);
-		ret = btrfs_add_free_space(block_group, old_start,
-					   offset - old_start);
+		ret = __btrfs_add_free_space(block_group, old_start,
+					     offset - old_start);
 		BUG_ON(ret);
-		goto out_nolock;
 	} else {
 		WARN_ON(1);
 	}
 out:
-	spin_unlock(&block_group->lock);
-out_nolock:
+	return ret;
+}
+
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+			 u64 offset, u64 bytes)
+{
+	int ret;
+	struct btrfs_free_space *sp;
+
+	mutex_lock(&block_group->alloc_mutex);
+	ret = __btrfs_add_free_space(block_group, offset, bytes);
+	sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
+	BUG_ON(!sp);
+	mutex_unlock(&block_group->alloc_mutex);
+
+	return ret;
+}
+
+int btrfs_add_free_space_lock(struct btrfs_block_group_cache *block_group,
+			      u64 offset, u64 bytes)
+{
+	int ret;
+	struct btrfs_free_space *sp;
+
+	ret = __btrfs_add_free_space(block_group, offset, bytes);
+	sp = tree_search_offset(&block_group->free_space_offset, offset, 0, 1);
+	BUG_ON(!sp);
+
+	return ret;
+}
+
+int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+			    u64 offset, u64 bytes)
+{
+	int ret = 0;
+
+	mutex_lock(&block_group->alloc_mutex);
+	ret = __btrfs_remove_free_space(block_group, offset, bytes);
+	mutex_unlock(&block_group->alloc_mutex);
+
+	return ret;
+}
+
+int btrfs_remove_free_space_lock(struct btrfs_block_group_cache *block_group,
+				 u64 offset, u64 bytes)
+{
+	int ret;
+
+	ret = __btrfs_remove_free_space(block_group, offset, bytes);
+
 	return ret;
 }
 
@@ -386,18 +429,18 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
 	struct btrfs_free_space *info;
 	struct rb_node *node;
 
-	spin_lock(&block_group->lock);
+	mutex_lock(&block_group->alloc_mutex);
 	while ((node = rb_last(&block_group->free_space_bytes)) != NULL) {
 		info = rb_entry(node, struct btrfs_free_space, bytes_index);
 		unlink_free_space(block_group, info);
 		kfree(info);
 		if (need_resched()) {
-			spin_unlock(&block_group->lock);
+			mutex_unlock(&block_group->alloc_mutex);
 			cond_resched();
-			spin_lock(&block_group->lock);
+			mutex_lock(&block_group->alloc_mutex);
 		}
 	}
-	spin_unlock(&block_group->lock);
+	mutex_unlock(&block_group->alloc_mutex);
 }
 
 struct btrfs_free_space *btrfs_find_free_space_offset(struct
@@ -407,10 +450,10 @@ struct btrfs_free_space *btrfs_find_free_space_offset(struct
 {
 	struct btrfs_free_space *ret;
 
-	spin_lock(&block_group->lock);
+	mutex_lock(&block_group->alloc_mutex);
 	ret = tree_search_offset(&block_group->free_space_offset, offset,
 				 bytes, 0);
-	spin_unlock(&block_group->lock);
+	mutex_unlock(&block_group->alloc_mutex);
 
 	return ret;
 }
@@ -422,10 +465,10 @@ struct btrfs_free_space *btrfs_find_free_space_bytes(struct
 {
 	struct btrfs_free_space *ret;
 
-	spin_lock(&block_group->lock);
+	mutex_lock(&block_group->alloc_mutex);
 
 	ret = tree_search_bytes(&block_group->free_space_bytes, offset, bytes);
-	spin_unlock(&block_group->lock);
+	mutex_unlock(&block_group->alloc_mutex);
 
 	return ret;
 }
@@ -434,16 +477,13 @@ struct btrfs_free_space *btrfs_find_free_space(struct btrfs_block_group_cache
 					*block_group, u64 offset,
 					u64 bytes)
 {
-	struct btrfs_free_space *ret;
+	struct btrfs_free_space *ret = NULL;
 
-	spin_lock(&block_group->lock);
 	ret = tree_search_offset(&block_group->free_space_offset, offset,
 				 bytes, 0);
 	if (!ret)
 		ret = tree_search_bytes(&block_group->free_space_bytes,
 					offset, bytes);
 
-	spin_unlock(&block_group->lock);
-
 	return ret;
 }
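
Not part of the patch: a minimal caller sketch of the locking convention the new wrappers appear to introduce, where the plain btrfs_add_free_space()/btrfs_remove_free_space() take block_group->alloc_mutex internally while the *_lock variants rely on the caller already holding it. The function example_return_free_extent() and its arguments are hypothetical, for illustration only.

/*
 * Hypothetical caller, not from the patch.  Shows which wrapper a caller
 * would pick depending on whether it already holds alloc_mutex.
 */
static void example_return_free_extent(struct btrfs_block_group_cache *cache,
				       u64 start, u64 len)
{
	/* Caller does not hold alloc_mutex: the wrapper takes it itself. */
	btrfs_add_free_space(cache, start, len);

	/* Caller holds alloc_mutex: use the _lock variant, which does not. */
	mutex_lock(&cache->alloc_mutex);
	btrfs_remove_free_space_lock(cache, start, len);
	mutex_unlock(&cache->alloc_mutex);
}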