Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	| 199
1 file changed, 169 insertions(+), 30 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 227e5815d838..588ff9849873 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -320,11 +320,6 @@ static int caching_kthread(void *data)
 	if (!path)
 		return -ENOMEM;
 
-	exclude_super_stripes(extent_root, block_group);
-	spin_lock(&block_group->space_info->lock);
-	block_group->space_info->bytes_readonly += block_group->bytes_super;
-	spin_unlock(&block_group->space_info->lock);
-
 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
 
 	/*
@@ -467,8 +462,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 			cache->cached = BTRFS_CACHE_NO;
 		}
 		spin_unlock(&cache->lock);
-		if (ret == 1)
+		if (ret == 1) {
+			free_excluded_extents(fs_info->extent_root, cache);
 			return 0;
+		}
 	}
 
 	if (load_cache_only)
@@ -3089,7 +3086,7 @@ static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
 	return btrfs_reduce_alloc_profile(root, flags);
 }
 
-static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
+u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
 {
 	u64 flags;
 
@@ -3161,8 +3158,12 @@ alloc:
 					     bytes + 2 * 1024 * 1024,
 					     alloc_target, 0);
 			btrfs_end_transaction(trans, root);
-			if (ret < 0)
-				return ret;
+			if (ret < 0) {
+				if (ret != -ENOSPC)
+					return ret;
+				else
+					goto commit_trans;
+			}
 
 			if (!data_sinfo) {
 				btrfs_set_inode_space_info(root, inode);
@@ -3173,6 +3174,7 @@ alloc:
 	spin_unlock(&data_sinfo->lock);
 
 	/* commit the current transaction and try again */
+commit_trans:
 	if (!committed && !root->fs_info->open_ioctl_trans) {
 		committed = 1;
 		trans = btrfs_join_transaction(root, 1);
@@ -3339,8 +3341,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
 	u64 reserved;
 	u64 max_reclaim;
 	u64 reclaimed = 0;
+	long time_left;
 	int pause = 1;
 	int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
+	int loops = 0;
 
 	block_rsv = &root->fs_info->delalloc_block_rsv;
 	space_info = block_rsv->space_info;
@@ -3353,7 +3357,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
 
 	max_reclaim = min(reserved, to_reclaim);
 
-	while (1) {
+	while (loops < 1024) {
 		/* have the flusher threads jump in and do some IO */
 		smp_mb();
 		nr_pages = min_t(unsigned long, nr_pages,
@@ -3361,8 +3365,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
 		writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
 
 		spin_lock(&space_info->lock);
-		if (reserved > space_info->bytes_reserved)
+		if (reserved > space_info->bytes_reserved) {
+			loops = 0;
 			reclaimed += reserved - space_info->bytes_reserved;
+		} else {
+			loops++;
+		}
 		reserved = space_info->bytes_reserved;
 		spin_unlock(&space_info->lock);
 
@@ -3373,7 +3381,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
 			return -EAGAIN;
 
 		__set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(pause);
+		time_left = schedule_timeout(pause);
+
+		/* We were interrupted, exit */
+		if (time_left)
+			break;
+
 		pause <<= 1;
 		if (pause > HZ / 10)
 			pause = HZ / 10;
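
Taken together, the shrink_delalloc hunks above bound the reclaim loop: the pause between retries doubles up to HZ/10, the loop gives up after 1024 consecutive iterations that reclaim nothing (the counter resets whenever progress is made), and a non-zero return from schedule_timeout() means the sleep was interrupted, so the loop breaks early. Below is a minimal userspace C sketch of that retry/backoff shape; try_reclaim(), MAX_IDLE_LOOPS and the microsecond pause cap are illustrative stand-ins, not btrfs code.

#include <stdio.h>
#include <unistd.h>

#define MAX_IDLE_LOOPS 1024     /* analogous to the "loops < 1024" bound */
#define MAX_PAUSE_US   100000   /* analogous to the HZ/10 cap on the pause */

/* Illustrative stand-in for "how much did writeback give back this round?" */
static long try_reclaim(long *remaining)
{
	long got = *remaining > 4096 ? 4096 : *remaining;

	*remaining -= got;
	return got;
}

int main(void)
{
	long remaining = 1 << 20;       /* bytes we would like to reclaim */
	long reclaimed = 0;
	unsigned int pause_us = 1000;
	int loops = 0;

	while (loops < MAX_IDLE_LOOPS) {
		long got = try_reclaim(&remaining);

		if (got > 0) {
			reclaimed += got;
			loops = 0;      /* progress: reset the idle counter */
		} else {
			loops++;        /* nothing reclaimed this round */
		}

		if (remaining == 0)
			break;

		usleep(pause_us);       /* back off before trying again */
		pause_us <<= 1;
		if (pause_us > MAX_PAUSE_US)
			pause_us = MAX_PAUSE_US;
	}

	printf("reclaimed %ld bytes\n", reclaimed);
	return 0;
}
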
@@ -3583,8 +3596,20 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
 
 	if (num_bytes > 0) {
 		if (dest) {
-			block_rsv_add_bytes(dest, num_bytes, 0);
-		} else {
+			spin_lock(&dest->lock);
+			if (!dest->full) {
+				u64 bytes_to_add;
+
+				bytes_to_add = dest->size - dest->reserved;
+				bytes_to_add = min(num_bytes, bytes_to_add);
+				dest->reserved += bytes_to_add;
+				if (dest->reserved >= dest->size)
+					dest->full = 1;
+				num_bytes -= bytes_to_add;
+			}
+			spin_unlock(&dest->lock);
+		}
+		if (num_bytes) {
 			spin_lock(&space_info->lock);
 			space_info->bytes_reserved -= num_bytes;
 			spin_unlock(&space_info->lock);
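
The block_rsv_release_bytes hunk above changes where freed bytes go: they first top up the destination reserve, but only until it reaches its target size, and only the remainder is handed back to the space_info accounting. A small self-contained sketch of that top-up-then-return-the-rest arithmetic, using a simplified struct rather than the btrfs definitions:

#include <stdio.h>

struct rsv {
	unsigned long long size;        /* target size of the reserve */
	unsigned long long reserved;    /* bytes currently held */
	int full;
};

/* Top up dest and return the portion of num_bytes that did not fit. */
static unsigned long long refill_rsv(struct rsv *dest, unsigned long long num_bytes)
{
	if (!dest->full) {
		unsigned long long bytes_to_add = dest->size - dest->reserved;

		if (bytes_to_add > num_bytes)
			bytes_to_add = num_bytes;
		dest->reserved += bytes_to_add;
		if (dest->reserved >= dest->size)
			dest->full = 1;
		num_bytes -= bytes_to_add;
	}
	return num_bytes;       /* leftover goes back to the space pool */
}

int main(void)
{
	struct rsv dest = { .size = 4096, .reserved = 3072, .full = 0 };
	unsigned long long leftover = refill_rsv(&dest, 2048);

	/* Prints: dest 4096/4096, leftover 1024 */
	printf("dest %llu/%llu, leftover %llu\n",
	       dest.reserved, dest.size, leftover);
	return 0;
}
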
@@ -3721,11 +3746,6 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
 		return 0;
 	}
 
-	WARN_ON(1);
-	printk(KERN_INFO"block_rsv size %llu reserved %llu freed %llu %llu\n",
-		block_rsv->size, block_rsv->reserved,
-		block_rsv->freed[0], block_rsv->freed[1]);
-
 	return -ENOSPC;
 }
 
@@ -4012,6 +4032,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 
 	num_bytes = ALIGN(num_bytes, root->sectorsize);
 	atomic_dec(&BTRFS_I(inode)->outstanding_extents);
+	WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
 
 	spin_lock(&BTRFS_I(inode)->accounting_lock);
 	nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
@@ -5355,7 +5376,7 @@ again:
 			       num_bytes, data, 1);
 		goto again;
 	}
-	if (ret == -ENOSPC) {
+	if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
 		struct btrfs_space_info *sinfo;
 
 		sinfo = __find_space_info(root->fs_info, data);
@@ -5633,6 +5654,7 @@ use_block_rsv(struct btrfs_trans_handle *trans,
 	      struct btrfs_root *root, u32 blocksize)
 {
 	struct btrfs_block_rsv *block_rsv;
+	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
 	int ret;
 
 	block_rsv = get_block_rsv(trans, root);
@@ -5640,14 +5662,39 @@ use_block_rsv(struct btrfs_trans_handle *trans,
 	if (block_rsv->size == 0) {
 		ret = reserve_metadata_bytes(trans, root, block_rsv,
 					     blocksize, 0);
-		if (ret)
+		/*
+		 * If we couldn't reserve metadata bytes try and use some from
+		 * the global reserve.
+		 */
+		if (ret && block_rsv != global_rsv) {
+			ret = block_rsv_use_bytes(global_rsv, blocksize);
+			if (!ret)
+				return global_rsv;
+			return ERR_PTR(ret);
+		} else if (ret) {
 			return ERR_PTR(ret);
+		}
 		return block_rsv;
 	}
 
 	ret = block_rsv_use_bytes(block_rsv, blocksize);
 	if (!ret)
 		return block_rsv;
+	if (ret) {
+		WARN_ON(1);
+		ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
+					     0);
+		if (!ret) {
+			spin_lock(&block_rsv->lock);
+			block_rsv->size += blocksize;
+			spin_unlock(&block_rsv->lock);
+			return block_rsv;
+		} else if (ret && block_rsv != global_rsv) {
+			ret = block_rsv_use_bytes(global_rsv, blocksize);
+			if (!ret)
+				return global_rsv;
+		}
+	}
 
 	return ERR_PTR(-ENOSPC);
 }
@@ -6221,6 +6268,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 	BUG_ON(!wc);
 
 	trans = btrfs_start_transaction(tree_root, 0);
+	BUG_ON(IS_ERR(trans));
+
 	if (block_rsv)
 		trans->block_rsv = block_rsv;
 
@@ -6318,6 +6367,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 
 			btrfs_end_transaction_throttle(trans, tree_root);
 			trans = btrfs_start_transaction(tree_root, 0);
+			BUG_ON(IS_ERR(trans));
 			if (block_rsv)
 				trans->block_rsv = block_rsv;
 		}
@@ -6446,6 +6496,8 @@ static noinline int relocate_inode_pages(struct inode *inode, u64 start,
 	int ret = 0;
 
 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
+	if (!ra)
+		return -ENOMEM;
 
 	mutex_lock(&inode->i_mutex);
 	first_index = start >> PAGE_CACHE_SHIFT;
@@ -6531,7 +6583,7 @@ static noinline int relocate_data_extent(struct inode *reloc_inode,
 	u64 end = start + extent_key->offset - 1;
 
 	em = alloc_extent_map(GFP_NOFS);
-	BUG_ON(!em || IS_ERR(em));
+	BUG_ON(!em);
 
 	em->start = start;
 	em->len = extent_key->offset;
@@ -7477,7 +7529,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
 	BUG_ON(reloc_root->commit_root != NULL);
 	while (1) {
 		trans = btrfs_join_transaction(root, 1);
-		BUG_ON(!trans);
+		BUG_ON(IS_ERR(trans));
 
 		mutex_lock(&root->fs_info->drop_mutex);
 		ret = btrfs_drop_snapshot(trans, reloc_root);
@@ -7535,7 +7587,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
 
 	if (found) {
 		trans = btrfs_start_transaction(root, 1);
-		BUG_ON(!trans);
+		BUG_ON(IS_ERR(trans));
 		ret = btrfs_commit_transaction(trans, root);
 		BUG_ON(ret);
 	}
@@ -7779,7 +7831,7 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
 
 
 	trans = btrfs_start_transaction(extent_root, 1);
-	BUG_ON(!trans);
+	BUG_ON(IS_ERR(trans));
 
 	if (extent_key->objectid == 0) {
 		ret = del_extent_zero(trans, extent_root, path, extent_key);
@@ -7970,13 +8022,14 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache)
 
 	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
 	    sinfo->bytes_may_use + sinfo->bytes_readonly +
-	    cache->reserved_pinned + num_bytes < sinfo->total_bytes) {
+	    cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
 		sinfo->bytes_readonly += num_bytes;
 		sinfo->bytes_reserved += cache->reserved_pinned;
 		cache->reserved_pinned = 0;
 		cache->ro = 1;
 		ret = 0;
 	}
+
 	spin_unlock(&cache->lock);
 	spin_unlock(&sinfo->lock);
 	return ret;
@@ -8012,6 +8065,69 @@ out:
 	return ret;
 }
 
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+			    struct btrfs_root *root, u64 type)
+{
+	u64 alloc_flags = get_alloc_profile(root, type);
+	return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
+}
+
+/*
+ * helper to account the unused space of all the readonly block group in the
+ * list. takes mirrors into account.
+ */
+static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
+{
+	struct btrfs_block_group_cache *block_group;
+	u64 free_bytes = 0;
+	int factor;
+
+	list_for_each_entry(block_group, groups_list, list) {
+		spin_lock(&block_group->lock);
+
+		if (!block_group->ro) {
+			spin_unlock(&block_group->lock);
+			continue;
+		}
+
+		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
+					  BTRFS_BLOCK_GROUP_RAID10 |
+					  BTRFS_BLOCK_GROUP_DUP))
+			factor = 2;
+		else
+			factor = 1;
+
+		free_bytes += (block_group->key.offset -
+			       btrfs_block_group_used(&block_group->item)) *
+			       factor;
+
+		spin_unlock(&block_group->lock);
+	}
+
+	return free_bytes;
+}
+
+/*
+ * helper to account the unused space of all the readonly block group in the
+ * space_info. takes mirrors into account.
+ */
+u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
+{
+	int i;
+	u64 free_bytes = 0;
+
+	spin_lock(&sinfo->lock);
+
+	for(i = 0; i < BTRFS_NR_RAID_TYPES; i++)
+		if (!list_empty(&sinfo->block_groups[i]))
+			free_bytes += __btrfs_get_ro_block_group_free_space(
+						&sinfo->block_groups[i]);
+
+	spin_unlock(&sinfo->lock);
+
+	return free_bytes;
+}
+
 int btrfs_set_block_group_rw(struct btrfs_root *root,
 			     struct btrfs_block_group_cache *cache)
 {
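
The helpers added in the hunk above report how much space sits unused inside read-only block groups, doubling the figure for profiles that keep two copies of every byte (RAID1, RAID10, DUP) so the result reflects raw bytes on disk. Here is a compilable sketch of the same accounting over a plain array, with a simplified group description instead of btrfs_block_group_cache; the flag names and sample numbers are illustrative only.

#include <stdio.h>

#define RAID1  (1 << 0)
#define RAID10 (1 << 1)
#define DUP    (1 << 2)

struct group {
	unsigned long long length;      /* total bytes in the block group */
	unsigned long long used;        /* bytes allocated out of it */
	unsigned long flags;
	int ro;
};

static unsigned long long ro_free_space(const struct group *groups, int n)
{
	unsigned long long free_bytes = 0;
	int i;

	for (i = 0; i < n; i++) {
		int factor;

		if (!groups[i].ro)
			continue;       /* only read-only groups are counted */

		/* two copies on disk -> unused space counts double */
		if (groups[i].flags & (RAID1 | RAID10 | DUP))
			factor = 2;
		else
			factor = 1;

		free_bytes += (groups[i].length - groups[i].used) * factor;
	}
	return free_bytes;
}

int main(void)
{
	struct group groups[] = {
		{ .length = 1ULL << 30, .used = 1ULL << 29, .flags = RAID1, .ro = 1 },
		{ .length = 1ULL << 30, .used = 1ULL << 28, .flags = 0,     .ro = 0 },
	};

	/* Only the first group counts; its 512 MiB of slack is doubled to 1 GiB. */
	printf("%llu bytes\n", ro_free_space(groups, 2));
	return 0;
}
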
@@ -8092,7 +8208,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	mutex_lock(&root->fs_info->chunk_mutex);
 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
 		u64 min_free = btrfs_block_group_used(&block_group->item);
-		u64 dev_offset, max_avail;
+		u64 dev_offset;
 
 		/*
 		 * check to make sure we can actually find a chunk with enough
@@ -8100,7 +8216,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 		 */
 		if (device->total_bytes > device->bytes_used + min_free) {
 			ret = find_free_dev_extent(NULL, device, min_free,
-						   &dev_offset, &max_avail);
+						   &dev_offset, NULL);
 			if (!ret)
 				break;
 			ret = -1;
@@ -8213,6 +8329,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 		if (block_group->cached == BTRFS_CACHE_STARTED)
 			wait_block_group_cache_done(block_group);
 
+		/*
+		 * We haven't cached this block group, which means we could
+		 * possibly have excluded extents on this block group.
+		 */
+		if (block_group->cached == BTRFS_CACHE_NO)
+			free_excluded_extents(info->extent_root, block_group);
+
 		btrfs_remove_free_space_cache(block_group);
 		btrfs_put_block_group(block_group);
 
@@ -8328,6 +8451,13 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		cache->sectorsize = root->sectorsize;
 
 		/*
+		 * We need to exclude the super stripes now so that the space
+		 * info has super bytes accounted for, otherwise we'll think
+		 * we have more space than we actually do.
+		 */
+		exclude_super_stripes(root, cache);
+
+		/*
 		 * check for two cases, either we are full, and therefore
 		 * don't need to bother with the caching work since we won't
 		 * find any space, or we are empty, and we can just add all
@@ -8335,12 +8465,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		 * time, particularly in the full case.
 		 */
 		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
-			exclude_super_stripes(root, cache);
 			cache->last_byte_to_unpin = (u64)-1;
 			cache->cached = BTRFS_CACHE_FINISHED;
 			free_excluded_extents(root, cache);
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
-			exclude_super_stripes(root, cache);
 			cache->last_byte_to_unpin = (u64)-1;
 			cache->cached = BTRFS_CACHE_FINISHED;
 			add_new_free_space(cache, root->fs_info,
@@ -8584,3 +8712,14 @@ out:
 	btrfs_free_path(path);
 	return ret;
 }
+
+int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
+{
+	return unpin_extent_range(root, start, end);
+}
+
+int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
+			       u64 num_bytes)
+{
+	return btrfs_discard_extent(root, bytenr, num_bytes);
+}