Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	211
1 file changed, 159 insertions(+), 52 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 47c1ba141082..222d6aea4a8a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -315,12 +315,6 @@ get_caching_control(struct btrfs_block_group_cache *cache)
 	struct btrfs_caching_control *ctl;
 
 	spin_lock(&cache->lock);
-	if (cache->cached != BTRFS_CACHE_STARTED) {
-		spin_unlock(&cache->lock);
-		return NULL;
-	}
-
-	/* We're loading it the fast way, so we don't have a caching_ctl. */
 	if (!cache->caching_ctl) {
 		spin_unlock(&cache->lock);
 		return NULL;
@@ -594,6 +588,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	spin_unlock(&cache->lock);
 
 	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
+		mutex_lock(&caching_ctl->mutex);
 		ret = load_free_space_cache(fs_info, cache);
 
 		spin_lock(&cache->lock);
@@ -601,15 +596,19 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 			cache->caching_ctl = NULL;
 			cache->cached = BTRFS_CACHE_FINISHED;
 			cache->last_byte_to_unpin = (u64)-1;
+			caching_ctl->progress = (u64)-1;
 		} else {
 			if (load_cache_only) {
 				cache->caching_ctl = NULL;
 				cache->cached = BTRFS_CACHE_NO;
 			} else {
 				cache->cached = BTRFS_CACHE_STARTED;
+				cache->has_caching_ctl = 1;
 			}
 		}
 		spin_unlock(&cache->lock);
+		mutex_unlock(&caching_ctl->mutex);
+
 		wake_up(&caching_ctl->wait);
 		if (ret == 1) {
 			put_caching_control(caching_ctl);
@@ -627,6 +626,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 		cache->cached = BTRFS_CACHE_NO;
 	} else {
 		cache->cached = BTRFS_CACHE_STARTED;
+		cache->has_caching_ctl = 1;
 	}
 	spin_unlock(&cache->lock);
 	wake_up(&caching_ctl->wait);
@@ -3162,7 +3162,19 @@ next_block_group(struct btrfs_root *root,
 		 struct btrfs_block_group_cache *cache)
 {
 	struct rb_node *node;
+
 	spin_lock(&root->fs_info->block_group_cache_lock);
+
+	/* If our block group was removed, we need a full search. */
+	if (RB_EMPTY_NODE(&cache->cache_node)) {
+		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
+
+		spin_unlock(&root->fs_info->block_group_cache_lock);
+		btrfs_put_block_group(cache);
+		cache = btrfs_lookup_first_block_group(root->fs_info,
+						       next_bytenr);
+		return cache;
+	}
 	node = rb_next(&cache->cache_node);
 	btrfs_put_block_group(cache);
 	if (node) {
@@ -3504,6 +3516,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 	found->chunk_alloc = 0;
 	found->flush = 0;
 	init_waitqueue_head(&found->wait);
+	INIT_LIST_HEAD(&found->ro_bgs);
 
 	ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
 				   info->space_info_kobj, "%s",
@@ -5425,7 +5438,17 @@ static int update_block_group(struct btrfs_root *root,
 			spin_unlock(&cache->space_info->lock);
 		} else {
 			old_val -= num_bytes;
+			btrfs_set_block_group_used(&cache->item, old_val);
+			cache->pinned += num_bytes;
+			cache->space_info->bytes_pinned += num_bytes;
+			cache->space_info->bytes_used -= num_bytes;
+			cache->space_info->disk_used -= num_bytes * factor;
+			spin_unlock(&cache->lock);
+			spin_unlock(&cache->space_info->lock);
 
+			set_extent_dirty(info->pinned_extents,
+					 bytenr, bytenr + num_bytes - 1,
+					 GFP_NOFS | __GFP_NOFAIL);
 			/*
 			 * No longer have used bytes in this block group, queue
 			 * it for deletion.
@@ -5439,17 +5462,6 @@ static int update_block_group(struct btrfs_root *root,
 				}
 				spin_unlock(&info->unused_bgs_lock);
 			}
-			btrfs_set_block_group_used(&cache->item, old_val);
-			cache->pinned += num_bytes;
-			cache->space_info->bytes_pinned += num_bytes;
-			cache->space_info->bytes_used -= num_bytes;
-			cache->space_info->disk_used -= num_bytes * factor;
-			spin_unlock(&cache->lock);
-			spin_unlock(&cache->space_info->lock);
-
-			set_extent_dirty(info->pinned_extents,
-					 bytenr, bytenr + num_bytes - 1,
-					 GFP_NOFS | __GFP_NOFAIL);
 		}
 		btrfs_put_block_group(cache);
 		total -= num_bytes;
@@ -8511,6 +8523,7 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
 	    min_allocable_bytes <= sinfo->total_bytes) {
 		sinfo->bytes_readonly += num_bytes;
 		cache->ro = 1;
+		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
 		ret = 0;
 	}
 out:
@@ -8565,15 +8578,20 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
 
 /*
  * helper to account the unused space of all the readonly block group in the
- * list. takes mirrors into account.
+ * space_info. takes mirrors into account.
  */
-static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
+u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
 {
 	struct btrfs_block_group_cache *block_group;
 	u64 free_bytes = 0;
 	int factor;
 
-	list_for_each_entry(block_group, groups_list, list) {
+	/* It's df, we don't care if it's racy */
+	if (list_empty(&sinfo->ro_bgs))
+		return 0;
+
+	spin_lock(&sinfo->lock);
+	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
 		spin_lock(&block_group->lock);
 
 		if (!block_group->ro) {
@@ -8594,26 +8612,6 @@ static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
 
 		spin_unlock(&block_group->lock);
 	}
-
-	return free_bytes;
-}
-
-/*
- * helper to account the unused space of all the readonly block group in the
- * space_info. takes mirrors into account.
- */
-u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
-{
-	int i;
-	u64 free_bytes = 0;
-
-	spin_lock(&sinfo->lock);
-
-	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
-		if (!list_empty(&sinfo->block_groups[i]))
-			free_bytes += __btrfs_get_ro_block_group_free_space(
-						&sinfo->block_groups[i]);
-
 	spin_unlock(&sinfo->lock);
 
 	return free_bytes;
@@ -8633,6 +8631,7 @@ void btrfs_set_block_group_rw(struct btrfs_root *root,
 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
 	sinfo->bytes_readonly -= num_bytes;
 	cache->ro = 0;
+	list_del_init(&cache->ro_list);
 	spin_unlock(&cache->lock);
 	spin_unlock(&sinfo->lock);
 }
@@ -9002,7 +9001,9 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
 	INIT_LIST_HEAD(&cache->list);
 	INIT_LIST_HEAD(&cache->cluster_list);
 	INIT_LIST_HEAD(&cache->bg_list);
+	INIT_LIST_HEAD(&cache->ro_list);
 	btrfs_init_free_space_ctl(cache);
+	atomic_set(&cache->trimming, 0);
 
 	return cache;
 }
@@ -9195,9 +9196,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 	int ret = 0;
 
 	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
-		list_del_init(&block_group->bg_list);
 		if (ret)
-			continue;
+			goto next;
 
 		spin_lock(&block_group->lock);
 		memcpy(&item, &block_group->item, sizeof(item));
@@ -9212,6 +9212,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 					key.objectid, key.offset);
 		if (ret)
 			btrfs_abort_transaction(trans, extent_root, ret);
+next:
+		list_del_init(&block_group->bg_list);
 	}
 }
 
@@ -9304,7 +9306,8 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 }
 
 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root, u64 group_start)
+			     struct btrfs_root *root, u64 group_start,
+			     struct extent_map *em)
 {
 	struct btrfs_path *path;
 	struct btrfs_block_group_cache *block_group;
@@ -9316,6 +9319,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	int ret;
 	int index;
 	int factor;
+	struct btrfs_caching_control *caching_ctl = NULL;
+	bool remove_em;
 
 	root = root->fs_info->extent_root;
 
@@ -9400,6 +9405,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	spin_lock(&root->fs_info->block_group_cache_lock);
 	rb_erase(&block_group->cache_node,
 		 &root->fs_info->block_group_cache_tree);
+	RB_CLEAR_NODE(&block_group->cache_node);
 
 	if (root->fs_info->first_logical_byte == block_group->key.objectid)
 		root->fs_info->first_logical_byte = (u64)-1;
@@ -9411,6 +9417,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	 * are still on the list after taking the semaphore
 	 */
 	list_del_init(&block_group->list);
+	list_del_init(&block_group->ro_list);
 	if (list_empty(&block_group->space_info->block_groups[index])) {
 		kobj = block_group->space_info->block_group_kobjs[index];
 		block_group->space_info->block_group_kobjs[index] = NULL;
@@ -9422,8 +9429,32 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 		kobject_put(kobj);
 	}
 
+	if (block_group->has_caching_ctl)
+		caching_ctl = get_caching_control(block_group);
 	if (block_group->cached == BTRFS_CACHE_STARTED)
 		wait_block_group_cache_done(block_group);
+	if (block_group->has_caching_ctl) {
+		down_write(&root->fs_info->commit_root_sem);
+		if (!caching_ctl) {
+			struct btrfs_caching_control *ctl;
+
+			list_for_each_entry(ctl,
+				    &root->fs_info->caching_block_groups, list)
+				if (ctl->block_group == block_group) {
+					caching_ctl = ctl;
+					atomic_inc(&caching_ctl->count);
+					break;
+				}
+		}
+		if (caching_ctl)
+			list_del_init(&caching_ctl->list);
+		up_write(&root->fs_info->commit_root_sem);
+		if (caching_ctl) {
+			/* Once for the caching bgs list and once for us. */
+			put_caching_control(caching_ctl);
+			put_caching_control(caching_ctl);
+		}
+	}
 
 	btrfs_remove_free_space_cache(block_group);
 
@@ -9435,6 +9466,71 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
 	memcpy(&key, &block_group->key, sizeof(key));
 
+	lock_chunks(root);
+	if (!list_empty(&em->list)) {
+		/* We're in the transaction->pending_chunks list. */
+		free_extent_map(em);
+	}
+	spin_lock(&block_group->lock);
+	block_group->removed = 1;
+	/*
+	 * At this point trimming can't start on this block group, because we
+	 * removed the block group from the tree fs_info->block_group_cache_tree
+	 * so no one can find it anymore and even if someone already got this
+	 * block group before we removed it from the rbtree, they have already
+	 * incremented block_group->trimming - if they didn't, they won't find
+	 * any free space entries because we already removed them all when we
+	 * called btrfs_remove_free_space_cache().
+	 *
+	 * And we must not remove the extent map from the fs_info->mapping_tree
+	 * to prevent the same logical address range and physical device space
+	 * ranges from being reused for a new block group. This is because our
+	 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
+	 * completely transactionless, so while it is trimming a range the
+	 * currently running transaction might finish and a new one start,
+	 * allowing for new block groups to be created that can reuse the same
+	 * physical device locations unless we take this special care.
+	 */
+	remove_em = (atomic_read(&block_group->trimming) == 0);
+	/*
+	 * Make sure a trimmer task always sees the em in the pinned_chunks list
+	 * if it sees block_group->removed == 1 (needs to lock block_group->lock
+	 * before checking block_group->removed).
+	 */
+	if (!remove_em) {
+		/*
+		 * Our em might be in trans->transaction->pending_chunks which
+		 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
+		 * and so is the fs_info->pinned_chunks list.
+		 *
+		 * So at this point we must be holding the chunk_mutex to avoid
+		 * any races with chunk allocation (more specifically at
+		 * volumes.c:contains_pending_extent()), to ensure it always
+		 * sees the em, either in the pending_chunks list or in the
+		 * pinned_chunks list.
+		 */
+		list_move_tail(&em->list, &root->fs_info->pinned_chunks);
+	}
+	spin_unlock(&block_group->lock);
+
+	if (remove_em) {
+		struct extent_map_tree *em_tree;
+
+		em_tree = &root->fs_info->mapping_tree.map_tree;
+		write_lock(&em_tree->lock);
+		/*
+		 * The em might be in the pending_chunks list, so make sure the
+		 * chunk mutex is locked, since remove_extent_mapping() will
+		 * delete us from that list.
+		 */
+		remove_extent_mapping(em_tree, em);
+		write_unlock(&em_tree->lock);
+		/* once for the tree */
+		free_extent_map(em);
+	}
+
+	unlock_chunks(root);
+
 	btrfs_put_block_group(block_group);
 	btrfs_put_block_group(block_group);
 
@@ -9523,10 +9619,18 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 		 */
 		start = block_group->key.objectid;
 		end = start + block_group->key.offset - 1;
-		clear_extent_bits(&fs_info->freed_extents[0], start, end,
+		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
 				  EXTENT_DIRTY, GFP_NOFS);
-		clear_extent_bits(&fs_info->freed_extents[1], start, end,
+		if (ret) {
+			btrfs_set_block_group_rw(root, block_group);
+			goto end_trans;
+		}
+		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
 				  EXTENT_DIRTY, GFP_NOFS);
+		if (ret) {
+			btrfs_set_block_group_rw(root, block_group);
+			goto end_trans;
+		}
 
 		/* Reset pinned so btrfs_put_block_group doesn't complain */
 		block_group->pinned = 0;
@@ -9537,6 +9641,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 		 */
 		ret = btrfs_remove_chunk(trans, root,
 					 block_group->key.objectid);
+end_trans:
 		btrfs_end_transaction(trans, root);
 next:
 		btrfs_put_block_group(block_group);
@@ -9657,12 +9762,14 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 }
 
 /*
- * btrfs_{start,end}_write() is similar to mnt_{want, drop}_write(),
- * they are used to prevent the some tasks writing data into the page cache
- * by nocow before the subvolume is snapshoted, but flush the data into
- * the disk after the snapshot creation.
+ * btrfs_{start,end}_write_no_snapshoting() are similar to
+ * mnt_{want,drop}_write(), they are used to prevent some tasks from writing
+ * data into the page cache through nocow before the subvolume is snapshoted,
+ * but flush the data into disk after the snapshot creation, or to prevent
+ * operations while snapshoting is ongoing and that cause the snapshot to be
+ * inconsistent (writes followed by expanding truncates for example).
  */
-void btrfs_end_nocow_write(struct btrfs_root *root)
+void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
 {
 	percpu_counter_dec(&root->subv_writers->counter);
 	/*
@@ -9674,7 +9781,7 @@ void btrfs_end_nocow_write(struct btrfs_root *root)
 	wake_up(&root->subv_writers->wait);
 }
 
-int btrfs_start_nocow_write(struct btrfs_root *root)
+int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
 {
 	if (atomic_read(&root->will_be_snapshoted))
 		return 0;
@@ -9685,7 +9792,7 @@ int btrfs_start_nocow_write(struct btrfs_root *root)
 	 */
 	smp_mb();
 	if (atomic_read(&root->will_be_snapshoted)) {
-		btrfs_end_nocow_write(root);
+		btrfs_end_write_no_snapshoting(root);
 		return 0;
 	}
 	return 1;