-rw-r--r--  fs/btrfs/ctree.c | 385 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 296 insertions(+), 89 deletions(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 59664f6cbc4e..7d88d8543aa1 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -39,7 +39,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
 				  struct extent_buffer *src_buf);
 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
 		    int level, int slot);
-static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
+static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
 				 struct extent_buffer *eb);
 
 struct btrfs_path *btrfs_alloc_path(void)
@@ -474,6 +474,8 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
  * the index is the shifted logical of the *new* root node for root replace
  * operations, or the shifted logical of the affected block for all other
  * operations.
+ *
+ * Note: must be called with write lock (tree_mod_log_write_lock).
  */
 static noinline int
 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
@@ -482,24 +484,9 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
 	struct rb_node **new;
 	struct rb_node *parent = NULL;
 	struct tree_mod_elem *cur;
-	int ret = 0;
 
 	BUG_ON(!tm);
 
-	tree_mod_log_write_lock(fs_info);
-	if (list_empty(&fs_info->tree_mod_seq_list)) {
-		tree_mod_log_write_unlock(fs_info);
-		/*
-		 * Ok we no longer care about logging modifications, free up tm
-		 * and return 0. Any callers shouldn't be using tm after
-		 * calling tree_mod_log_insert, but if they do we can just
-		 * change this to return a special error code to let the callers
-		 * do their own thing.
-		 */
-		kfree(tm);
-		return 0;
-	}
-
 	spin_lock(&fs_info->tree_mod_seq_lock);
 	tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
 	spin_unlock(&fs_info->tree_mod_seq_lock);
@@ -517,18 +504,13 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
 			new = &((*new)->rb_left);
 		else if (cur->seq > tm->seq)
 			new = &((*new)->rb_right);
-		else {
-			ret = -EEXIST;
-			kfree(tm);
-			goto out;
-		}
+		else
+			return -EEXIST;
 	}
 
 	rb_link_node(&tm->node, parent, new);
 	rb_insert_color(&tm->node, tm_root);
-out:
-	tree_mod_log_write_unlock(fs_info);
-	return ret;
+	return 0;
 }
 
 /*
@@ -544,19 +526,38 @@ static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
 		return 1;
 	if (eb && btrfs_header_level(eb) == 0)
 		return 1;
+
+	tree_mod_log_write_lock(fs_info);
+	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
+		tree_mod_log_write_unlock(fs_info);
+		return 1;
+	}
+
 	return 0;
 }
 
-static inline int
-__tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
-			  struct extent_buffer *eb, int slot,
-			  enum mod_log_op op, gfp_t flags)
+/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
+static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
+				    struct extent_buffer *eb)
+{
+	smp_mb();
+	if (list_empty(&(fs_info)->tree_mod_seq_list))
+		return 0;
+	if (eb && btrfs_header_level(eb) == 0)
+		return 0;
+
+	return 1;
+}
+
+static struct tree_mod_elem *
+alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
+		    enum mod_log_op op, gfp_t flags)
 {
 	struct tree_mod_elem *tm;
 
 	tm = kzalloc(sizeof(*tm), flags);
 	if (!tm)
-		return -ENOMEM;
+		return NULL;
 
 	tm->index = eb->start >> PAGE_CACHE_SHIFT;
 	if (op != MOD_LOG_KEY_ADD) {
@@ -566,8 +567,9 @@ __tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
 	tm->op = op;
 	tm->slot = slot;
 	tm->generation = btrfs_node_ptr_generation(eb, slot);
+	RB_CLEAR_NODE(&tm->node);
 
-	return __tree_mod_log_insert(fs_info, tm);
+	return tm;
 }
 
 static noinline int
@@ -575,10 +577,27 @@ tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
 			struct extent_buffer *eb, int slot,
 			enum mod_log_op op, gfp_t flags)
 {
-	if (tree_mod_dont_log(fs_info, eb))
+	struct tree_mod_elem *tm;
+	int ret;
+
+	if (!tree_mod_need_log(fs_info, eb))
+		return 0;
+
+	tm = alloc_tree_mod_elem(eb, slot, op, flags);
+	if (!tm)
+		return -ENOMEM;
+
+	if (tree_mod_dont_log(fs_info, eb)) {
+		kfree(tm);
 		return 0;
+	}
+
+	ret = __tree_mod_log_insert(fs_info, tm);
+	tree_mod_log_write_unlock(fs_info);
+	if (ret)
+		kfree(tm);
 
-	return __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
+	return ret;
 }
 
 static noinline int
@@ -586,53 +605,95 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
 			 struct extent_buffer *eb, int dst_slot, int src_slot,
 			 int nr_items, gfp_t flags)
 {
-	struct tree_mod_elem *tm;
-	int ret;
+	struct tree_mod_elem *tm = NULL;
+	struct tree_mod_elem **tm_list = NULL;
+	int ret = 0;
 	int i;
+	int locked = 0;
 
-	if (tree_mod_dont_log(fs_info, eb))
+	if (!tree_mod_need_log(fs_info, eb))
 		return 0;
 
+	tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
+	if (!tm_list)
+		return -ENOMEM;
+
+	tm = kzalloc(sizeof(*tm), flags);
+	if (!tm) {
+		ret = -ENOMEM;
+		goto free_tms;
+	}
+
+	tm->index = eb->start >> PAGE_CACHE_SHIFT;
+	tm->slot = src_slot;
+	tm->move.dst_slot = dst_slot;
+	tm->move.nr_items = nr_items;
+	tm->op = MOD_LOG_MOVE_KEYS;
+
+	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
+		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
+		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
+		if (!tm_list[i]) {
+			ret = -ENOMEM;
+			goto free_tms;
+		}
+	}
+
+	if (tree_mod_dont_log(fs_info, eb))
+		goto free_tms;
+	locked = 1;
+
 	/*
 	 * When we override something during the move, we log these removals.
 	 * This can only happen when we move towards the beginning of the
 	 * buffer, i.e. dst_slot < src_slot.
 	 */
 	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
-		ret = __tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
-				MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
-		BUG_ON(ret < 0);
+		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
+		if (ret)
+			goto free_tms;
 	}
 
-	tm = kzalloc(sizeof(*tm), flags);
-	if (!tm)
-		return -ENOMEM;
+	ret = __tree_mod_log_insert(fs_info, tm);
+	if (ret)
+		goto free_tms;
+	tree_mod_log_write_unlock(fs_info);
+	kfree(tm_list);
 
-	tm->index = eb->start >> PAGE_CACHE_SHIFT;
-	tm->slot = src_slot;
-	tm->move.dst_slot = dst_slot;
-	tm->move.nr_items = nr_items;
-	tm->op = MOD_LOG_MOVE_KEYS;
+	return 0;
+free_tms:
+	for (i = 0; i < nr_items; i++) {
+		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
+			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
+		kfree(tm_list[i]);
+	}
+	if (locked)
+		tree_mod_log_write_unlock(fs_info);
+	kfree(tm_list);
+	kfree(tm);
 
-	return __tree_mod_log_insert(fs_info, tm);
+	return ret;
 }
 
-static inline void
-__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
+static inline int
+__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
+		       struct tree_mod_elem **tm_list,
+		       int nritems)
 {
-	int i;
-	u32 nritems;
+	int i, j;
 	int ret;
 
-	if (btrfs_header_level(eb) == 0)
-		return;
-
-	nritems = btrfs_header_nritems(eb);
 	for (i = nritems - 1; i >= 0; i--) {
-		ret = __tree_mod_log_insert_key(fs_info, eb, i,
-				MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
-		BUG_ON(ret < 0);
+		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
+		if (ret) {
+			for (j = nritems - 1; j > i; j--)
+				rb_erase(&tm_list[j]->node,
+					 &fs_info->tree_mod_log);
+			return ret;
+		}
 	}
+
+	return 0;
 }
 
 static noinline int
@@ -641,17 +702,38 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
 			 struct extent_buffer *new_root, gfp_t flags,
 			 int log_removal)
 {
-	struct tree_mod_elem *tm;
+	struct tree_mod_elem *tm = NULL;
+	struct tree_mod_elem **tm_list = NULL;
+	int nritems = 0;
+	int ret = 0;
+	int i;
 
-	if (tree_mod_dont_log(fs_info, NULL))
+	if (!tree_mod_need_log(fs_info, NULL))
 		return 0;
 
-	if (log_removal)
-		__tree_mod_log_free_eb(fs_info, old_root);
+	if (log_removal && btrfs_header_level(old_root) > 0) {
+		nritems = btrfs_header_nritems(old_root);
+		tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
+				  flags);
+		if (!tm_list) {
+			ret = -ENOMEM;
+			goto free_tms;
+		}
+		for (i = 0; i < nritems; i++) {
+			tm_list[i] = alloc_tree_mod_elem(old_root, i,
+			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
+			if (!tm_list[i]) {
+				ret = -ENOMEM;
+				goto free_tms;
+			}
+		}
+	}
 
 	tm = kzalloc(sizeof(*tm), flags);
-	if (!tm)
-		return -ENOMEM;
+	if (!tm) {
+		ret = -ENOMEM;
+		goto free_tms;
+	}
 
 	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
 	tm->old_root.logical = old_root->start;
@@ -659,7 +741,30 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
 	tm->generation = btrfs_header_generation(old_root);
 	tm->op = MOD_LOG_ROOT_REPLACE;
 
-	return __tree_mod_log_insert(fs_info, tm);
+	if (tree_mod_dont_log(fs_info, NULL))
+		goto free_tms;
+
+	if (tm_list)
+		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
+	if (!ret)
+		ret = __tree_mod_log_insert(fs_info, tm);
+
+	tree_mod_log_write_unlock(fs_info);
+	if (ret)
+		goto free_tms;
+	kfree(tm_list);
+
+	return ret;
+
+free_tms:
+	if (tm_list) {
+		for (i = 0; i < nritems; i++)
+			kfree(tm_list[i]);
+		kfree(tm_list);
+	}
+	kfree(tm);
+
+	return ret;
 }
 
 static struct tree_mod_elem *
@@ -728,31 +833,75 @@ tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
 	return __tree_mod_log_search(fs_info, start, min_seq, 0);
 }
 
-static noinline void
+static noinline int
 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
 		     struct extent_buffer *src, unsigned long dst_offset,
 		     unsigned long src_offset, int nr_items)
 {
-	int ret;
+	int ret = 0;
+	struct tree_mod_elem **tm_list = NULL;
+	struct tree_mod_elem **tm_list_add, **tm_list_rem;
 	int i;
+	int locked = 0;
 
-	if (tree_mod_dont_log(fs_info, NULL))
-		return;
+	if (!tree_mod_need_log(fs_info, NULL))
+		return 0;
 
 	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
-		return;
+		return 0;
 
+	tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
+			  GFP_NOFS);
+	if (!tm_list)
+		return -ENOMEM;
+
+	tm_list_add = tm_list;
+	tm_list_rem = tm_list + nr_items;
 	for (i = 0; i < nr_items; i++) {
-		ret = __tree_mod_log_insert_key(fs_info, src,
-						i + src_offset,
-						MOD_LOG_KEY_REMOVE, GFP_NOFS);
-		BUG_ON(ret < 0);
-		ret = __tree_mod_log_insert_key(fs_info, dst,
-						i + dst_offset,
-						MOD_LOG_KEY_ADD,
-						GFP_NOFS);
-		BUG_ON(ret < 0);
+		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
+		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
+		if (!tm_list_rem[i]) {
+			ret = -ENOMEM;
+			goto free_tms;
+		}
+
+		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
+		    MOD_LOG_KEY_ADD, GFP_NOFS);
+		if (!tm_list_add[i]) {
+			ret = -ENOMEM;
+			goto free_tms;
+		}
+	}
+
+	if (tree_mod_dont_log(fs_info, NULL))
+		goto free_tms;
+	locked = 1;
+
+	for (i = 0; i < nr_items; i++) {
+		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
+		if (ret)
+			goto free_tms;
+		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
+		if (ret)
+			goto free_tms;
+	}
+
+	tree_mod_log_write_unlock(fs_info);
+	kfree(tm_list);
+
+	return 0;
+
+free_tms:
+	for (i = 0; i < nr_items * 2; i++) {
+		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
+			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
+		kfree(tm_list[i]);
 	}
+	if (locked)
+		tree_mod_log_write_unlock(fs_info);
+	kfree(tm_list);
+
+	return ret;
 }
 
 static inline void
@@ -777,12 +926,52 @@ tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
 	BUG_ON(ret < 0);
 }
 
-static noinline void
+static noinline int
 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
 {
+	struct tree_mod_elem **tm_list = NULL;
+	int nritems = 0;
+	int i;
+	int ret = 0;
+
+	if (btrfs_header_level(eb) == 0)
+		return 0;
+
+	if (!tree_mod_need_log(fs_info, NULL))
+		return 0;
+
+	nritems = btrfs_header_nritems(eb);
+	tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
+			  GFP_NOFS);
+	if (!tm_list)
+		return -ENOMEM;
+
+	for (i = 0; i < nritems; i++) {
+		tm_list[i] = alloc_tree_mod_elem(eb, i,
+		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
+		if (!tm_list[i]) {
+			ret = -ENOMEM;
+			goto free_tms;
+		}
+	}
+
 	if (tree_mod_dont_log(fs_info, eb))
-		return;
-	__tree_mod_log_free_eb(fs_info, eb);
+		goto free_tms;
+
+	ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
+	tree_mod_log_write_unlock(fs_info);
+	if (ret)
+		goto free_tms;
+	kfree(tm_list);
+
+	return 0;
+
+free_tms:
+	for (i = 0; i < nritems; i++)
+		kfree(tm_list[i]);
+	kfree(tm_list);
+
+	return ret;
 }
 
 static noinline void
@@ -1040,8 +1229,13 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 		btrfs_set_node_ptr_generation(parent, parent_slot,
 					      trans->transid);
 		btrfs_mark_buffer_dirty(parent);
-		if (last_ref)
-			tree_mod_log_free_eb(root->fs_info, buf);
+		if (last_ref) {
+			ret = tree_mod_log_free_eb(root->fs_info, buf);
+			if (ret) {
+				btrfs_abort_transaction(trans, root, ret);
+				return ret;
+			}
+		}
 		btrfs_free_tree_block(trans, root, buf, parent_start,
 				      last_ref);
 	}
@@ -3064,8 +3258,12 @@ static int push_node_left(struct btrfs_trans_handle *trans,
 	} else
 		push_items = min(src_nritems - 8, push_items);
 
-	tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
-			     push_items);
+	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
+				   push_items);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		return ret;
+	}
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(dst_nritems),
 			   btrfs_node_key_ptr_offset(0),
@@ -3135,8 +3333,12 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
 				      (dst_nritems) *
 				      sizeof(struct btrfs_key_ptr));
 
-	tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
-			     src_nritems - push_items, push_items);
+	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
+				   src_nritems - push_items, push_items);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		return ret;
+	}
 	copy_extent_buffer(dst, src,
 			   btrfs_node_key_ptr_offset(0),
 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
@@ -3337,7 +3539,12 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 			    btrfs_header_chunk_tree_uuid(split),
 			    BTRFS_UUID_SIZE);
 
-	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
+	ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
+				   mid, c_nritems - mid);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		return ret;
+	}
 	copy_extent_buffer(split, c,
 			   btrfs_node_key_ptr_offset(0),
 			   btrfs_node_key_ptr_offset(mid),
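
The change is uniform across the hunks above: each tree_mod_log_* helper now pre-allocates its tree_mod_elem(s) with alloc_tree_mod_elem() before tree_mod_dont_log() takes the write lock, re-checks under that lock whether logging is still wanted, inserts the elements, and unwinds (rb_erase() plus kfree()) on failure instead of BUG_ON(); callers such as __btrfs_cow_block(), push_node_left() and split_node() then propagate the error via btrfs_abort_transaction(). The snippet below is a minimal userspace sketch of that allocate-then-lock-then-recheck pattern only; all names in it (mod_log, log_elem, mod_log_insert) are hypothetical stand-ins and not the btrfs API.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct log_elem {
	unsigned long index;
	struct log_elem *next;
};

struct mod_log {
	pthread_rwlock_t lock;
	int users;		/* 0 means nobody cares about logging */
	struct log_elem *head;
};

static int mod_log_insert(struct mod_log *log, unsigned long index)
{
	struct log_elem *elem;

	if (!log->users)			/* cheap, unlocked early-out */
		return 0;

	elem = calloc(1, sizeof(*elem));	/* allocate outside the lock */
	if (!elem)
		return -ENOMEM;

	pthread_rwlock_wrlock(&log->lock);
	if (!log->users) {			/* re-check under the lock */
		pthread_rwlock_unlock(&log->lock);
		free(elem);			/* caller never sees the element */
		return 0;
	}
	elem->index = index;			/* commit the element to the log */
	elem->next = log->head;
	log->head = elem;
	pthread_rwlock_unlock(&log->lock);
	return 0;
}

int main(void)
{
	struct mod_log log = {
		.lock = PTHREAD_RWLOCK_INITIALIZER,
		.users = 1,
	};

	return mod_log_insert(&log, 42) ? 1 : 0;
}

Doing the allocation before taking the lock keeps the potentially sleeping/failing step out of the critical section, and returning an error instead of crashing lets the caller abort the transaction cleanly, which is exactly the trade-off the patch makes.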