author		Christoph Hellwig <hch@infradead.org>	2008-10-30 01:57:40 -0400
committer	Lachlan McIlroy <lachlan@sgi.com>	2008-10-30 01:57:40 -0400
commit		4b22a57188d87e873346b73c227607715be96399
tree		4cf2d0deede695968b7a32b098c0c64fac8610e5
parent		ea77b0a66e6c910ef265d9af522d6303ea6b3055
[XFS] implement generic xfs_btree_insert/insrec
Make the btree insert code generic. Based on a patch from David Chinner
with lots of changes to follow the original btree implementations more
closely. While this loses some of the generic helper routines for
inserting/moving/removing records it also solves some of the one off bugs
in the original code and makes it easier to verify.

SGI-PV: 985583
SGI-Modid: xfs-linux-melb:xfs-kern:32202a

Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Bill O'Donnell <billodo@sgi.com>
Signed-off-by: David Chinner <david@fromorbit.com>
-rw-r--r--  fs/xfs/xfs_alloc.c        |  10
-rw-r--r--  fs/xfs/xfs_alloc_btree.c  | 339
-rw-r--r--  fs/xfs/xfs_alloc_btree.h  |   6
-rw-r--r--  fs/xfs/xfs_bmap.c         |  20
-rw-r--r--  fs/xfs/xfs_bmap_btree.c   | 308
-rw-r--r--  fs/xfs/xfs_bmap_btree.h   |   1
-rw-r--r--  fs/xfs/xfs_btree.c        | 362
-rw-r--r--  fs/xfs/xfs_btree.h        |  11
-rw-r--r--  fs/xfs/xfs_ialloc.c       |   2
-rw-r--r--  fs/xfs/xfs_ialloc_btree.c | 302
-rw-r--r--  fs/xfs/xfs_ialloc_btree.h |   6
11 files changed, 494 insertions(+), 873 deletions(-)
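
Background for the diff below: before this patch each btree flavor (allocbt, bmbt, inobt) carried its own copy of insrec/insert; the patch moves that walk into xfs_btree.c and each flavor now only supplies small callbacks through struct xfs_btree_ops (init_rec_from_key, init_rec_from_cur, update_cursor, get_dmaxrecs). As a rough, stand-alone sketch of that ops-vector pattern -- toy names and types only, not the actual XFS structures -- one generic insert path driven by per-flavor callbacks looks like this:

#include <assert.h>
#include <stdio.h>

/* Toy "cursor": in XFS this is struct xfs_btree_cur; here it only
 * carries the pending record and a pointer to the flavor's ops. */
struct toy_cur;

struct toy_ops {
	int  (*get_maxrecs)(struct toy_cur *cur);		/* cf. ->get_maxrecs */
	void (*init_rec_from_cur)(struct toy_cur *cur,		/* cf. ->init_rec_from_cur */
				  int *rec);
};

struct toy_cur {
	const struct toy_ops	*ops;
	int			pending;	/* record the caller wants inserted */
	int			recs[8];	/* a single leaf "block" */
	int			numrecs;
};

/* One generic insert path shared by every flavor, standing in for
 * xfs_btree_insert(): all per-tree knowledge comes from cur->ops. */
static int toy_insert(struct toy_cur *cur, int *stat)
{
	int rec, i;

	if (cur->numrecs == cur->ops->get_maxrecs(cur)) {
		*stat = 0;			/* block full; the real code would split */
		return 0;
	}
	cur->ops->init_rec_from_cur(cur, &rec);

	/* keep the block sorted, like the memmove() in the old insrec code */
	for (i = cur->numrecs; i > 0 && cur->recs[i - 1] > rec; i--)
		cur->recs[i] = cur->recs[i - 1];
	cur->recs[i] = rec;
	cur->numrecs++;
	*stat = 1;
	return 0;
}

/* A "flavor" only supplies the small callbacks. */
static int flavor_maxrecs(struct toy_cur *cur)			{ (void)cur; return 8; }
static void flavor_rec_from_cur(struct toy_cur *cur, int *rec)	{ *rec = cur->pending; }

static const struct toy_ops flavor_ops = {
	.get_maxrecs		= flavor_maxrecs,
	.init_rec_from_cur	= flavor_rec_from_cur,
};

int main(void)
{
	struct toy_cur cur = { .ops = &flavor_ops };
	int stat;

	cur.pending = 42;
	toy_insert(&cur, &stat);	/* caller pattern: set the record, insert, check stat */
	assert(stat == 1 && cur.numrecs == 1);
	printf("inserted %d, numrecs=%d\n", cur.recs[0], cur.numrecs);
	return 0;
}

This mirrors the calling convention visible in the hunks below, where callers fill in cur->bc_rec.* and then call xfs_btree_insert(cur, &i).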
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 875e1bae1941..a983824c12be 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -408,7 +408,7 @@ xfs_alloc_fixup_trees(
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 0);
-		if ((error = xfs_alloc_insert(cnt_cur, &i)))
+		if ((error = xfs_btree_insert(cnt_cur, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 1);
 	}
@@ -416,7 +416,7 @@ xfs_alloc_fixup_trees(
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 0);
-		if ((error = xfs_alloc_insert(cnt_cur, &i)))
+		if ((error = xfs_btree_insert(cnt_cur, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 1);
 	}
@@ -444,7 +444,7 @@ xfs_alloc_fixup_trees(
 		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 0);
-		if ((error = xfs_alloc_insert(bno_cur, &i)))
+		if ((error = xfs_btree_insert(bno_cur, &i)))
 			return error;
 		XFS_WANT_CORRUPTED_RETURN(i == 1);
 	}
@@ -1756,7 +1756,7 @@ xfs_free_ag_extent(
 	else {
 		nbno = bno;
 		nlen = len;
-		if ((error = xfs_alloc_insert(bno_cur, &i)))
+		if ((error = xfs_btree_insert(bno_cur, &i)))
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 	}
@@ -1768,7 +1768,7 @@ xfs_free_ag_extent(
 	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
 		goto error0;
 	XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
-	if ((error = xfs_alloc_insert(cnt_cur, &i)))
+	if ((error = xfs_btree_insert(cnt_cur, &i)))
 		goto error0;
 	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index f21a3e9cc3db..818adca77fc6 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -583,256 +583,6 @@ error0:
583} 583}
584 584
585/* 585/*
586 * Insert one record/level. Return information to the caller
587 * allowing the next level up to proceed if necessary.
588 */
589STATIC int /* error */
590xfs_alloc_insrec(
591 xfs_btree_cur_t *cur, /* btree cursor */
592 int level, /* level to insert record at */
593 xfs_agblock_t *bnop, /* i/o: block number inserted */
594 xfs_alloc_rec_t *recp, /* i/o: record data inserted */
595 xfs_btree_cur_t **curp, /* output: new cursor replacing cur */
596 int *stat) /* output: success/failure */
597{
598 xfs_agf_t *agf; /* allocation group freelist header */
599 xfs_alloc_block_t *block; /* btree block record/key lives in */
600 xfs_buf_t *bp; /* buffer for block */
601 int error; /* error return value */
602 int i; /* loop index */
603 xfs_alloc_key_t key; /* key value being inserted */
604 xfs_alloc_key_t *kp; /* pointer to btree keys */
605 xfs_agblock_t nbno; /* block number of allocated block */
606 xfs_btree_cur_t *ncur; /* new cursor to be used at next lvl */
607 xfs_alloc_key_t nkey; /* new key value, from split */
608 xfs_alloc_rec_t nrec; /* new record value, for caller */
609 int numrecs;
610 int optr; /* old ptr value */
611 xfs_alloc_ptr_t *pp; /* pointer to btree addresses */
612 int ptr; /* index in btree block for this rec */
613 xfs_alloc_rec_t *rp; /* pointer to btree records */
614
615 ASSERT(be32_to_cpu(recp->ar_blockcount) > 0);
616
617 /*
618 * GCC doesn't understand the (arguably complex) control flow in
619 * this function and complains about uninitialized structure fields
620 * without this.
621 */
622 memset(&nrec, 0, sizeof(nrec));
623
624 /*
625 * If we made it to the root level, allocate a new root block
626 * and we're done.
627 */
628 if (level >= cur->bc_nlevels) {
629 XFS_STATS_INC(xs_abt_insrec);
630 if ((error = xfs_btree_new_root(cur, &i)))
631 return error;
632 *bnop = NULLAGBLOCK;
633 *stat = i;
634 return 0;
635 }
636 /*
637 * Make a key out of the record data to be inserted, and save it.
638 */
639 key.ar_startblock = recp->ar_startblock;
640 key.ar_blockcount = recp->ar_blockcount;
641 optr = ptr = cur->bc_ptrs[level];
642 /*
643 * If we're off the left edge, return failure.
644 */
645 if (ptr == 0) {
646 *stat = 0;
647 return 0;
648 }
649 XFS_STATS_INC(xs_abt_insrec);
650 /*
651 * Get pointers to the btree buffer and block.
652 */
653 bp = cur->bc_bufs[level];
654 block = XFS_BUF_TO_ALLOC_BLOCK(bp);
655 numrecs = be16_to_cpu(block->bb_numrecs);
656#ifdef DEBUG
657 if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
658 return error;
659 /*
660 * Check that the new entry is being inserted in the right place.
661 */
662 if (ptr <= numrecs) {
663 if (level == 0) {
664 rp = XFS_ALLOC_REC_ADDR(block, ptr, cur);
665 xfs_btree_check_rec(cur->bc_btnum, recp, rp);
666 } else {
667 kp = XFS_ALLOC_KEY_ADDR(block, ptr, cur);
668 xfs_btree_check_key(cur->bc_btnum, &key, kp);
669 }
670 }
671#endif
672 nbno = NULLAGBLOCK;
673 ncur = NULL;
674 /*
675 * If the block is full, we can't insert the new entry until we
676 * make the block un-full.
677 */
678 if (numrecs == XFS_ALLOC_BLOCK_MAXRECS(level, cur)) {
679 /*
680 * First, try shifting an entry to the right neighbor.
681 */
682 if ((error = xfs_btree_rshift(cur, level, &i)))
683 return error;
684 if (i) {
685 /* nothing */
686 }
687 /*
688 * Next, try shifting an entry to the left neighbor.
689 */
690 else {
691 if ((error = xfs_btree_lshift(cur, level, &i)))
692 return error;
693 if (i)
694 optr = ptr = cur->bc_ptrs[level];
695 else {
696 union xfs_btree_ptr bno = { .s = cpu_to_be32(nbno) };
697 /*
698 * Next, try splitting the current block in
699 * half. If this works we have to re-set our
700 * variables because we could be in a
701 * different block now.
702 */
703 if ((error = xfs_btree_split(cur, level, &bno,
704 (union xfs_btree_key *)&nkey,
705 &ncur, &i)))
706 return error;
707 nbno = be32_to_cpu(bno.s);
708 if (i) {
709 bp = cur->bc_bufs[level];
710 block = XFS_BUF_TO_ALLOC_BLOCK(bp);
711#ifdef DEBUG
712 if ((error =
713 xfs_btree_check_sblock(cur,
714 block, level, bp)))
715 return error;
716#endif
717 ptr = cur->bc_ptrs[level];
718 nrec.ar_startblock = nkey.ar_startblock;
719 nrec.ar_blockcount = nkey.ar_blockcount;
720 }
721 /*
722 * Otherwise the insert fails.
723 */
724 else {
725 *stat = 0;
726 return 0;
727 }
728 }
729 }
730 }
731 /*
732 * At this point we know there's room for our new entry in the block
733 * we're pointing at.
734 */
735 numrecs = be16_to_cpu(block->bb_numrecs);
736 if (level > 0) {
737 /*
738 * It's a non-leaf entry. Make a hole for the new data
739 * in the key and ptr regions of the block.
740 */
741 kp = XFS_ALLOC_KEY_ADDR(block, 1, cur);
742 pp = XFS_ALLOC_PTR_ADDR(block, 1, cur);
743#ifdef DEBUG
744 for (i = numrecs; i >= ptr; i--) {
745 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level)))
746 return error;
747 }
748#endif
749 memmove(&kp[ptr], &kp[ptr - 1],
750 (numrecs - ptr + 1) * sizeof(*kp));
751 memmove(&pp[ptr], &pp[ptr - 1],
752 (numrecs - ptr + 1) * sizeof(*pp));
753#ifdef DEBUG
754 if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
755 return error;
756#endif
757 /*
758 * Now stuff the new data in, bump numrecs and log the new data.
759 */
760 kp[ptr - 1] = key;
761 pp[ptr - 1] = cpu_to_be32(*bnop);
762 numrecs++;
763 block->bb_numrecs = cpu_to_be16(numrecs);
764 xfs_alloc_log_keys(cur, bp, ptr, numrecs);
765 xfs_alloc_log_ptrs(cur, bp, ptr, numrecs);
766#ifdef DEBUG
767 if (ptr < numrecs)
768 xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1,
769 kp + ptr);
770#endif
771 } else {
772 /*
773 * It's a leaf entry. Make a hole for the new record.
774 */
775 rp = XFS_ALLOC_REC_ADDR(block, 1, cur);
776 memmove(&rp[ptr], &rp[ptr - 1],
777 (numrecs - ptr + 1) * sizeof(*rp));
778 /*
779 * Now stuff the new record in, bump numrecs
780 * and log the new data.
781 */
782 rp[ptr - 1] = *recp;
783 numrecs++;
784 block->bb_numrecs = cpu_to_be16(numrecs);
785 xfs_alloc_log_recs(cur, bp, ptr, numrecs);
786#ifdef DEBUG
787 if (ptr < numrecs)
788 xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1,
789 rp + ptr);
790#endif
791 }
792 /*
793 * Log the new number of records in the btree header.
794 */
795 xfs_alloc_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
796 /*
797 * If we inserted at the start of a block, update the parents' keys.
798 */
799 if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1)))
800 return error;
801 /*
802 * Look to see if the longest extent in the allocation group
803 * needs to be updated.
804 */
805
806 agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
807 if (level == 0 &&
808 cur->bc_btnum == XFS_BTNUM_CNT &&
809 be32_to_cpu(block->bb_rightsib) == NULLAGBLOCK &&
810 be32_to_cpu(recp->ar_blockcount) > be32_to_cpu(agf->agf_longest)) {
811 /*
812 * If this is a leaf in the by-size btree and there
813 * is no right sibling block and this block is bigger
814 * than the previous longest block, update it.
815 */
816 agf->agf_longest = recp->ar_blockcount;
817 cur->bc_mp->m_perag[be32_to_cpu(agf->agf_seqno)].pagf_longest
818 = be32_to_cpu(recp->ar_blockcount);
819 xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp,
820 XFS_AGF_LONGEST);
821 }
822 /*
823 * Return the new block number, if any.
824 * If there is one, give back a record value and a cursor too.
825 */
826 *bnop = nbno;
827 if (nbno != NULLAGBLOCK) {
828 *recp = nrec;
829 *curp = ncur;
830 }
831 *stat = 1;
832 return 0;
833}
834
835/*
836 * Log header fields from a btree block. 586 * Log header fields from a btree block.
837 */ 587 */
838STATIC void 588STATIC void
@@ -1019,65 +769,6 @@ xfs_alloc_get_rec(
1019 return 0; 769 return 0;
1020} 770}
1021 771
1022/*
1023 * Insert the current record at the point referenced by cur.
1024 * The cursor may be inconsistent on return if splits have been done.
1025 */
1026int /* error */
1027xfs_alloc_insert(
1028 xfs_btree_cur_t *cur, /* btree cursor */
1029 int *stat) /* success/failure */
1030{
1031 int error; /* error return value */
1032 int i; /* result value, 0 for failure */
1033 int level; /* current level number in btree */
1034 xfs_agblock_t nbno; /* new block number (split result) */
1035 xfs_btree_cur_t *ncur; /* new cursor (split result) */
1036 xfs_alloc_rec_t nrec; /* record being inserted this level */
1037 xfs_btree_cur_t *pcur; /* previous level's cursor */
1038
1039 level = 0;
1040 nbno = NULLAGBLOCK;
1041 nrec.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
1042 nrec.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
1043 ncur = NULL;
1044 pcur = cur;
1045 /*
1046 * Loop going up the tree, starting at the leaf level.
1047 * Stop when we don't get a split block, that must mean that
1048 * the insert is finished with this level.
1049 */
1050 do {
1051 /*
1052 * Insert nrec/nbno into this level of the tree.
1053 * Note if we fail, nbno will be null.
1054 */
1055 if ((error = xfs_alloc_insrec(pcur, level++, &nbno, &nrec, &ncur,
1056 &i))) {
1057 if (pcur != cur)
1058 xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
1059 return error;
1060 }
1061 /*
1062 * See if the cursor we just used is trash.
1063 * Can't trash the caller's cursor, but otherwise we should
1064 * if ncur is a new cursor or we're about to be done.
1065 */
1066 if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) {
1067 cur->bc_nlevels = pcur->bc_nlevels;
1068 xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
1069 }
1070 /*
1071 * If we got a new cursor, switch to it.
1072 */
1073 if (ncur) {
1074 pcur = ncur;
1075 ncur = NULL;
1076 }
1077 } while (nbno != NULLAGBLOCK);
1078 *stat = i;
1079 return 0;
1080}
1081 772
1082STATIC struct xfs_btree_cur * 773STATIC struct xfs_btree_cur *
1083xfs_allocbt_dup_cursor( 774xfs_allocbt_dup_cursor(
@@ -1170,6 +861,12 @@ xfs_allocbt_update_lastrec(
 			return;
 		len = rec->alloc.ar_blockcount;
 		break;
+	case LASTREC_INSREC:
+		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
+		    be32_to_cpu(agf->agf_longest))
+			return;
+		len = rec->alloc.ar_blockcount;
+		break;
 	default:
 		ASSERT(0);
 		return;
@@ -1200,6 +897,28 @@ xfs_allocbt_init_key_from_rec(
 }
 
 STATIC void
+xfs_allocbt_init_rec_from_key(
+	union xfs_btree_key	*key,
+	union xfs_btree_rec	*rec)
+{
+	ASSERT(key->alloc.ar_startblock != 0);
+
+	rec->alloc.ar_startblock = key->alloc.ar_startblock;
+	rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
+}
+
+STATIC void
+xfs_allocbt_init_rec_from_cur(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_rec	*rec)
+{
+	ASSERT(cur->bc_rec.a.ar_startblock != 0);
+
+	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
+	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
+}
+
+STATIC void
 xfs_allocbt_init_ptr_from_cur(
 	struct xfs_btree_cur	*cur,
 	union xfs_btree_ptr	*ptr)
@@ -1309,6 +1028,8 @@ static const struct xfs_btree_ops xfs_allocbt_ops = {
 	.update_lastrec		= xfs_allocbt_update_lastrec,
 	.get_maxrecs		= xfs_allocbt_get_maxrecs,
 	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
+	.init_rec_from_key	= xfs_allocbt_init_rec_from_key,
+	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
 	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
 	.key_diff		= xfs_allocbt_key_diff,
 
diff --git a/fs/xfs/xfs_alloc_btree.h b/fs/xfs/xfs_alloc_btree.h
index 81e2f3607819..2e340ef8025a 100644
--- a/fs/xfs/xfs_alloc_btree.h
+++ b/fs/xfs/xfs_alloc_btree.h
@@ -107,12 +107,6 @@ extern int xfs_alloc_delete(struct xfs_btree_cur *cur, int *stat);
 extern int xfs_alloc_get_rec(struct xfs_btree_cur *cur, xfs_agblock_t *bno,
 		xfs_extlen_t *len, int *stat);
 
-/*
- * Insert the current record at the point referenced by cur.
- * The cursor may be inconsistent on return if splits have been done.
- */
-extern int xfs_alloc_insert(struct xfs_btree_cur *cur, int *stat);
-
 
 extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *,
 		struct xfs_trans *, struct xfs_buf *,
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 315bc2912682..85e2e8b9cf41 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -977,7 +977,7 @@ xfs_bmap_add_extent_delay_real(
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
 			cur->bc_rec.b.br_state = XFS_EXT_NORM;
-			if ((error = xfs_bmbt_insert(cur, &i)))
+			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 		}
@@ -1053,7 +1053,7 @@ xfs_bmap_add_extent_delay_real(
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
 			cur->bc_rec.b.br_state = XFS_EXT_NORM;
-			if ((error = xfs_bmbt_insert(cur, &i)))
+			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 		}
@@ -1143,7 +1143,7 @@ xfs_bmap_add_extent_delay_real(
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
 			cur->bc_rec.b.br_state = XFS_EXT_NORM;
-			if ((error = xfs_bmbt_insert(cur, &i)))
+			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 		}
@@ -1198,7 +1198,7 @@ xfs_bmap_add_extent_delay_real(
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
 			cur->bc_rec.b.br_state = XFS_EXT_NORM;
-			if ((error = xfs_bmbt_insert(cur, &i)))
+			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 		}
@@ -1651,7 +1651,7 @@ xfs_bmap_add_extent_unwritten_real(
 					oldext)))
 				goto done;
 			cur->bc_rec.b = *new;
-			if ((error = xfs_bmbt_insert(cur, &i)))
+			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 		}
@@ -1741,7 +1741,7 @@ xfs_bmap_add_extent_unwritten_real(
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
 			cur->bc_rec.b.br_state = XFS_EXT_NORM;
-			if ((error = xfs_bmbt_insert(cur, &i)))
+			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 		}
@@ -1789,7 +1789,7 @@ xfs_bmap_add_extent_unwritten_real(
 			cur->bc_rec.b = PREV;
 			cur->bc_rec.b.br_blockcount =
 				new->br_startoff - PREV.br_startoff;
-			if ((error = xfs_bmbt_insert(cur, &i)))
+			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 			/*
@@ -1804,7 +1804,7 @@ xfs_bmap_add_extent_unwritten_real(
 			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
 			/* new middle extent - newext */
 			cur->bc_rec.b.br_state = new->br_state;
-			if ((error = xfs_bmbt_insert(cur, &i)))
+			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 		}
@@ -2264,7 +2264,7 @@ xfs_bmap_add_extent_hole_real(
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
 			cur->bc_rec.b.br_state = new->br_state;
-			if ((error = xfs_bmbt_insert(cur, &i)))
+			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 		}
@@ -3303,7 +3303,7 @@ xfs_bmap_del_extent(
 			if ((error = xfs_btree_increment(cur, 0, &i)))
 				goto done;
 			cur->bc_rec.b = new;
-			error = xfs_bmbt_insert(cur, &i);
+			error = xfs_btree_insert(cur, &i);
 			if (error && error != ENOSPC)
 				goto done;
 			/*
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 204f276aeaad..2b15df32b7d2 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -456,198 +456,6 @@ error0:
456 return error; 456 return error;
457} 457}
458 458
459/*
460 * Insert one record/level. Return information to the caller
461 * allowing the next level up to proceed if necessary.
462 */
463STATIC int /* error */
464xfs_bmbt_insrec(
465 xfs_btree_cur_t *cur,
466 int level,
467 xfs_fsblock_t *bnop,
468 xfs_bmbt_rec_t *recp,
469 xfs_btree_cur_t **curp,
470 int *stat) /* no-go/done/continue */
471{
472 xfs_bmbt_block_t *block; /* bmap btree block */
473 xfs_buf_t *bp; /* buffer for block */
474 int error; /* error return value */
475 int i; /* loop index */
476 xfs_bmbt_key_t key; /* bmap btree key */
477 xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */
478 int logflags; /* inode logging flags */
479 xfs_fsblock_t nbno; /* new block number */
480 struct xfs_btree_cur *ncur; /* new btree cursor */
481 __uint64_t startoff; /* new btree key value */
482 xfs_bmbt_rec_t nrec; /* new record count */
483 int optr; /* old key/record index */
484 xfs_bmbt_ptr_t *pp; /* pointer to bmap block addr */
485 int ptr; /* key/record index */
486 xfs_bmbt_rec_t *rp=NULL; /* pointer to bmap btree rec */
487 int numrecs;
488
489 ASSERT(level < cur->bc_nlevels);
490 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
491 XFS_BMBT_TRACE_ARGIFR(cur, level, *bnop, recp);
492 ncur = NULL;
493 key.br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(recp));
494 optr = ptr = cur->bc_ptrs[level];
495 if (ptr == 0) {
496 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
497 *stat = 0;
498 return 0;
499 }
500 XFS_STATS_INC(xs_bmbt_insrec);
501 block = xfs_bmbt_get_block(cur, level, &bp);
502 numrecs = be16_to_cpu(block->bb_numrecs);
503#ifdef DEBUG
504 if ((error = xfs_btree_check_lblock(cur, block, level, bp))) {
505 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
506 return error;
507 }
508 if (ptr <= numrecs) {
509 if (level == 0) {
510 rp = XFS_BMAP_REC_IADDR(block, ptr, cur);
511 xfs_btree_check_rec(XFS_BTNUM_BMAP, recp, rp);
512 } else {
513 kp = XFS_BMAP_KEY_IADDR(block, ptr, cur);
514 xfs_btree_check_key(XFS_BTNUM_BMAP, &key, kp);
515 }
516 }
517#endif
518 nbno = NULLFSBLOCK;
519 if (numrecs == XFS_BMAP_BLOCK_IMAXRECS(level, cur)) {
520 if (numrecs < XFS_BMAP_BLOCK_DMAXRECS(level, cur)) {
521 /*
522 * A root block, that can be made bigger.
523 */
524 xfs_iroot_realloc(cur->bc_private.b.ip, 1,
525 cur->bc_private.b.whichfork);
526 block = xfs_bmbt_get_block(cur, level, &bp);
527 } else if (level == cur->bc_nlevels - 1) {
528 if ((error = xfs_btree_new_iroot(cur, &logflags, stat)) ||
529 *stat == 0) {
530 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
531 return error;
532 }
533 xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip,
534 logflags);
535 block = xfs_bmbt_get_block(cur, level, &bp);
536 } else {
537 if ((error = xfs_btree_rshift(cur, level, &i))) {
538 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
539 return error;
540 }
541 if (i) {
542 /* nothing */
543 } else {
544 if ((error = xfs_btree_lshift(cur, level, &i))) {
545 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
546 return error;
547 }
548 if (i) {
549 optr = ptr = cur->bc_ptrs[level];
550 } else {
551 union xfs_btree_ptr bno = { .l = cpu_to_be64(nbno) };
552 union xfs_btree_key skey;
553 if ((error = xfs_btree_split(cur, level,
554 &bno, &skey, &ncur,
555 &i))) {
556 XFS_BMBT_TRACE_CURSOR(cur,
557 ERROR);
558 return error;
559 }
560 nbno = be64_to_cpu(bno.l);
561 startoff = be64_to_cpu(skey.bmbt.br_startoff);
562 if (i) {
563 block = xfs_bmbt_get_block(
564 cur, level, &bp);
565#ifdef DEBUG
566 if ((error =
567 xfs_btree_check_lblock(cur,
568 block, level, bp))) {
569 XFS_BMBT_TRACE_CURSOR(
570 cur, ERROR);
571 return error;
572 }
573#endif
574 ptr = cur->bc_ptrs[level];
575 xfs_bmbt_disk_set_allf(&nrec,
576 startoff, 0, 0,
577 XFS_EXT_NORM);
578 } else {
579 XFS_BMBT_TRACE_CURSOR(cur,
580 EXIT);
581 *stat = 0;
582 return 0;
583 }
584 }
585 }
586 }
587 }
588 numrecs = be16_to_cpu(block->bb_numrecs);
589 if (level > 0) {
590 kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
591 pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
592#ifdef DEBUG
593 for (i = numrecs; i >= ptr; i--) {
594 if ((error = xfs_btree_check_lptr_disk(cur, pp[i - 1],
595 level))) {
596 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
597 return error;
598 }
599 }
600#endif
601 memmove(&kp[ptr], &kp[ptr - 1],
602 (numrecs - ptr + 1) * sizeof(*kp));
603 memmove(&pp[ptr], &pp[ptr - 1],
604 (numrecs - ptr + 1) * sizeof(*pp));
605#ifdef DEBUG
606 if ((error = xfs_btree_check_lptr(cur, *bnop, level))) {
607 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
608 return error;
609 }
610#endif
611 kp[ptr - 1] = key;
612 pp[ptr - 1] = cpu_to_be64(*bnop);
613 numrecs++;
614 block->bb_numrecs = cpu_to_be16(numrecs);
615 xfs_bmbt_log_keys(cur, bp, ptr, numrecs);
616 xfs_bmbt_log_ptrs(cur, bp, ptr, numrecs);
617 } else {
618 rp = XFS_BMAP_REC_IADDR(block, 1, cur);
619 memmove(&rp[ptr], &rp[ptr - 1],
620 (numrecs - ptr + 1) * sizeof(*rp));
621 rp[ptr - 1] = *recp;
622 numrecs++;
623 block->bb_numrecs = cpu_to_be16(numrecs);
624 xfs_bmbt_log_recs(cur, bp, ptr, numrecs);
625 }
626 xfs_bmbt_log_block(cur, bp, XFS_BB_NUMRECS);
627#ifdef DEBUG
628 if (ptr < numrecs) {
629 if (level == 0)
630 xfs_btree_check_rec(XFS_BTNUM_BMAP, rp + ptr - 1,
631 rp + ptr);
632 else
633 xfs_btree_check_key(XFS_BTNUM_BMAP, kp + ptr - 1,
634 kp + ptr);
635 }
636#endif
637 if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1))) {
638 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
639 return error;
640 }
641 *bnop = nbno;
642 if (nbno != NULLFSBLOCK) {
643 *recp = nrec;
644 *curp = ncur;
645 }
646 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
647 *stat = 1;
648 return 0;
649}
650
651STATIC int 459STATIC int
652xfs_bmbt_killroot( 460xfs_bmbt_killroot(
653 xfs_btree_cur_t *cur) 461 xfs_btree_cur_t *cur)
@@ -1060,67 +868,6 @@ xfs_bmbt_disk_get_startoff(
1060} 868}
1061 869
1062/* 870/*
1063 * Insert the current record at the point referenced by cur.
1064 *
1065 * A multi-level split of the tree on insert will invalidate the original
1066 * cursor. All callers of this function should assume that the cursor is
1067 * no longer valid and revalidate it.
1068 */
1069int /* error */
1070xfs_bmbt_insert(
1071 xfs_btree_cur_t *cur,
1072 int *stat) /* success/failure */
1073{
1074 int error; /* error return value */
1075 int i;
1076 int level;
1077 xfs_fsblock_t nbno;
1078 xfs_btree_cur_t *ncur;
1079 xfs_bmbt_rec_t nrec;
1080 xfs_btree_cur_t *pcur;
1081
1082 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
1083 level = 0;
1084 nbno = NULLFSBLOCK;
1085 xfs_bmbt_disk_set_all(&nrec, &cur->bc_rec.b);
1086 ncur = NULL;
1087 pcur = cur;
1088 do {
1089 if ((error = xfs_bmbt_insrec(pcur, level++, &nbno, &nrec, &ncur,
1090 &i))) {
1091 if (pcur != cur)
1092 xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
1093 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1094 return error;
1095 }
1096 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1097 if (pcur != cur && (ncur || nbno == NULLFSBLOCK)) {
1098 cur->bc_nlevels = pcur->bc_nlevels;
1099 cur->bc_private.b.allocated +=
1100 pcur->bc_private.b.allocated;
1101 pcur->bc_private.b.allocated = 0;
1102 ASSERT((cur->bc_private.b.firstblock != NULLFSBLOCK) ||
1103 XFS_IS_REALTIME_INODE(cur->bc_private.b.ip));
1104 cur->bc_private.b.firstblock =
1105 pcur->bc_private.b.firstblock;
1106 ASSERT(cur->bc_private.b.flist ==
1107 pcur->bc_private.b.flist);
1108 xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
1109 }
1110 if (ncur) {
1111 pcur = ncur;
1112 ncur = NULL;
1113 }
1114 } while (nbno != NULLFSBLOCK);
1115 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1116 *stat = i;
1117 return 0;
1118error0:
1119 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
1120 return error;
1121}
1122
1123/*
1124 * Log fields from the btree block header. 871 * Log fields from the btree block header.
1125 */ 872 */
1126void 873void
@@ -1450,6 +1197,21 @@ xfs_bmbt_dup_cursor(
 	return new;
 }
 
+STATIC void
+xfs_bmbt_update_cursor(
+	struct xfs_btree_cur	*src,
+	struct xfs_btree_cur	*dst)
+{
+	ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
+	       (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
+	ASSERT(dst->bc_private.b.flist == src->bc_private.b.flist);
+
+	dst->bc_private.b.allocated += src->bc_private.b.allocated;
+	dst->bc_private.b.firstblock = src->bc_private.b.firstblock;
+
+	src->bc_private.b.allocated = 0;
+}
+
 STATIC int
 xfs_bmbt_alloc_block(
 	struct xfs_btree_cur	*cur,
@@ -1544,6 +1306,23 @@ xfs_bmbt_get_maxrecs(
 	return XFS_BMAP_BLOCK_IMAXRECS(level, cur);
 }
 
+/*
+ * Get the maximum records we could store in the on-disk format.
+ *
+ * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
+ * for the root node this checks the available space in the dinode fork
+ * so that we can resize the in-memory buffer to match it. After a
+ * resize to the maximum size this function returns the same value
+ * as xfs_bmbt_get_maxrecs for the root node, too.
+ */
+STATIC int
+xfs_bmbt_get_dmaxrecs(
+	struct xfs_btree_cur	*cur,
+	int			level)
+{
+	return XFS_BMAP_BLOCK_DMAXRECS(level, cur);
+}
+
 STATIC void
 xfs_bmbt_init_key_from_rec(
 	union xfs_btree_key	*key,
@@ -1554,6 +1333,25 @@ xfs_bmbt_init_key_from_rec(
 }
 
 STATIC void
+xfs_bmbt_init_rec_from_key(
+	union xfs_btree_key	*key,
+	union xfs_btree_rec	*rec)
+{
+	ASSERT(key->bmbt.br_startoff != 0);
+
+	xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff),
+			       0, 0, XFS_EXT_NORM);
+}
+
+STATIC void
+xfs_bmbt_init_rec_from_cur(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_rec	*rec)
+{
+	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
+}
+
+STATIC void
 xfs_bmbt_init_ptr_from_cur(
 	struct xfs_btree_cur	*cur,
 	union xfs_btree_ptr	*ptr)
@@ -1660,9 +1458,13 @@ static const struct xfs_btree_ops xfs_bmbt_ops = {
 	.key_len		= sizeof(xfs_bmbt_key_t),
 
 	.dup_cursor		= xfs_bmbt_dup_cursor,
+	.update_cursor		= xfs_bmbt_update_cursor,
 	.alloc_block		= xfs_bmbt_alloc_block,
 	.get_maxrecs		= xfs_bmbt_get_maxrecs,
+	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
 	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
+	.init_rec_from_key	= xfs_bmbt_init_rec_from_key,
+	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
 	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
 	.key_diff		= xfs_bmbt_key_diff,
 
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
index 26fd8ace3e77..703fe2e34347 100644
--- a/fs/xfs/xfs_bmap_btree.h
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -250,7 +250,6 @@ extern void xfs_bmbt_disk_get_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s);
 extern xfs_filblks_t xfs_bmbt_disk_get_blockcount(xfs_bmbt_rec_t *r);
 extern xfs_fileoff_t xfs_bmbt_disk_get_startoff(xfs_bmbt_rec_t *r);
 
-extern int xfs_bmbt_insert(struct xfs_btree_cur *, int *);
 extern void xfs_bmbt_log_block(struct xfs_btree_cur *, struct xfs_buf *, int);
 extern void xfs_bmbt_log_recs(struct xfs_btree_cur *, struct xfs_buf *, int,
 		int);
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 3b6e01dea669..36477aae77df 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -963,6 +963,17 @@ xfs_btree_ptr_is_null(
 		return be32_to_cpu(ptr->s) == NULLAGBLOCK;
 }
 
+STATIC void
+xfs_btree_set_ptr_null(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_ptr	*ptr)
+{
+	if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+		ptr->l = cpu_to_be64(NULLFSBLOCK);
+	else
+		ptr->s = cpu_to_be32(NULLAGBLOCK);
+}
+
 /*
  * Get/set/init sibling pointers
  */
@@ -2697,3 +2708,354 @@ out0:
2697 *stat = 0; 2708 *stat = 0;
2698 return 0; 2709 return 0;
2699} 2710}
2711
2712STATIC int
2713xfs_btree_make_block_unfull(
2714 struct xfs_btree_cur *cur, /* btree cursor */
2715 int level, /* btree level */
2716 int numrecs,/* # of recs in block */
2717 int *oindex,/* old tree index */
2718 int *index, /* new tree index */
2719 union xfs_btree_ptr *nptr, /* new btree ptr */
2720 struct xfs_btree_cur **ncur, /* new btree cursor */
2721 union xfs_btree_rec *nrec, /* new record */
2722 int *stat)
2723{
2724 union xfs_btree_key key; /* new btree key value */
2725 int error = 0;
2726
2727 if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
2728 level == cur->bc_nlevels - 1) {
2729 struct xfs_inode *ip = cur->bc_private.b.ip;
2730
2731 if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
2732 /* A root block that can be made bigger. */
2733
2734 xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
2735 } else {
2736 /* A root block that needs replacing */
2737 int logflags = 0;
2738
2739 error = xfs_btree_new_iroot(cur, &logflags, stat);
2740 if (error || *stat == 0)
2741 return error;
2742
2743 xfs_trans_log_inode(cur->bc_tp, ip, logflags);
2744 }
2745
2746 return 0;
2747 }
2748
2749 /* First, try shifting an entry to the right neighbor. */
2750 error = xfs_btree_rshift(cur, level, stat);
2751 if (error || *stat)
2752 return error;
2753
2754 /* Next, try shifting an entry to the left neighbor. */
2755 error = xfs_btree_lshift(cur, level, stat);
2756 if (error)
2757 return error;
2758
2759 if (*stat) {
2760 *oindex = *index = cur->bc_ptrs[level];
2761 return 0;
2762 }
2763
2764 /*
2765 * Next, try splitting the current block in half.
2766 *
2767 * If this works we have to re-set our variables because we
2768 * could be in a different block now.
2769 */
2770 error = xfs_btree_split(cur, level, nptr, &key, ncur, stat);
2771 if (error || *stat == 0)
2772 return error;
2773
2774
2775 *index = cur->bc_ptrs[level];
2776 cur->bc_ops->init_rec_from_key(&key, nrec);
2777 return 0;
2778}
2779
2780/*
2781 * Insert one record/level. Return information to the caller
2782 * allowing the next level up to proceed if necessary.
2783 */
2784STATIC int
2785xfs_btree_insrec(
2786 struct xfs_btree_cur *cur, /* btree cursor */
2787 int level, /* level to insert record at */
2788 union xfs_btree_ptr *ptrp, /* i/o: block number inserted */
2789 union xfs_btree_rec *recp, /* i/o: record data inserted */
2790 struct xfs_btree_cur **curp, /* output: new cursor replacing cur */
2791 int *stat) /* success/failure */
2792{
2793 struct xfs_btree_block *block; /* btree block */
2794 struct xfs_buf *bp; /* buffer for block */
2795 union xfs_btree_key key; /* btree key */
2796 union xfs_btree_ptr nptr; /* new block ptr */
2797 struct xfs_btree_cur *ncur; /* new btree cursor */
2798 union xfs_btree_rec nrec; /* new record count */
2799 int optr; /* old key/record index */
2800 int ptr; /* key/record index */
2801 int numrecs;/* number of records */
2802 int error; /* error return value */
2803#ifdef DEBUG
2804 int i;
2805#endif
2806
2807 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
2808 XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, recp);
2809
2810 ncur = NULL;
2811
2812 /*
2813 * If we have an external root pointer, and we've made it to the
2814 * root level, allocate a new root block and we're done.
2815 */
2816 if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
2817 (level >= cur->bc_nlevels)) {
2818 error = xfs_btree_new_root(cur, stat);
2819 xfs_btree_set_ptr_null(cur, ptrp);
2820
2821 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2822 return error;
2823 }
2824
2825 /* If we're off the left edge, return failure. */
2826 ptr = cur->bc_ptrs[level];
2827 if (ptr == 0) {
2828 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2829 *stat = 0;
2830 return 0;
2831 }
2832
2833 /* Make a key out of the record data to be inserted, and save it. */
2834 cur->bc_ops->init_key_from_rec(&key, recp);
2835
2836 optr = ptr;
2837
2838 XFS_BTREE_STATS_INC(cur, insrec);
2839
2840 /* Get pointers to the btree buffer and block. */
2841 block = xfs_btree_get_block(cur, level, &bp);
2842 numrecs = xfs_btree_get_numrecs(block);
2843
2844#ifdef DEBUG
2845 error = xfs_btree_check_block(cur, block, level, bp);
2846 if (error)
2847 goto error0;
2848
2849 /* Check that the new entry is being inserted in the right place. */
2850 if (ptr <= numrecs) {
2851 if (level == 0) {
2852 xfs_btree_check_rec(cur->bc_btnum, recp,
2853 xfs_btree_rec_addr(cur, ptr, block));
2854 } else {
2855 xfs_btree_check_key(cur->bc_btnum, &key,
2856 xfs_btree_key_addr(cur, ptr, block));
2857 }
2858 }
2859#endif
2860
2861 /*
2862 * If the block is full, we can't insert the new entry until we
2863 * make the block un-full.
2864 */
2865 xfs_btree_set_ptr_null(cur, &nptr);
2866 if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
2867 error = xfs_btree_make_block_unfull(cur, level, numrecs,
2868 &optr, &ptr, &nptr, &ncur, &nrec, stat);
2869 if (error || *stat == 0)
2870 goto error0;
2871 }
2872
2873 /*
2874 * The current block may have changed if the block was
2875 * previously full and we have just made space in it.
2876 */
2877 block = xfs_btree_get_block(cur, level, &bp);
2878 numrecs = xfs_btree_get_numrecs(block);
2879
2880#ifdef DEBUG
2881 error = xfs_btree_check_block(cur, block, level, bp);
2882 if (error)
2883 return error;
2884#endif
2885
2886 /*
2887 * At this point we know there's room for our new entry in the block
2888 * we're pointing at.
2889 */
2890 XFS_BTREE_STATS_ADD(cur, moves, numrecs - ptr + 1);
2891
2892 if (level > 0) {
2893 /* It's a nonleaf. make a hole in the keys and ptrs */
2894 union xfs_btree_key *kp;
2895 union xfs_btree_ptr *pp;
2896
2897 kp = xfs_btree_key_addr(cur, ptr, block);
2898 pp = xfs_btree_ptr_addr(cur, ptr, block);
2899
2900#ifdef DEBUG
2901 for (i = numrecs - ptr; i >= 0; i--) {
2902 error = xfs_btree_check_ptr(cur, pp, i, level);
2903 if (error)
2904 return error;
2905 }
2906#endif
2907
2908 xfs_btree_shift_keys(cur, kp, 1, numrecs - ptr + 1);
2909 xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1);
2910
2911#ifdef DEBUG
2912 error = xfs_btree_check_ptr(cur, ptrp, 0, level);
2913 if (error)
2914 goto error0;
2915#endif
2916
2917 /* Now put the new data in, bump numrecs and log it. */
2918 xfs_btree_copy_keys(cur, kp, &key, 1);
2919 xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
2920 numrecs++;
2921 xfs_btree_set_numrecs(block, numrecs);
2922 xfs_btree_log_ptrs(cur, bp, ptr, numrecs);
2923 xfs_btree_log_keys(cur, bp, ptr, numrecs);
2924#ifdef DEBUG
2925 if (ptr < numrecs) {
2926 xfs_btree_check_key(cur->bc_btnum, kp,
2927 xfs_btree_key_addr(cur, ptr + 1, block));
2928 }
2929#endif
2930 } else {
2931 /* It's a leaf. make a hole in the records */
2932 union xfs_btree_rec *rp;
2933
2934 rp = xfs_btree_rec_addr(cur, ptr, block);
2935
2936 xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);
2937
2938 /* Now put the new data in, bump numrecs and log it. */
2939 xfs_btree_copy_recs(cur, rp, recp, 1);
2940 xfs_btree_set_numrecs(block, ++numrecs);
2941 xfs_btree_log_recs(cur, bp, ptr, numrecs);
2942#ifdef DEBUG
2943 if (ptr < numrecs) {
2944 xfs_btree_check_rec(cur->bc_btnum, rp,
2945 xfs_btree_rec_addr(cur, ptr + 1, block));
2946 }
2947#endif
2948 }
2949
2950 /* Log the new number of records in the btree header. */
2951 xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
2952
2953 /* If we inserted at the start of a block, update the parents' keys. */
2954 if (optr == 1) {
2955 error = xfs_btree_updkey(cur, &key, level + 1);
2956 if (error)
2957 goto error0;
2958 }
2959
2960 /*
2961 * If we are tracking the last record in the tree and
2962 * we are at the far right edge of the tree, update it.
2963 */
2964 if (xfs_btree_is_lastrec(cur, block, level)) {
2965 cur->bc_ops->update_lastrec(cur, block, recp,
2966 ptr, LASTREC_INSREC);
2967 }
2968
2969 /*
2970 * Return the new block number, if any.
2971 * If there is one, give back a record value and a cursor too.
2972 */
2973 *ptrp = nptr;
2974 if (!xfs_btree_ptr_is_null(cur, &nptr)) {
2975 *recp = nrec;
2976 *curp = ncur;
2977 }
2978
2979 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2980 *stat = 1;
2981 return 0;
2982
2983error0:
2984 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
2985 return error;
2986}
2987
2988/*
2989 * Insert the record at the point referenced by cur.
2990 *
2991 * A multi-level split of the tree on insert will invalidate the original
2992 * cursor. All callers of this function should assume that the cursor is
2993 * no longer valid and revalidate it.
2994 */
2995int
2996xfs_btree_insert(
2997 struct xfs_btree_cur *cur,
2998 int *stat)
2999{
3000 int error; /* error return value */
3001 int i; /* result value, 0 for failure */
3002 int level; /* current level number in btree */
3003 union xfs_btree_ptr nptr; /* new block number (split result) */
3004 struct xfs_btree_cur *ncur; /* new cursor (split result) */
3005 struct xfs_btree_cur *pcur; /* previous level's cursor */
3006 union xfs_btree_rec rec; /* record to insert */
3007
3008 level = 0;
3009 ncur = NULL;
3010 pcur = cur;
3011
3012 xfs_btree_set_ptr_null(cur, &nptr);
3013 cur->bc_ops->init_rec_from_cur(cur, &rec);
3014
3015 /*
3016 * Loop going up the tree, starting at the leaf level.
3017 * Stop when we don't get a split block, that must mean that
3018 * the insert is finished with this level.
3019 */
3020 do {
3021 /*
3022 * Insert nrec/nptr into this level of the tree.
3023 * Note if we fail, nptr will be null.
3024 */
3025 error = xfs_btree_insrec(pcur, level, &nptr, &rec, &ncur, &i);
3026 if (error) {
3027 if (pcur != cur)
3028 xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
3029 goto error0;
3030 }
3031
3032 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
3033 level++;
3034
3035 /*
3036 * See if the cursor we just used is trash.
3037 * Can't trash the caller's cursor, but otherwise we should
3038 * if ncur is a new cursor or we're about to be done.
3039 */
3040 if (pcur != cur &&
3041 (ncur || xfs_btree_ptr_is_null(cur, &nptr))) {
3042 /* Save the state from the cursor before we trash it */
3043 if (cur->bc_ops->update_cursor)
3044 cur->bc_ops->update_cursor(pcur, cur);
3045 cur->bc_nlevels = pcur->bc_nlevels;
3046 xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
3047 }
3048 /* If we got a new cursor, switch to it. */
3049 if (ncur) {
3050 pcur = ncur;
3051 ncur = NULL;
3052 }
3053 } while (!xfs_btree_ptr_is_null(cur, &nptr));
3054
3055 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
3056 *stat = i;
3057 return 0;
3058error0:
3059 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
3060 return error;
3061}
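
The do/while loop in xfs_btree_insert() above walks upward: it inserts at the leaf, and whenever xfs_btree_insrec() reports a split it repeats one level up with the new block pointer and record, stopping once no split occurs. As a rough stand-alone illustration of that control flow only -- made-up capacities, no real blocks or siblings kept, not the XFS data structures -- consider:

#include <stdio.h>

#define MAXREC	4	/* toy per-level capacity, stand-in for ->get_maxrecs() */
#define MAXLEV	8

static int level_recs[MAXLEV][MAXREC + 1];	/* one "current block" per level */
static int level_num[MAXLEV];

/*
 * Insert one key into one level, keeping the block sorted. On overflow,
 * keep the left half and hand the first key of the (discarded) right half
 * back to the caller so it can be inserted one level up -- the role that
 * nptr/nrec play for xfs_btree_insrec() in the real code.
 */
static int toy_insrec(int level, int key, int *upkey)
{
	int i, n = level_num[level];

	for (i = n; i > 0 && level_recs[level][i - 1] > key; i--)
		level_recs[level][i] = level_recs[level][i - 1];
	level_recs[level][i] = key;
	level_num[level] = ++n;

	if (n <= MAXREC)
		return 0;			/* no split: the upward walk stops here */

	level_num[level] = n / 2;		/* "split": keep the left half */
	*upkey = level_recs[level][n / 2];	/* first key of the right half */
	return 1;
}

int main(void)
{
	int level, key, split, upkey = 0;

	for (key = 1; key <= 20; key++) {
		level = 0;
		split = toy_insrec(level, key, &upkey);
		/* cf. the do/while loop in xfs_btree_insert() */
		while (split && level + 1 < MAXLEV)
			split = toy_insrec(++level, upkey, &upkey);
	}
	for (level = 0; level < MAXLEV && level_num[level]; level++)
		printf("level %d holds %d records\n", level, level_num[level]);
	return 0;
}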
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 21eec863f00f..6f03871f5995 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -186,6 +186,8 @@ struct xfs_btree_ops {
 
 	/* cursor operations */
 	struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
+	void	(*update_cursor)(struct xfs_btree_cur *src,
+				 struct xfs_btree_cur *dst);
 
 	/* update btree root pointer */
 	void	(*set_root)(struct xfs_btree_cur *cur,
@@ -206,9 +208,16 @@ struct xfs_btree_ops {
 	/* records in block/level */
 	int	(*get_maxrecs)(struct xfs_btree_cur *cur, int level);
 
+	/* records on disk. Matter for the root in inode case. */
+	int	(*get_dmaxrecs)(struct xfs_btree_cur *cur, int level);
+
 	/* init values of btree structures */
 	void	(*init_key_from_rec)(union xfs_btree_key *key,
 				     union xfs_btree_rec *rec);
+	void	(*init_rec_from_key)(union xfs_btree_key *key,
+				     union xfs_btree_rec *rec);
+	void	(*init_rec_from_cur)(struct xfs_btree_cur *cur,
+				     union xfs_btree_rec *rec);
 	void	(*init_ptr_from_cur)(struct xfs_btree_cur *cur,
 				     union xfs_btree_ptr *ptr);
 
@@ -240,6 +249,7 @@ struct xfs_btree_ops {
  * Reasons for the update_lastrec method to be called.
  */
 #define LASTREC_UPDATE	0
+#define LASTREC_INSREC	1
 
 
 /*
@@ -549,6 +559,7 @@ int xfs_btree_split(struct xfs_btree_cur *, int, union xfs_btree_ptr *,
 		union xfs_btree_key *, struct xfs_btree_cur **, int *);
 int xfs_btree_new_root(struct xfs_btree_cur *, int *);
 int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *);
+int xfs_btree_insert(struct xfs_btree_cur *, int *);
 
 /*
  * Helpers.
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 138651afd44f..b68e73bb17cd 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -418,7 +418,7 @@ xfs_ialloc_ag_alloc(
 			return error;
 		}
 		ASSERT(i == 0);
-		if ((error = xfs_inobt_insert(cur, &i))) {
+		if ((error = xfs_btree_insert(cur, &i))) {
 			xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
 			return error;
 		}
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index 7ba3c7bb3984..8f66e2720566 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -515,228 +515,6 @@ error0:
515} 515}
516 516
517/* 517/*
518 * Insert one record/level. Return information to the caller
519 * allowing the next level up to proceed if necessary.
520 */
521STATIC int /* error */
522xfs_inobt_insrec(
523 xfs_btree_cur_t *cur, /* btree cursor */
524 int level, /* level to insert record at */
525 xfs_agblock_t *bnop, /* i/o: block number inserted */
526 xfs_inobt_rec_t *recp, /* i/o: record data inserted */
527 xfs_btree_cur_t **curp, /* output: new cursor replacing cur */
528 int *stat) /* success/failure */
529{
530 xfs_inobt_block_t *block; /* btree block record/key lives in */
531 xfs_buf_t *bp; /* buffer for block */
532 int error; /* error return value */
533 int i; /* loop index */
534 xfs_inobt_key_t key; /* key value being inserted */
535 xfs_inobt_key_t *kp=NULL; /* pointer to btree keys */
536 xfs_agblock_t nbno; /* block number of allocated block */
537 xfs_btree_cur_t *ncur; /* new cursor to be used at next lvl */
538 xfs_inobt_key_t nkey; /* new key value, from split */
539 xfs_inobt_rec_t nrec; /* new record value, for caller */
540 int numrecs;
541 int optr; /* old ptr value */
542 xfs_inobt_ptr_t *pp; /* pointer to btree addresses */
543 int ptr; /* index in btree block for this rec */
544 xfs_inobt_rec_t *rp=NULL; /* pointer to btree records */
545
546 /*
547 * GCC doesn't understand the (arguably complex) control flow in
548 * this function and complains about uninitialized structure fields
549 * without this.
550 */
551 memset(&nrec, 0, sizeof(nrec));
552
553 /*
554 * If we made it to the root level, allocate a new root block
555 * and we're done.
556 */
557 if (level >= cur->bc_nlevels) {
558 error = xfs_btree_new_root(cur, &i);
559 *bnop = NULLAGBLOCK;
560 *stat = i;
561 return error;
562 }
563 /*
564 * Make a key out of the record data to be inserted, and save it.
565 */
566 key.ir_startino = recp->ir_startino;
567 optr = ptr = cur->bc_ptrs[level];
568 /*
569 * If we're off the left edge, return failure.
570 */
571 if (ptr == 0) {
572 *stat = 0;
573 return 0;
574 }
575 /*
576 * Get pointers to the btree buffer and block.
577 */
578 bp = cur->bc_bufs[level];
579 block = XFS_BUF_TO_INOBT_BLOCK(bp);
580 numrecs = be16_to_cpu(block->bb_numrecs);
581#ifdef DEBUG
582 if ((error = xfs_btree_check_sblock(cur, block, level, bp)))
583 return error;
584 /*
585 * Check that the new entry is being inserted in the right place.
586 */
587 if (ptr <= numrecs) {
588 if (level == 0) {
589 rp = XFS_INOBT_REC_ADDR(block, ptr, cur);
590 xfs_btree_check_rec(cur->bc_btnum, recp, rp);
591 } else {
592 kp = XFS_INOBT_KEY_ADDR(block, ptr, cur);
593 xfs_btree_check_key(cur->bc_btnum, &key, kp);
594 }
595 }
596#endif
597 nbno = NULLAGBLOCK;
598 ncur = NULL;
599 /*
600 * If the block is full, we can't insert the new entry until we
601 * make the block un-full.
602 */
603 if (numrecs == XFS_INOBT_BLOCK_MAXRECS(level, cur)) {
604 /*
605 * First, try shifting an entry to the right neighbor.
606 */
607 if ((error = xfs_btree_rshift(cur, level, &i)))
608 return error;
609 if (i) {
610 /* nothing */
611 }
612 /*
613 * Next, try shifting an entry to the left neighbor.
614 */
615 else {
616 if ((error = xfs_btree_lshift(cur, level, &i)))
617 return error;
618 if (i) {
619 optr = ptr = cur->bc_ptrs[level];
620 } else {
621 union xfs_btree_ptr bno = { .s = cpu_to_be32(nbno) };
622 /*
623 * Next, try splitting the current block
624 * in half. If this works we have to
625 * re-set our variables because
626 * we could be in a different block now.
627 */
628 if ((error = xfs_btree_split(cur, level, &bno,
629 (union xfs_btree_key *)&nkey,
630 &ncur, &i)))
631 return error;
632 nbno = be32_to_cpu(bno.s);
633 if (i) {
634 bp = cur->bc_bufs[level];
635 block = XFS_BUF_TO_INOBT_BLOCK(bp);
636#ifdef DEBUG
637 if ((error = xfs_btree_check_sblock(cur,
638 block, level, bp)))
639 return error;
640#endif
641 ptr = cur->bc_ptrs[level];
642 nrec.ir_startino = nkey.ir_startino;
643 } else {
644 /*
645 * Otherwise the insert fails.
646 */
647 *stat = 0;
648 return 0;
649 }
650 }
651 }
652 }
653 /*
654 * At this point we know there's room for our new entry in the block
655 * we're pointing at.
656 */
657 numrecs = be16_to_cpu(block->bb_numrecs);
658 if (level > 0) {
659 /*
660 * It's a non-leaf entry. Make a hole for the new data
661 * in the key and ptr regions of the block.
662 */
663 kp = XFS_INOBT_KEY_ADDR(block, 1, cur);
664 pp = XFS_INOBT_PTR_ADDR(block, 1, cur);
665#ifdef DEBUG
666 for (i = numrecs; i >= ptr; i--) {
667 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(pp[i - 1]), level)))
668 return error;
669 }
670#endif
671 memmove(&kp[ptr], &kp[ptr - 1],
672 (numrecs - ptr + 1) * sizeof(*kp));
673 memmove(&pp[ptr], &pp[ptr - 1],
674 (numrecs - ptr + 1) * sizeof(*pp));
675 /*
676 * Now stuff the new data in, bump numrecs and log the new data.
677 */
678#ifdef DEBUG
679 if ((error = xfs_btree_check_sptr(cur, *bnop, level)))
680 return error;
681#endif
682 kp[ptr - 1] = key;
683 pp[ptr - 1] = cpu_to_be32(*bnop);
684 numrecs++;
685 block->bb_numrecs = cpu_to_be16(numrecs);
686 xfs_inobt_log_keys(cur, bp, ptr, numrecs);
687 xfs_inobt_log_ptrs(cur, bp, ptr, numrecs);
688 } else {
689 /*
690 * It's a leaf entry. Make a hole for the new record.
691 */
692 rp = XFS_INOBT_REC_ADDR(block, 1, cur);
693 memmove(&rp[ptr], &rp[ptr - 1],
694 (numrecs - ptr + 1) * sizeof(*rp));
695 /*
696 * Now stuff the new record in, bump numrecs
697 * and log the new data.
698 */
699 rp[ptr - 1] = *recp;
700 numrecs++;
701 block->bb_numrecs = cpu_to_be16(numrecs);
702 xfs_inobt_log_recs(cur, bp, ptr, numrecs);
703 }
704 /*
705 * Log the new number of records in the btree header.
706 */
707 xfs_inobt_log_block(cur->bc_tp, bp, XFS_BB_NUMRECS);
708#ifdef DEBUG
709 /*
710 * Check that the key/record is in the right place, now.
711 */
712 if (ptr < numrecs) {
713 if (level == 0)
714 xfs_btree_check_rec(cur->bc_btnum, rp + ptr - 1,
715 rp + ptr);
716 else
717 xfs_btree_check_key(cur->bc_btnum, kp + ptr - 1,
718 kp + ptr);
719 }
720#endif
721 /*
722 * If we inserted at the start of a block, update the parents' keys.
723 */
724 if (optr == 1 && (error = xfs_btree_updkey(cur, (union xfs_btree_key *)&key, level + 1)))
725 return error;
726 /*
727 * Return the new block number, if any.
728 * If there is one, give back a record value and a cursor too.
729 */
730 *bnop = nbno;
731 if (nbno != NULLAGBLOCK) {
732 *recp = nrec;
733 *curp = ncur;
734 }
735 *stat = 1;
736 return 0;
737}
738
739/*
740 * Log header fields from a btree block. 518 * Log header fields from a btree block.
741 */ 519 */
742STATIC void 520STATIC void
@@ -912,66 +690,6 @@ xfs_inobt_get_rec(
912 return 0; 690 return 0;
913} 691}
914 692
915/*
916 * Insert the current record at the point referenced by cur.
917 * The cursor may be inconsistent on return if splits have been done.
918 */
919int /* error */
920xfs_inobt_insert(
921 xfs_btree_cur_t *cur, /* btree cursor */
922 int *stat) /* success/failure */
923{
924 int error; /* error return value */
925 int i; /* result value, 0 for failure */
926 int level; /* current level number in btree */
927 xfs_agblock_t nbno; /* new block number (split result) */
928 xfs_btree_cur_t *ncur; /* new cursor (split result) */
929 xfs_inobt_rec_t nrec; /* record being inserted this level */
930 xfs_btree_cur_t *pcur; /* previous level's cursor */
931
932 level = 0;
933 nbno = NULLAGBLOCK;
934 nrec.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
935 nrec.ir_freecount = cpu_to_be32(cur->bc_rec.i.ir_freecount);
936 nrec.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
937 ncur = NULL;
938 pcur = cur;
939 /*
940 * Loop going up the tree, starting at the leaf level.
941 * Stop when we don't get a split block, that must mean that
942 * the insert is finished with this level.
943 */
944 do {
945 /*
946 * Insert nrec/nbno into this level of the tree.
947 * Note if we fail, nbno will be null.
948 */
949 if ((error = xfs_inobt_insrec(pcur, level++, &nbno, &nrec, &ncur,
950 &i))) {
951 if (pcur != cur)
952 xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
953 return error;
954 }
955 /*
956 * See if the cursor we just used is trash.
957 * Can't trash the caller's cursor, but otherwise we should
958 * if ncur is a new cursor or we're about to be done.
959 */
960 if (pcur != cur && (ncur || nbno == NULLAGBLOCK)) {
961 cur->bc_nlevels = pcur->bc_nlevels;
962 xfs_btree_del_cursor(pcur, XFS_BTREE_NOERROR);
963 }
964 /*
965 * If we got a new cursor, switch to it.
966 */
967 if (ncur) {
968 pcur = ncur;
969 ncur = NULL;
970 }
971 } while (nbno != NULLAGBLOCK);
972 *stat = i;
973 return 0;
974}
975 693
976STATIC struct xfs_btree_cur * 694STATIC struct xfs_btree_cur *
977xfs_inobt_dup_cursor( 695xfs_inobt_dup_cursor(
@@ -1053,6 +771,24 @@ xfs_inobt_init_key_from_rec(
 	key->inobt.ir_startino = rec->inobt.ir_startino;
 }
 
+STATIC void
+xfs_inobt_init_rec_from_key(
+	union xfs_btree_key	*key,
+	union xfs_btree_rec	*rec)
+{
+	rec->inobt.ir_startino = key->inobt.ir_startino;
+}
+
+STATIC void
+xfs_inobt_init_rec_from_cur(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_rec	*rec)
+{
+	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
+	rec->inobt.ir_freecount = cpu_to_be32(cur->bc_rec.i.ir_freecount);
+	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
+}
+
 /*
  * intial value of ptr for lookup
  */
@@ -1152,6 +888,8 @@ static const struct xfs_btree_ops xfs_inobt_ops = {
 	.alloc_block		= xfs_inobt_alloc_block,
 	.get_maxrecs		= xfs_inobt_get_maxrecs,
 	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
+	.init_rec_from_key	= xfs_inobt_init_rec_from_key,
+	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
 	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
 	.key_diff		= xfs_inobt_key_diff,
 
diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h
index 7f77549e82a6..c9cbc4f2168d 100644
--- a/fs/xfs/xfs_ialloc_btree.h
+++ b/fs/xfs/xfs_ialloc_btree.h
@@ -129,12 +129,6 @@ extern int xfs_inobt_delete(struct xfs_btree_cur *cur, int *stat);
 extern int xfs_inobt_get_rec(struct xfs_btree_cur *cur, xfs_agino_t *ino,
 		__int32_t *fcnt, xfs_inofree_t *free, int *stat);
 
-/*
- * Insert the current record at the point referenced by cur.
- * The cursor may be inconsistent on return if splits have been done.
- */
-extern int xfs_inobt_insert(struct xfs_btree_cur *cur, int *stat);
-
 
 extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *,
 		struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t);