author     Christoph Hellwig <hch@infradead.org>    2008-10-30 01:57:03 -0400
committer  Lachlan McIlroy <lachlan@sgi.com>        2008-10-30 01:57:03 -0400
commit     f5eb8e7ca58bc1e92436614444006120d21668ba (patch)
tree       94825b885653224f74c6087956cb4e1930380467 /fs/xfs
parent     687b890a184fef263ebb773926e1f4aa69240d01 (diff)
[XFS] implement generic xfs_btree_split
Make the btree split code generic. Based on a patch from David Chinner
with lots of changes to follow the original btree implementations more
closely. While this loses some of the generic helper routines for
inserting/moving/removing records, it also solves some of the one-off
bugs in the original code and makes it easier to verify.

SGI-PV: 985583
SGI-Modid: xfs-linux-melb:xfs-kern:32198a

Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Bill O'Donnell <billodo@sgi.com>
Signed-off-by: David Chinner <david@fromorbit.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/xfs_alloc_btree.c   | 200
-rw-r--r--  fs/xfs/xfs_bmap_btree.c    | 275
-rw-r--r--  fs/xfs/xfs_btree.c         | 268
-rw-r--r--  fs/xfs/xfs_btree.h         |   8
-rw-r--r--  fs/xfs/xfs_ialloc_btree.c  | 212
5 files changed, 460 insertions(+), 503 deletions(-)
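
As context for the patch below, here is a minimal standalone sketch, not kernel code, of the callback pattern the change introduces: the generic split routine asks a per-btree ops table to allocate the new block instead of each btree type carrying its own copy of the split logic. All demo_* names are invented for illustration; only the shape of the alloc_block hook mirrors the xfs_btree_ops addition in this patch.

    #include <stdio.h>
    #include <stdint.h>

    union demo_ptr {
            uint64_t l;             /* long-pointer btrees (bmap) */
            uint32_t s;             /* short-pointer btrees (alloc, ialloc) */
    };

    struct demo_cur;

    struct demo_btree_ops {
            /* Allocate one block near *start; *stat = 0 means "no space". */
            int (*alloc_block)(struct demo_cur *cur, const union demo_ptr *start,
                               union demo_ptr *newp, int length, int *stat);
    };

    struct demo_cur {
            const struct demo_btree_ops *ops;
            uint32_t next_free;     /* toy free list for the demo */
    };

    /* Generic split skeleton: only the allocation step is modelled here. */
    static int demo_btree_split(struct demo_cur *cur, const union demo_ptr *lptr,
                                union demo_ptr *rptr, int *stat)
    {
            int error = cur->ops->alloc_block(cur, lptr, rptr, 1, stat);

            if (error || *stat == 0)
                    return error;   /* hard error, or out of space (*stat == 0) */
            /* The real code would now move half the records to the new block. */
            return 0;
    }

    /* Per-btree hook, loosely analogous to xfs_allocbt_alloc_block(). */
    static int demo_allocbt_alloc_block(struct demo_cur *cur,
                                        const union demo_ptr *start,
                                        union demo_ptr *newp, int length, int *stat)
    {
            (void)start;
            (void)length;
            if (cur->next_free == 0) {      /* pretend the freelist is empty */
                    *stat = 0;
                    return 0;
            }
            newp->s = cur->next_free++;
            *stat = 1;
            return 0;
    }

    static const struct demo_btree_ops demo_allocbt_ops = {
            .alloc_block = demo_allocbt_alloc_block,
    };

    int main(void)
    {
            struct demo_cur cur = { .ops = &demo_allocbt_ops, .next_free = 100 };
            union demo_ptr left = { .s = 42 };
            union demo_ptr right;
            int stat = 0;

            if (demo_btree_split(&cur, &left, &right, &stat) == 0 && stat)
                    printf("split: new right block %u\n", (unsigned)right.s);
            return 0;
    }

The split logic becomes shareable while each btree keeps its own allocation policy behind the hook; in the patch itself, xfs_allocbt_alloc_block() pulls a block from the AG freelist and xfs_bmbt_alloc_block() goes through xfs_alloc_vextent().
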
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 974a412ebc8a..8a8d1aeec52a 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -35,6 +35,7 @@
35#include "xfs_dinode.h" 35#include "xfs_dinode.h"
36#include "xfs_inode.h" 36#include "xfs_inode.h"
37#include "xfs_btree.h" 37#include "xfs_btree.h"
38#include "xfs_btree_trace.h"
38#include "xfs_ialloc.h" 39#include "xfs_ialloc.h"
39#include "xfs_alloc.h" 40#include "xfs_alloc.h"
40#include "xfs_error.h" 41#include "xfs_error.h"
@@ -48,8 +49,6 @@ STATIC void xfs_alloc_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
48STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); 49STATIC void xfs_alloc_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
49STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); 50STATIC void xfs_alloc_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
50STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *); 51STATIC int xfs_alloc_newroot(xfs_btree_cur_t *, int *);
51STATIC int xfs_alloc_split(xfs_btree_cur_t *, int, xfs_agblock_t *,
52 xfs_alloc_key_t *, xfs_btree_cur_t **, int *);
53 52
54/* 53/*
55 * Internal functions. 54 * Internal functions.
@@ -695,15 +694,18 @@ xfs_alloc_insrec(
695 if (i) 694 if (i)
696 optr = ptr = cur->bc_ptrs[level]; 695 optr = ptr = cur->bc_ptrs[level];
697 else { 696 else {
697 union xfs_btree_ptr bno = { .s = cpu_to_be32(nbno) };
698 /* 698 /*
699 * Next, try splitting the current block in 699 * Next, try splitting the current block in
700 * half. If this works we have to re-set our 700 * half. If this works we have to re-set our
701 * variables because we could be in a 701 * variables because we could be in a
702 * different block now. 702 * different block now.
703 */ 703 */
704 if ((error = xfs_alloc_split(cur, level, &nbno, 704 if ((error = xfs_btree_split(cur, level, &bno,
705 &nkey, &ncur, &i))) 705 (union xfs_btree_key *)&nkey,
706 &ncur, &i)))
706 return error; 707 return error;
708 nbno = be32_to_cpu(bno.s);
707 if (i) { 709 if (i) {
708 bp = cur->bc_bufs[level]; 710 bp = cur->bc_bufs[level];
709 block = XFS_BUF_TO_ALLOC_BLOCK(bp); 711 block = XFS_BUF_TO_ALLOC_BLOCK(bp);
@@ -1089,160 +1091,6 @@ xfs_alloc_newroot(
1089 return 0; 1091 return 0;
1090} 1092}
1091 1093
1092/*
1093 * Split cur/level block in half.
1094 * Return new block number and its first record (to be inserted into parent).
1095 */
1096STATIC int /* error */
1097xfs_alloc_split(
1098 xfs_btree_cur_t *cur, /* btree cursor */
1099 int level, /* level to split */
1100 xfs_agblock_t *bnop, /* output: block number allocated */
1101 xfs_alloc_key_t *keyp, /* output: first key of new block */
1102 xfs_btree_cur_t **curp, /* output: new cursor */
1103 int *stat) /* success/failure */
1104{
1105 int error; /* error return value */
1106 int i; /* loop index/record number */
1107 xfs_agblock_t lbno; /* left (current) block number */
1108 xfs_buf_t *lbp; /* buffer for left block */
1109 xfs_alloc_block_t *left; /* left (current) btree block */
1110 xfs_agblock_t rbno; /* right (new) block number */
1111 xfs_buf_t *rbp; /* buffer for right block */
1112 xfs_alloc_block_t *right; /* right (new) btree block */
1113
1114 /*
1115 * Allocate the new block from the freelist.
1116 * If we can't do it, we're toast. Give up.
1117 */
1118 error = xfs_alloc_get_freelist(cur->bc_tp,
1119 cur->bc_private.a.agbp, &rbno, 1);
1120 if (error)
1121 return error;
1122 if (rbno == NULLAGBLOCK) {
1123 *stat = 0;
1124 return 0;
1125 }
1126 xfs_trans_agbtree_delta(cur->bc_tp, 1);
1127 rbp = xfs_btree_get_bufs(cur->bc_mp, cur->bc_tp, cur->bc_private.a.agno,
1128 rbno, 0);
1129 /*
1130 * Set up the new block as "right".
1131 */
1132 right = XFS_BUF_TO_ALLOC_BLOCK(rbp);
1133 /*
1134 * "Left" is the current (according to the cursor) block.
1135 */
1136 lbp = cur->bc_bufs[level];
1137 left = XFS_BUF_TO_ALLOC_BLOCK(lbp);
1138#ifdef DEBUG
1139 if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
1140 return error;
1141#endif
1142 /*
1143 * Fill in the btree header for the new block.
1144 */
1145 right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
1146 right->bb_level = left->bb_level;
1147 right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
1148 /*
1149 * Make sure that if there's an odd number of entries now, that
1150 * each new block will have the same number of entries.
1151 */
1152 if ((be16_to_cpu(left->bb_numrecs) & 1) &&
1153 cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
1154 be16_add_cpu(&right->bb_numrecs, 1);
1155 i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
1156 /*
1157 * For non-leaf blocks, copy keys and addresses over to the new block.
1158 */
1159 if (level > 0) {
1160 xfs_alloc_key_t *lkp; /* left btree key pointer */
1161 xfs_alloc_ptr_t *lpp; /* left btree address pointer */
1162 xfs_alloc_key_t *rkp; /* right btree key pointer */
1163 xfs_alloc_ptr_t *rpp; /* right btree address pointer */
1164
1165 lkp = XFS_ALLOC_KEY_ADDR(left, i, cur);
1166 lpp = XFS_ALLOC_PTR_ADDR(left, i, cur);
1167 rkp = XFS_ALLOC_KEY_ADDR(right, 1, cur);
1168 rpp = XFS_ALLOC_PTR_ADDR(right, 1, cur);
1169#ifdef DEBUG
1170 for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
1171 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
1172 return error;
1173 }
1174#endif
1175 memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
1176 memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
1177 xfs_alloc_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1178 xfs_alloc_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1179 *keyp = *rkp;
1180 }
1181 /*
1182 * For leaf blocks, copy records over to the new block.
1183 */
1184 else {
1185 xfs_alloc_rec_t *lrp; /* left btree record pointer */
1186 xfs_alloc_rec_t *rrp; /* right btree record pointer */
1187
1188 lrp = XFS_ALLOC_REC_ADDR(left, i, cur);
1189 rrp = XFS_ALLOC_REC_ADDR(right, 1, cur);
1190 memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1191 xfs_alloc_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1192 keyp->ar_startblock = rrp->ar_startblock;
1193 keyp->ar_blockcount = rrp->ar_blockcount;
1194 }
1195 /*
1196 * Find the left block number by looking in the buffer.
1197 * Adjust numrecs, sibling pointers.
1198 */
1199 lbno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(lbp));
1200 be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
1201 right->bb_rightsib = left->bb_rightsib;
1202 left->bb_rightsib = cpu_to_be32(rbno);
1203 right->bb_leftsib = cpu_to_be32(lbno);
1204 xfs_alloc_log_block(cur->bc_tp, rbp, XFS_BB_ALL_BITS);
1205 xfs_alloc_log_block(cur->bc_tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
1206 /*
1207 * If there's a block to the new block's right, make that block
1208 * point back to right instead of to left.
1209 */
1210 if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) {
1211 xfs_alloc_block_t *rrblock; /* rr btree block */
1212 xfs_buf_t *rrbp; /* buffer for rrblock */
1213
1214 if ((error = xfs_btree_read_bufs(cur->bc_mp, cur->bc_tp,
1215 cur->bc_private.a.agno, be32_to_cpu(right->bb_rightsib), 0,
1216 &rrbp, XFS_ALLOC_BTREE_REF)))
1217 return error;
1218 rrblock = XFS_BUF_TO_ALLOC_BLOCK(rrbp);
1219 if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
1220 return error;
1221 rrblock->bb_leftsib = cpu_to_be32(rbno);
1222 xfs_alloc_log_block(cur->bc_tp, rrbp, XFS_BB_LEFTSIB);
1223 }
1224 /*
1225 * If the cursor is really in the right block, move it there.
1226 * If it's just pointing past the last entry in left, then we'll
1227 * insert there, so don't change anything in that case.
1228 */
1229 if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
1230 xfs_btree_setbuf(cur, level, rbp);
1231 cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
1232 }
1233 /*
1234 * If there are more levels, we'll need another cursor which refers to
1235 * the right block, no matter where this cursor was.
1236 */
1237 if (level + 1 < cur->bc_nlevels) {
1238 if ((error = xfs_btree_dup_cursor(cur, curp)))
1239 return error;
1240 (*curp)->bc_ptrs[level + 1]++;
1241 }
1242 *bnop = rbno;
1243 *stat = 1;
1244 return 0;
1245}
1246 1094
1247/* 1095/*
1248 * Externally visible routines. 1096 * Externally visible routines.
@@ -1396,6 +1244,41 @@ xfs_allocbt_dup_cursor(
1396 cur->bc_btnum); 1244 cur->bc_btnum);
1397} 1245}
1398 1246
1247STATIC int
1248xfs_allocbt_alloc_block(
1249 struct xfs_btree_cur *cur,
1250 union xfs_btree_ptr *start,
1251 union xfs_btree_ptr *new,
1252 int length,
1253 int *stat)
1254{
1255 int error;
1256 xfs_agblock_t bno;
1257
1258 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
1259
1260 /* Allocate the new block from the freelist. If we can't, give up. */
1261 error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
1262 &bno, 1);
1263 if (error) {
1264 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
1265 return error;
1266 }
1267
1268 if (bno == NULLAGBLOCK) {
1269 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
1270 *stat = 0;
1271 return 0;
1272 }
1273
1274 xfs_trans_agbtree_delta(cur->bc_tp, 1);
1275 new->s = cpu_to_be32(bno);
1276
1277 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
1278 *stat = 1;
1279 return 0;
1280}
1281
1399/* 1282/*
1400 * Update the longest extent in the AGF 1283 * Update the longest extent in the AGF
1401 */ 1284 */
@@ -1557,6 +1440,7 @@ static const struct xfs_btree_ops xfs_allocbt_ops = {
1557 .key_len = sizeof(xfs_alloc_key_t), 1440 .key_len = sizeof(xfs_alloc_key_t),
1558 1441
1559 .dup_cursor = xfs_allocbt_dup_cursor, 1442 .dup_cursor = xfs_allocbt_dup_cursor,
1443 .alloc_block = xfs_allocbt_alloc_block,
1560 .update_lastrec = xfs_allocbt_update_lastrec, 1444 .update_lastrec = xfs_allocbt_update_lastrec,
1561 .get_maxrecs = xfs_allocbt_get_maxrecs, 1445 .get_maxrecs = xfs_allocbt_get_maxrecs,
1562 .init_key_from_rec = xfs_allocbt_init_key_from_rec, 1446 .init_key_from_rec = xfs_allocbt_init_key_from_rec,
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 809bd6ee177e..e7539263457f 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -52,8 +52,6 @@
52STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *); 52STATIC int xfs_bmbt_killroot(xfs_btree_cur_t *);
53STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int); 53STATIC void xfs_bmbt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
54STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); 54STATIC void xfs_bmbt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
55STATIC int xfs_bmbt_split(xfs_btree_cur_t *, int, xfs_fsblock_t *,
56 __uint64_t *, xfs_btree_cur_t **, int *);
57 55
58#undef EXIT 56#undef EXIT
59 57
@@ -550,13 +548,17 @@ xfs_bmbt_insrec(
550 if (i) { 548 if (i) {
551 optr = ptr = cur->bc_ptrs[level]; 549 optr = ptr = cur->bc_ptrs[level];
552 } else { 550 } else {
553 if ((error = xfs_bmbt_split(cur, level, 551 union xfs_btree_ptr bno = { .l = cpu_to_be64(nbno) };
554 &nbno, &startoff, &ncur, 552 union xfs_btree_key skey;
553 if ((error = xfs_btree_split(cur, level,
554 &bno, &skey, &ncur,
555 &i))) { 555 &i))) {
556 XFS_BMBT_TRACE_CURSOR(cur, 556 XFS_BMBT_TRACE_CURSOR(cur,
557 ERROR); 557 ERROR);
558 return error; 558 return error;
559 } 559 }
560 nbno = be64_to_cpu(bno.l);
561 startoff = be64_to_cpu(skey.bmbt.br_startoff);
560 if (i) { 562 if (i) {
561 block = xfs_bmbt_get_block( 563 block = xfs_bmbt_get_block(
562 cur, level, &bp); 564 cur, level, &bp);
@@ -825,184 +827,6 @@ xfs_extent_state(
825 return XFS_EXT_NORM; 827 return XFS_EXT_NORM;
826} 828}
827 829
828
829/*
830 * Split cur/level block in half.
831 * Return new block number and its first record (to be inserted into parent).
832 */
833STATIC int /* error */
834xfs_bmbt_split(
835 xfs_btree_cur_t *cur,
836 int level,
837 xfs_fsblock_t *bnop,
838 __uint64_t *startoff,
839 xfs_btree_cur_t **curp,
840 int *stat) /* success/failure */
841{
842 xfs_alloc_arg_t args; /* block allocation args */
843 int error; /* error return value */
844 int i; /* loop counter */
845 xfs_fsblock_t lbno; /* left sibling block number */
846 xfs_buf_t *lbp; /* left buffer pointer */
847 xfs_bmbt_block_t *left; /* left btree block */
848 xfs_bmbt_key_t *lkp; /* left btree key */
849 xfs_bmbt_ptr_t *lpp; /* left address pointer */
850 xfs_bmbt_rec_t *lrp; /* left record pointer */
851 xfs_buf_t *rbp; /* right buffer pointer */
852 xfs_bmbt_block_t *right; /* right btree block */
853 xfs_bmbt_key_t *rkp; /* right btree key */
854 xfs_bmbt_ptr_t *rpp; /* right address pointer */
855 xfs_bmbt_block_t *rrblock; /* right-right btree block */
856 xfs_buf_t *rrbp; /* right-right buffer pointer */
857 xfs_bmbt_rec_t *rrp; /* right record pointer */
858
859 XFS_BMBT_TRACE_CURSOR(cur, ENTRY);
860 // disable until merged into common code
861// XFS_BMBT_TRACE_ARGIFK(cur, level, *bnop, *startoff);
862 args.tp = cur->bc_tp;
863 args.mp = cur->bc_mp;
864 lbp = cur->bc_bufs[level];
865 lbno = XFS_DADDR_TO_FSB(args.mp, XFS_BUF_ADDR(lbp));
866 left = XFS_BUF_TO_BMBT_BLOCK(lbp);
867 args.fsbno = cur->bc_private.b.firstblock;
868 args.firstblock = args.fsbno;
869 args.minleft = 0;
870 if (args.fsbno == NULLFSBLOCK) {
871 args.fsbno = lbno;
872 args.type = XFS_ALLOCTYPE_START_BNO;
873 /*
874 * Make sure there is sufficient room left in the AG to
875 * complete a full tree split for an extent insert. If
876 * we are converting the middle part of an extent then
877 * we may need space for two tree splits.
878 *
879 * We are relying on the caller to make the correct block
880 * reservation for this operation to succeed. If the
881 * reservation amount is insufficient then we may fail a
882 * block allocation here and corrupt the filesystem.
883 */
884 args.minleft = xfs_trans_get_block_res(args.tp);
885 } else if (cur->bc_private.b.flist->xbf_low)
886 args.type = XFS_ALLOCTYPE_START_BNO;
887 else
888 args.type = XFS_ALLOCTYPE_NEAR_BNO;
889 args.mod = args.alignment = args.total = args.isfl =
890 args.userdata = args.minalignslop = 0;
891 args.minlen = args.maxlen = args.prod = 1;
892 args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
893 if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
894 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
895 return XFS_ERROR(ENOSPC);
896 }
897 if ((error = xfs_alloc_vextent(&args))) {
898 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
899 return error;
900 }
901 if (args.fsbno == NULLFSBLOCK && args.minleft) {
902 /*
903 * Could not find an AG with enough free space to satisfy
904 * a full btree split. Try again without minleft and if
905 * successful activate the lowspace algorithm.
906 */
907 args.fsbno = 0;
908 args.type = XFS_ALLOCTYPE_FIRST_AG;
909 args.minleft = 0;
910 if ((error = xfs_alloc_vextent(&args))) {
911 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
912 return error;
913 }
914 cur->bc_private.b.flist->xbf_low = 1;
915 }
916 if (args.fsbno == NULLFSBLOCK) {
917 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
918 *stat = 0;
919 return 0;
920 }
921 ASSERT(args.len == 1);
922 cur->bc_private.b.firstblock = args.fsbno;
923 cur->bc_private.b.allocated++;
924 cur->bc_private.b.ip->i_d.di_nblocks++;
925 xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
926 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
927 XFS_TRANS_DQ_BCOUNT, 1L);
928 rbp = xfs_btree_get_bufl(args.mp, args.tp, args.fsbno, 0);
929 right = XFS_BUF_TO_BMBT_BLOCK(rbp);
930#ifdef DEBUG
931 if ((error = xfs_btree_check_lblock(cur, left, level, rbp))) {
932 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
933 return error;
934 }
935#endif
936 right->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
937 right->bb_level = left->bb_level;
938 right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
939 if ((be16_to_cpu(left->bb_numrecs) & 1) &&
940 cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
941 be16_add_cpu(&right->bb_numrecs, 1);
942 i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
943 if (level > 0) {
944 lkp = XFS_BMAP_KEY_IADDR(left, i, cur);
945 lpp = XFS_BMAP_PTR_IADDR(left, i, cur);
946 rkp = XFS_BMAP_KEY_IADDR(right, 1, cur);
947 rpp = XFS_BMAP_PTR_IADDR(right, 1, cur);
948#ifdef DEBUG
949 for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
950 if ((error = xfs_btree_check_lptr_disk(cur, lpp[i], level))) {
951 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
952 return error;
953 }
954 }
955#endif
956 memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
957 memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
958 xfs_bmbt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
959 xfs_bmbt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
960 *startoff = be64_to_cpu(rkp->br_startoff);
961 } else {
962 lrp = XFS_BMAP_REC_IADDR(left, i, cur);
963 rrp = XFS_BMAP_REC_IADDR(right, 1, cur);
964 memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
965 xfs_bmbt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
966 *startoff = xfs_bmbt_disk_get_startoff(rrp);
967 }
968 be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
969 right->bb_rightsib = left->bb_rightsib;
970 left->bb_rightsib = cpu_to_be64(args.fsbno);
971 right->bb_leftsib = cpu_to_be64(lbno);
972 xfs_bmbt_log_block(cur, rbp, XFS_BB_ALL_BITS);
973 xfs_bmbt_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
974 if (be64_to_cpu(right->bb_rightsib) != NULLDFSBNO) {
975 if ((error = xfs_btree_read_bufl(args.mp, args.tp,
976 be64_to_cpu(right->bb_rightsib), 0, &rrbp,
977 XFS_BMAP_BTREE_REF))) {
978 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
979 return error;
980 }
981 rrblock = XFS_BUF_TO_BMBT_BLOCK(rrbp);
982 if ((error = xfs_btree_check_lblock(cur, rrblock, level, rrbp))) {
983 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
984 return error;
985 }
986 rrblock->bb_leftsib = cpu_to_be64(args.fsbno);
987 xfs_bmbt_log_block(cur, rrbp, XFS_BB_LEFTSIB);
988 }
989 if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
990 xfs_btree_setbuf(cur, level, rbp);
991 cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
992 }
993 if (level + 1 < cur->bc_nlevels) {
994 if ((error = xfs_btree_dup_cursor(cur, curp))) {
995 XFS_BMBT_TRACE_CURSOR(cur, ERROR);
996 return error;
997 }
998 (*curp)->bc_ptrs[level + 1]++;
999 }
1000 *bnop = args.fsbno;
1001 XFS_BMBT_TRACE_CURSOR(cur, EXIT);
1002 *stat = 1;
1003 return 0;
1004}
1005
1006/* 830/*
1007 * Convert on-disk form of btree root to in-memory form. 831 * Convert on-disk form of btree root to in-memory form.
1008 */ 832 */
@@ -1738,6 +1562,92 @@ xfs_bmbt_dup_cursor(
1738} 1562}
1739 1563
1740STATIC int 1564STATIC int
1565xfs_bmbt_alloc_block(
1566 struct xfs_btree_cur *cur,
1567 union xfs_btree_ptr *start,
1568 union xfs_btree_ptr *new,
1569 int length,
1570 int *stat)
1571{
1572 xfs_alloc_arg_t args; /* block allocation args */
1573 int error; /* error return value */
1574
1575 memset(&args, 0, sizeof(args));
1576 args.tp = cur->bc_tp;
1577 args.mp = cur->bc_mp;
1578 args.fsbno = cur->bc_private.b.firstblock;
1579 args.firstblock = args.fsbno;
1580
1581 if (args.fsbno == NULLFSBLOCK) {
1582 args.fsbno = be64_to_cpu(start->l);
1583 args.type = XFS_ALLOCTYPE_START_BNO;
1584 /*
1585 * Make sure there is sufficient room left in the AG to
1586 * complete a full tree split for an extent insert. If
1587 * we are converting the middle part of an extent then
1588 * we may need space for two tree splits.
1589 *
1590 * We are relying on the caller to make the correct block
1591 * reservation for this operation to succeed. If the
1592 * reservation amount is insufficient then we may fail a
1593 * block allocation here and corrupt the filesystem.
1594 */
1595 args.minleft = xfs_trans_get_block_res(args.tp);
1596 } else if (cur->bc_private.b.flist->xbf_low) {
1597 args.type = XFS_ALLOCTYPE_START_BNO;
1598 } else {
1599 args.type = XFS_ALLOCTYPE_NEAR_BNO;
1600 }
1601
1602 args.minlen = args.maxlen = args.prod = 1;
1603 args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
1604 if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
1605 error = XFS_ERROR(ENOSPC);
1606 goto error0;
1607 }
1608 error = xfs_alloc_vextent(&args);
1609 if (error)
1610 goto error0;
1611
1612 if (args.fsbno == NULLFSBLOCK && args.minleft) {
1613 /*
1614 * Could not find an AG with enough free space to satisfy
1615 * a full btree split. Try again without minleft and if
1616 * successful activate the lowspace algorithm.
1617 */
1618 args.fsbno = 0;
1619 args.type = XFS_ALLOCTYPE_FIRST_AG;
1620 args.minleft = 0;
1621 error = xfs_alloc_vextent(&args);
1622 if (error)
1623 goto error0;
1624 cur->bc_private.b.flist->xbf_low = 1;
1625 }
1626 if (args.fsbno == NULLFSBLOCK) {
1627 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
1628 *stat = 0;
1629 return 0;
1630 }
1631 ASSERT(args.len == 1);
1632 cur->bc_private.b.firstblock = args.fsbno;
1633 cur->bc_private.b.allocated++;
1634 cur->bc_private.b.ip->i_d.di_nblocks++;
1635 xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
1636 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
1637 XFS_TRANS_DQ_BCOUNT, 1L);
1638
1639 new->l = cpu_to_be64(args.fsbno);
1640
1641 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
1642 *stat = 1;
1643 return 0;
1644
1645 error0:
1646 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
1647 return error;
1648}
1649
1650STATIC int
1741xfs_bmbt_get_maxrecs( 1651xfs_bmbt_get_maxrecs(
1742 struct xfs_btree_cur *cur, 1652 struct xfs_btree_cur *cur,
1743 int level) 1653 int level)
@@ -1861,6 +1771,7 @@ static const struct xfs_btree_ops xfs_bmbt_ops = {
1861 .key_len = sizeof(xfs_bmbt_key_t), 1771 .key_len = sizeof(xfs_bmbt_key_t),
1862 1772
1863 .dup_cursor = xfs_bmbt_dup_cursor, 1773 .dup_cursor = xfs_bmbt_dup_cursor,
1774 .alloc_block = xfs_bmbt_alloc_block,
1864 .get_maxrecs = xfs_bmbt_get_maxrecs, 1775 .get_maxrecs = xfs_bmbt_get_maxrecs,
1865 .init_key_from_rec = xfs_bmbt_init_key_from_rec, 1776 .init_key_from_rec = xfs_bmbt_init_key_from_rec,
1866 .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur, 1777 .init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 2b0d1422c4c6..80576695fbe5 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -988,6 +988,48 @@ xfs_btree_get_sibling(
988 } 988 }
989} 989}
990 990
991STATIC void
992xfs_btree_set_sibling(
993 struct xfs_btree_cur *cur,
994 struct xfs_btree_block *block,
995 union xfs_btree_ptr *ptr,
996 int lr)
997{
998 ASSERT(lr == XFS_BB_LEFTSIB || lr == XFS_BB_RIGHTSIB);
999
1000 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
1001 if (lr == XFS_BB_RIGHTSIB)
1002 block->bb_u.l.bb_rightsib = ptr->l;
1003 else
1004 block->bb_u.l.bb_leftsib = ptr->l;
1005 } else {
1006 if (lr == XFS_BB_RIGHTSIB)
1007 block->bb_u.s.bb_rightsib = ptr->s;
1008 else
1009 block->bb_u.s.bb_leftsib = ptr->s;
1010 }
1011}
1012
1013STATIC void
1014xfs_btree_init_block(
1015 struct xfs_btree_cur *cur,
1016 int level,
1017 int numrecs,
1018 struct xfs_btree_block *new) /* new block */
1019{
1020 new->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
1021 new->bb_level = cpu_to_be16(level);
1022 new->bb_numrecs = cpu_to_be16(numrecs);
1023
1024 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
1025 new->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK);
1026 new->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK);
1027 } else {
1028 new->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
1029 new->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
1030 }
1031}
1032
991/* 1033/*
992 * Return true if ptr is the last record in the btree and 1034 * Return true if ptr is the last record in the btree and
993 * we need to track updates to this record. The decision 1035
@@ -1012,6 +1054,21 @@ xfs_btree_is_lastrec(
1012 return 1; 1054 return 1;
1013} 1055}
1014 1056
1057STATIC void
1058xfs_btree_buf_to_ptr(
1059 struct xfs_btree_cur *cur,
1060 struct xfs_buf *bp,
1061 union xfs_btree_ptr *ptr)
1062{
1063 if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
1064 ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
1065 XFS_BUF_ADDR(bp)));
1066 else {
1067 ptr->s = cpu_to_be32(XFS_DADDR_TO_AGBNO(cur->bc_mp,
1068 XFS_BUF_ADDR(bp)));
1069 }
1070}
1071
1015STATIC xfs_daddr_t 1072STATIC xfs_daddr_t
1016xfs_btree_ptr_to_daddr( 1073xfs_btree_ptr_to_daddr(
1017 struct xfs_btree_cur *cur, 1074 struct xfs_btree_cur *cur,
@@ -1051,6 +1108,31 @@ xfs_btree_set_refs(
1051 } 1108 }
1052} 1109}
1053 1110
1111STATIC int
1112xfs_btree_get_buf_block(
1113 struct xfs_btree_cur *cur,
1114 union xfs_btree_ptr *ptr,
1115 int flags,
1116 struct xfs_btree_block **block,
1117 struct xfs_buf **bpp)
1118{
1119 struct xfs_mount *mp = cur->bc_mp;
1120 xfs_daddr_t d;
1121
1122 /* need to sort out how callers deal with failures first */
1123 ASSERT(!(flags & XFS_BUF_TRYLOCK));
1124
1125 d = xfs_btree_ptr_to_daddr(cur, ptr);
1126 *bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
1127 mp->m_bsize, flags);
1128
1129 ASSERT(*bpp);
1130 ASSERT(!XFS_BUF_GETERROR(*bpp));
1131
1132 *block = XFS_BUF_TO_BLOCK(*bpp);
1133 return 0;
1134}
1135
1054/* 1136/*
1055 * Read in the buffer at the given ptr and return the buffer and 1137 * Read in the buffer at the given ptr and return the buffer and
1056 * the block pointer within the buffer. 1138 * the block pointer within the buffer.
@@ -2199,3 +2281,189 @@ error1:
2199 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); 2281 xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
2200 return error; 2282 return error;
2201} 2283}
2284
2285/*
2286 * Split cur/level block in half.
2287 * Return new block number and the key to its first
2288 * record (to be inserted into parent).
2289 */
2290int /* error */
2291xfs_btree_split(
2292 struct xfs_btree_cur *cur,
2293 int level,
2294 union xfs_btree_ptr *ptrp,
2295 union xfs_btree_key *key,
2296 struct xfs_btree_cur **curp,
2297 int *stat) /* success/failure */
2298{
2299 union xfs_btree_ptr lptr; /* left sibling block ptr */
2300 struct xfs_buf *lbp; /* left buffer pointer */
2301 struct xfs_btree_block *left; /* left btree block */
2302 union xfs_btree_ptr rptr; /* right sibling block ptr */
2303 struct xfs_buf *rbp; /* right buffer pointer */
2304 struct xfs_btree_block *right; /* right btree block */
2305 union xfs_btree_ptr rrptr; /* right-right sibling ptr */
2306 struct xfs_buf *rrbp; /* right-right buffer pointer */
2307 struct xfs_btree_block *rrblock; /* right-right btree block */
2308 int lrecs;
2309 int rrecs;
2310 int src_index;
2311 int error; /* error return value */
2312#ifdef DEBUG
2313 int i;
2314#endif
2315
2316 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
2317 XFS_BTREE_TRACE_ARGIPK(cur, level, *ptrp, key);
2318
2319 XFS_BTREE_STATS_INC(cur, split);
2320
2321 /* Set up left block (current one). */
2322 left = xfs_btree_get_block(cur, level, &lbp);
2323
2324#ifdef DEBUG
2325 error = xfs_btree_check_block(cur, left, level, lbp);
2326 if (error)
2327 goto error0;
2328#endif
2329
2330 xfs_btree_buf_to_ptr(cur, lbp, &lptr);
2331
2332 /* Allocate the new block. If we can't do it, we're toast. Give up. */
2333 error = cur->bc_ops->alloc_block(cur, &lptr, &rptr, 1, stat);
2334 if (error)
2335 goto error0;
2336 if (*stat == 0)
2337 goto out0;
2338 XFS_BTREE_STATS_INC(cur, alloc);
2339
2340 /* Set up the new block as "right". */
2341 error = xfs_btree_get_buf_block(cur, &rptr, 0, &right, &rbp);
2342 if (error)
2343 goto error0;
2344
2345 /* Fill in the btree header for the new right block. */
2346 xfs_btree_init_block(cur, xfs_btree_get_level(left), 0, right);
2347
2348 /*
2349 * Split the entries between the old and the new block evenly.
2350 * Make sure that if there's an odd number of entries now, that
2351 * each new block will have the same number of entries.
2352 */
2353 lrecs = xfs_btree_get_numrecs(left);
2354 rrecs = lrecs / 2;
2355 if ((lrecs & 1) && cur->bc_ptrs[level] <= rrecs + 1)
2356 rrecs++;
2357 src_index = (lrecs - rrecs + 1);
2358
2359 XFS_BTREE_STATS_ADD(cur, moves, rrecs);
2360
2361 /*
2362 * Copy btree block entries from the left block over to the
2363 * new block, the right. Update the right block and log the
2364 * changes.
2365 */
2366 if (level > 0) {
2367 /* It's a non-leaf. Move keys and pointers. */
2368 union xfs_btree_key *lkp; /* left btree key */
2369 union xfs_btree_ptr *lpp; /* left address pointer */
2370 union xfs_btree_key *rkp; /* right btree key */
2371 union xfs_btree_ptr *rpp; /* right address pointer */
2372
2373 lkp = xfs_btree_key_addr(cur, src_index, left);
2374 lpp = xfs_btree_ptr_addr(cur, src_index, left);
2375 rkp = xfs_btree_key_addr(cur, 1, right);
2376 rpp = xfs_btree_ptr_addr(cur, 1, right);
2377
2378#ifdef DEBUG
2379 for (i = src_index; i < rrecs; i++) {
2380 error = xfs_btree_check_ptr(cur, lpp, i, level);
2381 if (error)
2382 goto error0;
2383 }
2384#endif
2385
2386 xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
2387 xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);
2388
2389 xfs_btree_log_keys(cur, rbp, 1, rrecs);
2390 xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
2391
2392 /* Grab the keys to the entries moved to the right block */
2393 xfs_btree_copy_keys(cur, key, rkp, 1);
2394 } else {
2395 /* It's a leaf. Move records. */
2396 union xfs_btree_rec *lrp; /* left record pointer */
2397 union xfs_btree_rec *rrp; /* right record pointer */
2398
2399 lrp = xfs_btree_rec_addr(cur, src_index, left);
2400 rrp = xfs_btree_rec_addr(cur, 1, right);
2401
2402 xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
2403 xfs_btree_log_recs(cur, rbp, 1, rrecs);
2404
2405 cur->bc_ops->init_key_from_rec(key,
2406 xfs_btree_rec_addr(cur, 1, right));
2407 }
2408
2409
2410 /*
2411 * Find the left block number by looking in the buffer.
2412 * Adjust numrecs, sibling pointers.
2413 */
2414 xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
2415 xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
2416 xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
2417 xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
2418
2419 lrecs -= rrecs;
2420 xfs_btree_set_numrecs(left, lrecs);
2421 xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);
2422
2423 xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
2424 xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
2425
2426 /*
2427 * If there's a block to the new block's right, make that block
2428 * point back to right instead of to left.
2429 */
2430 if (!xfs_btree_ptr_is_null(cur, &rrptr)) {
2431 error = xfs_btree_read_buf_block(cur, &rrptr, level,
2432 0, &rrblock, &rrbp);
2433 if (error)
2434 goto error0;
2435 xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
2436 xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
2437 }
2438 /*
2439 * If the cursor is really in the right block, move it there.
2440 * If it's just pointing past the last entry in left, then we'll
2441 * insert there, so don't change anything in that case.
2442 */
2443 if (cur->bc_ptrs[level] > lrecs + 1) {
2444 xfs_btree_setbuf(cur, level, rbp);
2445 cur->bc_ptrs[level] -= lrecs;
2446 }
2447 /*
2448 * If there are more levels, we'll need another cursor which refers
2449 * the right block, no matter where this cursor was.
2450 */
2451 if (level + 1 < cur->bc_nlevels) {
2452 error = xfs_btree_dup_cursor(cur, curp);
2453 if (error)
2454 goto error0;
2455 (*curp)->bc_ptrs[level + 1]++;
2456 }
2457 *ptrp = rptr;
2458 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2459 *stat = 1;
2460 return 0;
2461out0:
2462 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
2463 *stat = 0;
2464 return 0;
2465
2466error0:
2467 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
2468 return error;
2469}
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 7cde287b5c9c..354a6656fad5 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -187,6 +187,12 @@ struct xfs_btree_ops {
187 /* cursor operations */ 187 /* cursor operations */
188 struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *); 188 struct xfs_btree_cur *(*dup_cursor)(struct xfs_btree_cur *);
189 189
190 /* block allocation / freeing */
191 int (*alloc_block)(struct xfs_btree_cur *cur,
192 union xfs_btree_ptr *start_bno,
193 union xfs_btree_ptr *new_bno,
194 int length, int *stat);
195
190 /* update last record information */ 196 /* update last record information */
191 void (*update_lastrec)(struct xfs_btree_cur *cur, 197 void (*update_lastrec)(struct xfs_btree_cur *cur,
192 struct xfs_btree_block *block, 198 struct xfs_btree_block *block,
@@ -535,6 +541,8 @@ int xfs_btree_updkey(struct xfs_btree_cur *, union xfs_btree_key *, int);
535int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *); 541int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *);
536int xfs_btree_lshift(struct xfs_btree_cur *, int, int *); 542int xfs_btree_lshift(struct xfs_btree_cur *, int, int *);
537int xfs_btree_rshift(struct xfs_btree_cur *, int, int *); 543int xfs_btree_rshift(struct xfs_btree_cur *, int, int *);
544int xfs_btree_split(struct xfs_btree_cur *, int, union xfs_btree_ptr *,
545 union xfs_btree_key *, struct xfs_btree_cur **, int *);
538 546
539/* 547/*
540 * Helpers. 548 * Helpers.
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index 60f5db5d6dfa..c76190a83e4e 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -35,6 +35,7 @@
35#include "xfs_dinode.h" 35#include "xfs_dinode.h"
36#include "xfs_inode.h" 36#include "xfs_inode.h"
37#include "xfs_btree.h" 37#include "xfs_btree.h"
38#include "xfs_btree_trace.h"
38#include "xfs_ialloc.h" 39#include "xfs_ialloc.h"
39#include "xfs_alloc.h" 40#include "xfs_alloc.h"
40#include "xfs_error.h" 41#include "xfs_error.h"
@@ -44,8 +45,6 @@ STATIC void xfs_inobt_log_keys(xfs_btree_cur_t *, xfs_buf_t *, int, int);
44STATIC void xfs_inobt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int); 45STATIC void xfs_inobt_log_ptrs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
45STATIC void xfs_inobt_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int); 46STATIC void xfs_inobt_log_recs(xfs_btree_cur_t *, xfs_buf_t *, int, int);
46STATIC int xfs_inobt_newroot(xfs_btree_cur_t *, int *); 47STATIC int xfs_inobt_newroot(xfs_btree_cur_t *, int *);
47STATIC int xfs_inobt_split(xfs_btree_cur_t *, int, xfs_agblock_t *,
48 xfs_inobt_key_t *, xfs_btree_cur_t **, int *);
49 48
50/* 49/*
51 * Single level of the xfs_inobt_delete record deletion routine. 50 * Single level of the xfs_inobt_delete record deletion routine.
@@ -620,15 +619,18 @@ xfs_inobt_insrec(
620 if (i) { 619 if (i) {
621 optr = ptr = cur->bc_ptrs[level]; 620 optr = ptr = cur->bc_ptrs[level];
622 } else { 621 } else {
622 union xfs_btree_ptr bno = { .s = cpu_to_be32(nbno) };
623 /* 623 /*
624 * Next, try splitting the current block 624 * Next, try splitting the current block
625 * in half. If this works we have to 625 * in half. If this works we have to
626 * re-set our variables because 626 * re-set our variables because
627 * we could be in a different block now. 627 * we could be in a different block now.
628 */ 628 */
629 if ((error = xfs_inobt_split(cur, level, &nbno, 629 if ((error = xfs_btree_split(cur, level, &bno,
630 &nkey, &ncur, &i))) 630 (union xfs_btree_key *)&nkey,
631 &ncur, &i)))
631 return error; 632 return error;
633 nbno = be32_to_cpu(bno.s);
632 if (i) { 634 if (i) {
633 bp = cur->bc_bufs[level]; 635 bp = cur->bc_bufs[level];
634 block = XFS_BUF_TO_INOBT_BLOCK(bp); 636 block = XFS_BUF_TO_INOBT_BLOCK(bp);
@@ -973,165 +975,6 @@ xfs_inobt_newroot(
973} 975}
974 976
975/* 977/*
976 * Split cur/level block in half.
977 * Return new block number and its first record (to be inserted into parent).
978 */
979STATIC int /* error */
980xfs_inobt_split(
981 xfs_btree_cur_t *cur, /* btree cursor */
982 int level, /* level to split */
983 xfs_agblock_t *bnop, /* output: block number allocated */
984 xfs_inobt_key_t *keyp, /* output: first key of new block */
985 xfs_btree_cur_t **curp, /* output: new cursor */
986 int *stat) /* success/failure */
987{
988 xfs_alloc_arg_t args; /* allocation argument structure */
989 int error; /* error return value */
990 int i; /* loop index/record number */
991 xfs_agblock_t lbno; /* left (current) block number */
992 xfs_buf_t *lbp; /* buffer for left block */
993 xfs_inobt_block_t *left; /* left (current) btree block */
994 xfs_inobt_key_t *lkp; /* left btree key pointer */
995 xfs_inobt_ptr_t *lpp; /* left btree address pointer */
996 xfs_inobt_rec_t *lrp; /* left btree record pointer */
997 xfs_buf_t *rbp; /* buffer for right block */
998 xfs_inobt_block_t *right; /* right (new) btree block */
999 xfs_inobt_key_t *rkp; /* right btree key pointer */
1000 xfs_inobt_ptr_t *rpp; /* right btree address pointer */
1001 xfs_inobt_rec_t *rrp; /* right btree record pointer */
1002
1003 /*
1004 * Set up left block (current one).
1005 */
1006 lbp = cur->bc_bufs[level];
1007 args.tp = cur->bc_tp;
1008 args.mp = cur->bc_mp;
1009 lbno = XFS_DADDR_TO_AGBNO(args.mp, XFS_BUF_ADDR(lbp));
1010 /*
1011 * Allocate the new block.
1012 * If we can't do it, we're toast. Give up.
1013 */
1014 args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, lbno);
1015 args.mod = args.minleft = args.alignment = args.total = args.wasdel =
1016 args.isfl = args.userdata = args.minalignslop = 0;
1017 args.minlen = args.maxlen = args.prod = 1;
1018 args.type = XFS_ALLOCTYPE_NEAR_BNO;
1019 if ((error = xfs_alloc_vextent(&args)))
1020 return error;
1021 if (args.fsbno == NULLFSBLOCK) {
1022 *stat = 0;
1023 return 0;
1024 }
1025 ASSERT(args.len == 1);
1026 rbp = xfs_btree_get_bufs(args.mp, args.tp, args.agno, args.agbno, 0);
1027 /*
1028 * Set up the new block as "right".
1029 */
1030 right = XFS_BUF_TO_INOBT_BLOCK(rbp);
1031 /*
1032 * "Left" is the current (according to the cursor) block.
1033 */
1034 left = XFS_BUF_TO_INOBT_BLOCK(lbp);
1035#ifdef DEBUG
1036 if ((error = xfs_btree_check_sblock(cur, left, level, lbp)))
1037 return error;
1038#endif
1039 /*
1040 * Fill in the btree header for the new block.
1041 */
1042 right->bb_magic = cpu_to_be32(xfs_magics[cur->bc_btnum]);
1043 right->bb_level = left->bb_level;
1044 right->bb_numrecs = cpu_to_be16(be16_to_cpu(left->bb_numrecs) / 2);
1045 /*
1046 * Make sure that if there's an odd number of entries now, that
1047 * each new block will have the same number of entries.
1048 */
1049 if ((be16_to_cpu(left->bb_numrecs) & 1) &&
1050 cur->bc_ptrs[level] <= be16_to_cpu(right->bb_numrecs) + 1)
1051 be16_add_cpu(&right->bb_numrecs, 1);
1052 i = be16_to_cpu(left->bb_numrecs) - be16_to_cpu(right->bb_numrecs) + 1;
1053 /*
1054 * For non-leaf blocks, copy keys and addresses over to the new block.
1055 */
1056 if (level > 0) {
1057 lkp = XFS_INOBT_KEY_ADDR(left, i, cur);
1058 lpp = XFS_INOBT_PTR_ADDR(left, i, cur);
1059 rkp = XFS_INOBT_KEY_ADDR(right, 1, cur);
1060 rpp = XFS_INOBT_PTR_ADDR(right, 1, cur);
1061#ifdef DEBUG
1062 for (i = 0; i < be16_to_cpu(right->bb_numrecs); i++) {
1063 if ((error = xfs_btree_check_sptr(cur, be32_to_cpu(lpp[i]), level)))
1064 return error;
1065 }
1066#endif
1067 memcpy(rkp, lkp, be16_to_cpu(right->bb_numrecs) * sizeof(*rkp));
1068 memcpy(rpp, lpp, be16_to_cpu(right->bb_numrecs) * sizeof(*rpp));
1069 xfs_inobt_log_keys(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1070 xfs_inobt_log_ptrs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1071 *keyp = *rkp;
1072 }
1073 /*
1074 * For leaf blocks, copy records over to the new block.
1075 */
1076 else {
1077 lrp = XFS_INOBT_REC_ADDR(left, i, cur);
1078 rrp = XFS_INOBT_REC_ADDR(right, 1, cur);
1079 memcpy(rrp, lrp, be16_to_cpu(right->bb_numrecs) * sizeof(*rrp));
1080 xfs_inobt_log_recs(cur, rbp, 1, be16_to_cpu(right->bb_numrecs));
1081 keyp->ir_startino = rrp->ir_startino;
1082 }
1083 /*
1084 * Find the left block number by looking in the buffer.
1085 * Adjust numrecs, sibling pointers.
1086 */
1087 be16_add_cpu(&left->bb_numrecs, -(be16_to_cpu(right->bb_numrecs)));
1088 right->bb_rightsib = left->bb_rightsib;
1089 left->bb_rightsib = cpu_to_be32(args.agbno);
1090 right->bb_leftsib = cpu_to_be32(lbno);
1091 xfs_inobt_log_block(args.tp, rbp, XFS_BB_ALL_BITS);
1092 xfs_inobt_log_block(args.tp, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
1093 /*
1094 * If there's a block to the new block's right, make that block
1095 * point back to right instead of to left.
1096 */
1097 if (be32_to_cpu(right->bb_rightsib) != NULLAGBLOCK) {
1098 xfs_inobt_block_t *rrblock; /* rr btree block */
1099 xfs_buf_t *rrbp; /* buffer for rrblock */
1100
1101 if ((error = xfs_btree_read_bufs(args.mp, args.tp, args.agno,
1102 be32_to_cpu(right->bb_rightsib), 0, &rrbp,
1103 XFS_INO_BTREE_REF)))
1104 return error;
1105 rrblock = XFS_BUF_TO_INOBT_BLOCK(rrbp);
1106 if ((error = xfs_btree_check_sblock(cur, rrblock, level, rrbp)))
1107 return error;
1108 rrblock->bb_leftsib = cpu_to_be32(args.agbno);
1109 xfs_inobt_log_block(args.tp, rrbp, XFS_BB_LEFTSIB);
1110 }
1111 /*
1112 * If the cursor is really in the right block, move it there.
1113 * If it's just pointing past the last entry in left, then we'll
1114 * insert there, so don't change anything in that case.
1115 */
1116 if (cur->bc_ptrs[level] > be16_to_cpu(left->bb_numrecs) + 1) {
1117 xfs_btree_setbuf(cur, level, rbp);
1118 cur->bc_ptrs[level] -= be16_to_cpu(left->bb_numrecs);
1119 }
1120 /*
1121 * If there are more levels, we'll need another cursor which refers
1122 * the right block, no matter where this cursor was.
1123 */
1124 if (level + 1 < cur->bc_nlevels) {
1125 if ((error = xfs_btree_dup_cursor(cur, curp)))
1126 return error;
1127 (*curp)->bc_ptrs[level + 1]++;
1128 }
1129 *bnop = args.agbno;
1130 *stat = 1;
1131 return 0;
1132}
1133
1134/*
1135 * Externally visible routines. 978 * Externally visible routines.
1136 */ 979 */
1137 980
@@ -1286,6 +1129,48 @@ xfs_inobt_dup_cursor(
1286} 1129}
1287 1130
1288STATIC int 1131STATIC int
1132xfs_inobt_alloc_block(
1133 struct xfs_btree_cur *cur,
1134 union xfs_btree_ptr *start,
1135 union xfs_btree_ptr *new,
1136 int length,
1137 int *stat)
1138{
1139 xfs_alloc_arg_t args; /* block allocation args */
1140 int error; /* error return value */
1141 xfs_agblock_t sbno = be32_to_cpu(start->s);
1142
1143 XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
1144
1145 memset(&args, 0, sizeof(args));
1146 args.tp = cur->bc_tp;
1147 args.mp = cur->bc_mp;
1148 args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
1149 args.minlen = 1;
1150 args.maxlen = 1;
1151 args.prod = 1;
1152 args.type = XFS_ALLOCTYPE_NEAR_BNO;
1153
1154 error = xfs_alloc_vextent(&args);
1155 if (error) {
1156 XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
1157 return error;
1158 }
1159 if (args.fsbno == NULLFSBLOCK) {
1160 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
1161 *stat = 0;
1162 return 0;
1163 }
1164 ASSERT(args.len == 1);
1165 XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
1166
1167 new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
1168 *stat = 1;
1169 return 0;
1170}
1171
1172
1173STATIC int
1289xfs_inobt_get_maxrecs( 1174xfs_inobt_get_maxrecs(
1290 struct xfs_btree_cur *cur, 1175 struct xfs_btree_cur *cur,
1291 int level) 1176 int level)
@@ -1396,6 +1281,7 @@ static const struct xfs_btree_ops xfs_inobt_ops = {
1396 .key_len = sizeof(xfs_inobt_key_t), 1281 .key_len = sizeof(xfs_inobt_key_t),
1397 1282
1398 .dup_cursor = xfs_inobt_dup_cursor, 1283 .dup_cursor = xfs_inobt_dup_cursor,
1284 .alloc_block = xfs_inobt_alloc_block,
1399 .get_maxrecs = xfs_inobt_get_maxrecs, 1285 .get_maxrecs = xfs_inobt_get_maxrecs,
1400 .init_key_from_rec = xfs_inobt_init_key_from_rec, 1286 .init_key_from_rec = xfs_inobt_init_key_from_rec,
1401 .init_ptr_from_cur = xfs_inobt_init_ptr_from_cur, 1287 .init_ptr_from_cur = xfs_inobt_init_ptr_from_cur,
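
The even-split arithmetic in the new xfs_btree_split() (rrecs = lrecs / 2, bumped by one when the record count is odd and the pending insert will land in the left block) can be exercised in isolation. The snippet below is a small sketch of just that arithmetic with made-up example values; it is not kernel code.

    #include <stdio.h>

    /*
     * Mirrors the record-count arithmetic in xfs_btree_split(): the right
     * (new) block gets half the records; when the count is odd and the
     * pending insert will land in the left block, the right block takes
     * the extra record so both blocks end up the same size afterwards.
     */
    int main(void)
    {
            int lrecs = 9;          /* records currently in the left block */
            int cursor = 3;         /* cur->bc_ptrs[level]: pending insert position */
            int rrecs = lrecs / 2;

            if ((lrecs & 1) && cursor <= rrecs + 1)
                    rrecs++;

            printf("left keeps %d, right gets %d, copy starts at record %d\n",
                   lrecs - rrecs, rrecs, lrecs - rrecs + 1);
            return 0;
    }

With these values the left block keeps 4 records, the right block receives 5, and the copy starts at record 5 of the left block, matching the src_index computation in the generic code.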