Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.c      | 104
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c       | 174
-rw-r--r--  fs/xfs/libxfs/xfs_btree.c      |  24
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_data.c  |  39
-rw-r--r--  fs/xfs/libxfs/xfs_format.h     |  62
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.c     |  48
-rw-r--r--  fs/xfs/libxfs/xfs_sb.c         |  20
-rw-r--r--  fs/xfs/xfs_buf_item.c          |   4
-rw-r--r--  fs/xfs/xfs_discard.c           |   2
-rw-r--r--  fs/xfs/xfs_error.h             |   8
-rw-r--r--  fs/xfs/xfs_file.c              |  14
-rw-r--r--  fs/xfs/xfs_fsops.c             |  20
-rw-r--r--  fs/xfs/xfs_icache.c            |   4
-rw-r--r--  fs/xfs/xfs_inode.c             |  26
-rw-r--r--  fs/xfs/xfs_inode.h             |  31
-rw-r--r--  fs/xfs/xfs_iomap.c             |   3
-rw-r--r--  fs/xfs/xfs_iops.c              |  60
-rw-r--r--  fs/xfs/xfs_iops.h              |   2
-rw-r--r--  fs/xfs/xfs_itable.c            |   2
-rw-r--r--  fs/xfs/xfs_linux.h             |   9
-rw-r--r--  fs/xfs/xfs_log_recover.c       |   4
-rw-r--r--  fs/xfs/xfs_mount.c             | 918
-rw-r--r--  fs/xfs/xfs_mount.h             |  95
-rw-r--r--  fs/xfs/xfs_pnfs.c              |   4
-rw-r--r--  fs/xfs/xfs_qm.c                |  18
-rw-r--r--  fs/xfs/xfs_super.c             | 124
-rw-r--r--  fs/xfs/xfs_super.h             |   2
-rw-r--r--  fs/xfs/xfs_symlink.c           |  58
-rw-r--r--  fs/xfs/xfs_trans.c             | 234
29 files changed, 700 insertions(+), 1413 deletions(-)
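Two mechanical transformations account for almost all of the churn below. First, the corruption-check macros XFS_WANT_CORRUPTED_RETURN() and XFS_WANT_CORRUPTED_GOTO() gain a struct xfs_mount argument so that a failed check can report which filesystem is corrupt instead of leaving the report anonymous. Second, the old in-core superblock counter calls (xfs_icsb_modify_counters() and xfs_mod_incore_sb() with XFS_SBS_FDBLOCKS/XFS_SBS_FREXTENTS) are replaced by the dedicated helpers xfs_mod_fdblocks() and xfs_mod_frextents(). As a rough sketch of the post-patch GOTO macro (the real definition lives in fs/xfs/xfs_error.h; this approximates its shape rather than quoting it):

	/* Approximate shape of the reworked macro: the new 'mp' argument
	 * lets the corruption report name the affected filesystem. It
	 * still assumes a local 'error' variable and the label 'l' are
	 * in scope at the call site. */
	#define XFS_WANT_CORRUPTED_GOTO(mp, x, l)			\
	{								\
		int fs_is_ok = (x);					\
		ASSERT(fs_is_ok);					\
		if (!fs_is_ok) {					\
			XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO",	\
					 XFS_ERRLEVEL_LOW, mp);		\
			error = -EFSCORRUPTED;				\
			goto l;						\
		}							\
	}

The RETURN variant is analogous but returns -EFSCORRUPTED directly instead of jumping to a label.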
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index a6fbf4472017..516162be1398 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -260,6 +260,7 @@ xfs_alloc_fix_len(
 		rlen = rlen - (k - args->mod);
 	else
 		rlen = rlen - args->prod + (args->mod - k);
+	/* casts to (int) catch length underflows */
 	if ((int)rlen < (int)args->minlen)
 		return;
 	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
@@ -286,7 +287,8 @@ xfs_alloc_fix_minleft(
 	if (diff >= 0)
 		return 1;
 	args->len += diff;		/* shrink the allocated space */
-	if (args->len >= args->minlen)
+	/* casts to (int) catch length underflows */
+	if ((int)args->len >= (int)args->minlen)
 		return 1;
 	args->agbno = NULLAGBLOCK;
 	return 0;
@@ -315,6 +317,9 @@ xfs_alloc_fixup_trees(
 	xfs_agblock_t	nfbno2;		/* second new free startblock */
 	xfs_extlen_t	nflen1=0;	/* first new free length */
 	xfs_extlen_t	nflen2=0;	/* second new free length */
+	struct xfs_mount *mp;
+
+	mp = cnt_cur->bc_mp;
 
 	/*
 	 * Look up the record in the by-size tree if necessary.
@@ -323,13 +328,13 @@ xfs_alloc_fixup_trees(
 #ifdef DEBUG
 		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
 			return error;
-		XFS_WANT_CORRUPTED_RETURN(
+		XFS_WANT_CORRUPTED_RETURN(mp,
 			i == 1 && nfbno1 == fbno && nflen1 == flen);
 #endif
 	} else {
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
 			return error;
-		XFS_WANT_CORRUPTED_RETURN(i == 1);
+		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
 	}
 	/*
 	 * Look up the record in the by-block tree if necessary.
@@ -338,13 +343,13 @@ xfs_alloc_fixup_trees(
 #ifdef DEBUG
 		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
 			return error;
-		XFS_WANT_CORRUPTED_RETURN(
+		XFS_WANT_CORRUPTED_RETURN(mp,
 			i == 1 && nfbno1 == fbno && nflen1 == flen);
 #endif
 	} else {
 		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
 			return error;
-		XFS_WANT_CORRUPTED_RETURN(i == 1);
+		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
 	}
 
 #ifdef DEBUG
@@ -355,7 +360,7 @@ xfs_alloc_fixup_trees(
 		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
 		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
 
-		XFS_WANT_CORRUPTED_RETURN(
+		XFS_WANT_CORRUPTED_RETURN(mp,
 			bnoblock->bb_numrecs == cntblock->bb_numrecs);
 	}
 #endif
@@ -386,25 +391,25 @@ xfs_alloc_fixup_trees(
 	 */
 	if ((error = xfs_btree_delete(cnt_cur, &i)))
 		return error;
-	XFS_WANT_CORRUPTED_RETURN(i == 1);
+	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
 	/*
 	 * Add new by-size btree entry(s).
 	 */
 	if (nfbno1 != NULLAGBLOCK) {
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
 			return error;
-		XFS_WANT_CORRUPTED_RETURN(i == 0);
+		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
 		if ((error = xfs_btree_insert(cnt_cur, &i)))
 			return error;
-		XFS_WANT_CORRUPTED_RETURN(i == 1);
+		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
 	}
 	if (nfbno2 != NULLAGBLOCK) {
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
 			return error;
-		XFS_WANT_CORRUPTED_RETURN(i == 0);
+		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
 		if ((error = xfs_btree_insert(cnt_cur, &i)))
 			return error;
-		XFS_WANT_CORRUPTED_RETURN(i == 1);
+		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
 	}
 	/*
 	 * Fix up the by-block btree entry(s).
@@ -415,7 +420,7 @@ xfs_alloc_fixup_trees(
 		 */
 		if ((error = xfs_btree_delete(bno_cur, &i)))
 			return error;
-		XFS_WANT_CORRUPTED_RETURN(i == 1);
+		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
 	} else {
 		/*
 		 * Update the by-block entry to start later|be shorter.
@@ -429,10 +434,10 @@ xfs_alloc_fixup_trees(
 		 */
 		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
 			return error;
-		XFS_WANT_CORRUPTED_RETURN(i == 0);
+		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
 		if ((error = xfs_btree_insert(bno_cur, &i)))
 			return error;
-		XFS_WANT_CORRUPTED_RETURN(i == 1);
+		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
 	}
 	return 0;
 }
@@ -682,7 +687,7 @@ xfs_alloc_ag_vextent_exact(
 	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
 	if (error)
 		goto error0;
-	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+	XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
 	ASSERT(fbno <= args->agbno);
 
 	/*
@@ -783,7 +788,7 @@ xfs_alloc_find_best_extent(
 		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
 		if (error)
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
 		xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);
 
 		/*
@@ -946,7 +951,7 @@ restart:
 			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
 					&ltlen, &i)))
 				goto error0;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
 			if (ltlen >= args->minlen)
 				break;
 			if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
@@ -966,7 +971,7 @@ restart:
 		 */
 		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
 		xfs_alloc_compute_aligned(args, ltbno, ltlen,
 				&ltbnoa, &ltlena);
 		if (ltlena < args->minlen)
@@ -999,7 +1004,7 @@ restart:
 		cnt_cur->bc_ptrs[0] = besti;
 		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
 		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
 		args->len = blen;
 		if (!xfs_alloc_fix_minleft(args)) {
@@ -1088,7 +1093,7 @@ restart:
 	if (bno_cur_lt) {
 		if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
 		xfs_alloc_compute_aligned(args, ltbno, ltlen,
 				&ltbnoa, &ltlena);
 		if (ltlena >= args->minlen)
@@ -1104,7 +1109,7 @@ restart:
 	if (bno_cur_gt) {
 		if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
 		xfs_alloc_compute_aligned(args, gtbno, gtlen,
 				&gtbnoa, &gtlena);
 		if (gtlena >= args->minlen)
@@ -1303,7 +1308,7 @@ restart:
 		error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
 		if (error)
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
 
 		xfs_alloc_compute_aligned(args, fbno, flen,
 				&rbno, &rlen);
@@ -1342,7 +1347,7 @@ restart:
 	 * This can't happen in the second case above.
 	 */
 	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
-	XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
+	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
 			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
 	if (rlen < args->maxlen) {
 		xfs_agblock_t	bestfbno;
@@ -1362,13 +1367,13 @@ restart:
 			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
 					&i)))
 				goto error0;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
 			if (flen < bestrlen)
 				break;
 			xfs_alloc_compute_aligned(args, fbno, flen,
 					&rbno, &rlen);
 			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
-			XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
+			XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
 				(rlen <= flen && rbno + rlen <= fbno + flen),
 				error0);
 			if (rlen > bestrlen) {
@@ -1383,7 +1388,7 @@ restart:
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
 				&i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
 		rlen = bestrlen;
 		rbno = bestrbno;
 		flen = bestflen;
@@ -1408,7 +1413,7 @@ restart:
 	if (!xfs_alloc_fix_minleft(args))
 		goto out_nominleft;
 	rlen = args->len;
-	XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
+	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
 	/*
 	 * Allocate and initialize a cursor for the by-block tree.
 	 */
@@ -1422,7 +1427,7 @@ restart:
 	cnt_cur = bno_cur = NULL;
 	args->len = rlen;
 	args->agbno = rbno;
-	XFS_WANT_CORRUPTED_GOTO(
+	XFS_WANT_CORRUPTED_GOTO(args->mp,
 		args->agbno + args->len <=
 		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
 		error0);
@@ -1467,7 +1472,7 @@ xfs_alloc_ag_vextent_small(
 	if (i) {
 		if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
 	}
 	/*
 	 * Nothing in the btree, try the freelist. Make sure
@@ -1493,7 +1498,7 @@ xfs_alloc_ag_vextent_small(
 		}
 		args->len = 1;
 		args->agbno = fbno;
-		XFS_WANT_CORRUPTED_GOTO(
+		XFS_WANT_CORRUPTED_GOTO(args->mp,
 			args->agbno + args->len <=
 			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
 			error0);
@@ -1579,7 +1584,7 @@ xfs_free_ag_extent(
 		 */
 		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 		/*
 		 * It's not contiguous, though.
 		 */
@@ -1591,7 +1596,8 @@ xfs_free_ag_extent(
 			 * space was invalid, it's (partly) already free.
 			 * Very bad.
 			 */
-			XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
+			XFS_WANT_CORRUPTED_GOTO(mp,
+						ltbno + ltlen <= bno, error0);
 		}
 	}
 	/*
@@ -1606,7 +1612,7 @@ xfs_free_ag_extent(
 		 */
 		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 		/*
 		 * It's not contiguous, though.
 		 */
@@ -1618,7 +1624,7 @@ xfs_free_ag_extent(
 			 * space was invalid, it's (partly) already free.
 			 * Very bad.
 			 */
-			XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
+			XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
 		}
 	}
 	/*
@@ -1635,31 +1641,31 @@ xfs_free_ag_extent(
 		 */
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 		if ((error = xfs_btree_delete(cnt_cur, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 		/*
 		 * Delete the old by-size entry on the right.
 		 */
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 		if ((error = xfs_btree_delete(cnt_cur, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 		/*
 		 * Delete the old by-block entry for the right block.
 		 */
 		if ((error = xfs_btree_delete(bno_cur, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 		/*
 		 * Move the by-block cursor back to the left neighbor.
 		 */
 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 #ifdef DEBUG
 		/*
 		 * Check that this is the right record: delete didn't
@@ -1672,7 +1678,7 @@ xfs_free_ag_extent(
 			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
 					&i)))
 				goto error0;
-			XFS_WANT_CORRUPTED_GOTO(
+			XFS_WANT_CORRUPTED_GOTO(mp,
 				i == 1 && xxbno == ltbno && xxlen == ltlen,
 				error0);
 		}
@@ -1695,17 +1701,17 @@ xfs_free_ag_extent(
 		 */
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 		if ((error = xfs_btree_delete(cnt_cur, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 		/*
 		 * Back up the by-block cursor to the left neighbor, and
 		 * update its length.
 		 */
 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 		nbno = ltbno;
 		nlen = len + ltlen;
 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
@@ -1721,10 +1727,10 @@ xfs_free_ag_extent(
 		 */
 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 		if ((error = xfs_btree_delete(cnt_cur, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 		/*
 		 * Update the starting block and length of the right
 		 * neighbor in the by-block tree.
@@ -1743,7 +1749,7 @@ xfs_free_ag_extent(
 		nlen = len;
 		if ((error = xfs_btree_insert(bno_cur, &i)))
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 	}
 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
 	bno_cur = NULL;
@@ -1752,10 +1758,10 @@ xfs_free_ag_extent(
 	 */
 	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
 		goto error0;
-	XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
+	XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
 	if ((error = xfs_btree_insert(cnt_cur, &i)))
 		goto error0;
-	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
 	cnt_cur = NULL;
 
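The pattern throughout this file is uniform: functions that carry an xfs_alloc_arg pass args->mp, while xfs_alloc_fixup_trees(), which only has cursors, hoists the mount out of one of them (mp = cnt_cur->bc_mp) once at the top. A minimal, hypothetical caller showing the scoping the GOTO macro relies on (illustrative only, not a function from this patch):

	/* Hypothetical helper: a local 'error' and the 'error0' label must
	 * be in scope, because the macro assigns one and jumps to the other. */
	STATIC int
	example_delete_rec(
		struct xfs_btree_cur	*cur)
	{
		int			error;
		int			i;

		error = xfs_btree_delete(cur, &i);
		if (error)
			goto error0;
		/* exactly one record must have been deleted; otherwise
		 * report corruption against cur->bc_mp and bail out */
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
		return 0;
	error0:
		return error;
	}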
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 61ec015dca16..8ae3775a9b63 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -410,7 +410,7 @@ xfs_bmap_check_leaf_extents(
 				goto error_norelse;
 		}
 		block = XFS_BUF_TO_BLOCK(bp);
-		XFS_WANT_CORRUPTED_GOTO(
+		XFS_WANT_CORRUPTED_GOTO(mp,
 			xfs_bmap_sanity_check(mp, bp, level),
 			error0);
 		if (level == 0)
@@ -424,7 +424,8 @@ xfs_bmap_check_leaf_extents(
 		xfs_check_block(block, mp, 0, 0);
 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
 		bno = be64_to_cpu(*pp);
-		XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
+		XFS_WANT_CORRUPTED_GOTO(mp,
+			XFS_FSB_SANITY_CHECK(mp, bno), error0);
 		if (bp_release) {
 			bp_release = 0;
 			xfs_trans_brelse(NULL, bp);
@@ -1029,7 +1030,7 @@ xfs_bmap_add_attrfork_btree(
 		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
 			goto error0;
 		/* must be at least one entry */
-		XFS_WANT_CORRUPTED_GOTO(stat == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
 		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
 			goto error0;
 		if (stat == 0) {
@@ -1311,14 +1312,14 @@ xfs_bmap_read_extents(
 		if (error)
 			return error;
 		block = XFS_BUF_TO_BLOCK(bp);
-		XFS_WANT_CORRUPTED_GOTO(
-			xfs_bmap_sanity_check(mp, bp, level),
-			error0);
+		XFS_WANT_CORRUPTED_GOTO(mp,
+			xfs_bmap_sanity_check(mp, bp, level), error0);
 		if (level == 0)
 			break;
 		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
 		bno = be64_to_cpu(*pp);
-		XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, bno), error0);
+		XFS_WANT_CORRUPTED_GOTO(mp,
+			XFS_FSB_SANITY_CHECK(mp, bno), error0);
 		xfs_trans_brelse(tp, bp);
 	}
 	/*
@@ -1345,7 +1346,7 @@ xfs_bmap_read_extents(
 				XFS_ERRLEVEL_LOW, ip->i_mount, block);
 			goto error0;
 		}
-		XFS_WANT_CORRUPTED_GOTO(
+		XFS_WANT_CORRUPTED_GOTO(mp,
 			xfs_bmap_sanity_check(mp, bp, 0),
 			error0);
 		/*
@@ -1755,7 +1756,9 @@ xfs_bmap_add_extent_delay_real(
 	xfs_filblks_t		temp=0;	/* value for da_new calculations */
 	xfs_filblks_t		temp2=0;/* value for da_new calculations */
 	int			tmp_rval;	/* partial logging flags */
+	struct xfs_mount	*mp;
 
+	mp = bma->tp ? bma->tp->t_mountp : NULL;
 	ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK);
 
 	ASSERT(bma->idx >= 0);
@@ -1866,15 +1869,15 @@ xfs_bmap_add_extent_delay_real(
 					RIGHT.br_blockcount, &i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			error = xfs_btree_delete(bma->cur, &i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			error = xfs_btree_decrement(bma->cur, 0, &i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
 					LEFT.br_startblock,
 					LEFT.br_blockcount +
@@ -1907,7 +1910,7 @@ xfs_bmap_add_extent_delay_real(
 					&i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
 					LEFT.br_startblock,
 					LEFT.br_blockcount +
@@ -1938,7 +1941,7 @@ xfs_bmap_add_extent_delay_real(
 					RIGHT.br_blockcount, &i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
 					new->br_startblock,
 					PREV.br_blockcount +
@@ -1968,12 +1971,12 @@ xfs_bmap_add_extent_delay_real(
 					&i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
 			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
 			error = xfs_btree_insert(bma->cur, &i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		}
 		break;
 
@@ -2001,7 +2004,7 @@ xfs_bmap_add_extent_delay_real(
 					&i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
 					LEFT.br_startblock,
 					LEFT.br_blockcount +
@@ -2038,12 +2041,12 @@ xfs_bmap_add_extent_delay_real(
 					&i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
 			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
 			error = xfs_btree_insert(bma->cur, &i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		}
 
 		if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
@@ -2084,7 +2087,7 @@ xfs_bmap_add_extent_delay_real(
 					RIGHT.br_blockcount, &i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			error = xfs_bmbt_update(bma->cur, new->br_startoff,
 					new->br_startblock,
 					new->br_blockcount +
@@ -2122,12 +2125,12 @@ xfs_bmap_add_extent_delay_real(
 					&i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
 			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
 			error = xfs_btree_insert(bma->cur, &i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		}
 
 		if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
@@ -2191,12 +2194,12 @@ xfs_bmap_add_extent_delay_real(
 					&i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
 			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
 			error = xfs_btree_insert(bma->cur, &i);
 			if (error)
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		}
 
 		if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
@@ -2212,9 +2215,8 @@ xfs_bmap_add_extent_delay_real(
 		diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
 			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
 		if (diff > 0) {
-			error = xfs_icsb_modify_counters(bma->ip->i_mount,
-					XFS_SBS_FDBLOCKS,
-					-((int64_t)diff), 0);
+			error = xfs_mod_fdblocks(bma->ip->i_mount,
+						 -((int64_t)diff), false);
 			ASSERT(!error);
 			if (error)
 				goto done;
@@ -2265,9 +2267,8 @@ xfs_bmap_add_extent_delay_real(
 			temp += bma->cur->bc_private.b.allocated;
 		ASSERT(temp <= da_old);
 		if (temp < da_old)
-			xfs_icsb_modify_counters(bma->ip->i_mount,
-					XFS_SBS_FDBLOCKS,
-					(int64_t)(da_old - temp), 0);
+			xfs_mod_fdblocks(bma->ip->i_mount,
+					(int64_t)(da_old - temp), false);
 	}
 
 	/* clear out the allocated field, done with it now in any case. */
@@ -2309,6 +2310,7 @@ xfs_bmap_add_extent_unwritten_real(
 					/* left is 0, right is 1, prev is 2 */
 	int			rval=0;	/* return value (logging flags) */
 	int			state = 0;/* state bits, accessed thru macros */
+	struct xfs_mount	*mp = tp->t_mountp;
 
 	*logflagsp = 0;
 
@@ -2421,19 +2423,19 @@ xfs_bmap_add_extent_unwritten_real(
 					RIGHT.br_startblock,
 					RIGHT.br_blockcount, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_btree_delete(cur, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_btree_decrement(cur, 0, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_btree_delete(cur, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_btree_decrement(cur, 0, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
 				LEFT.br_startblock,
 				LEFT.br_blockcount + PREV.br_blockcount +
@@ -2464,13 +2466,13 @@ xfs_bmap_add_extent_unwritten_real(
 				PREV.br_startblock, PREV.br_blockcount,
 				&i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_btree_delete(cur, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_btree_decrement(cur, 0, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
 				LEFT.br_startblock,
 				LEFT.br_blockcount + PREV.br_blockcount,
@@ -2499,13 +2501,13 @@ xfs_bmap_add_extent_unwritten_real(
 					RIGHT.br_startblock,
 					RIGHT.br_blockcount, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_btree_delete(cur, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_btree_decrement(cur, 0, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_bmbt_update(cur, new->br_startoff,
 				new->br_startblock,
 				new->br_blockcount + RIGHT.br_blockcount,
@@ -2532,7 +2534,7 @@ xfs_bmap_add_extent_unwritten_real(
 				new->br_startblock, new->br_blockcount,
 				&i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_bmbt_update(cur, new->br_startoff,
 				new->br_startblock, new->br_blockcount,
 				newext)))
@@ -2569,7 +2571,7 @@ xfs_bmap_add_extent_unwritten_real(
 				PREV.br_startblock, PREV.br_blockcount,
 				&i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_bmbt_update(cur,
 				PREV.br_startoff + new->br_blockcount,
 				PREV.br_startblock + new->br_blockcount,
@@ -2611,7 +2613,7 @@ xfs_bmap_add_extent_unwritten_real(
 				PREV.br_startblock, PREV.br_blockcount,
 				&i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_bmbt_update(cur,
 				PREV.br_startoff + new->br_blockcount,
 				PREV.br_startblock + new->br_blockcount,
@@ -2621,7 +2623,7 @@ xfs_bmap_add_extent_unwritten_real(
 			cur->bc_rec.b = *new;
 			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		}
 		break;
 
@@ -2651,7 +2653,7 @@ xfs_bmap_add_extent_unwritten_real(
 				PREV.br_startblock,
 				PREV.br_blockcount, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
 				PREV.br_startblock,
 				PREV.br_blockcount - new->br_blockcount,
@@ -2689,7 +2691,7 @@ xfs_bmap_add_extent_unwritten_real(
 				PREV.br_startblock, PREV.br_blockcount,
 				&i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
 				PREV.br_startblock,
 				PREV.br_blockcount - new->br_blockcount,
@@ -2699,11 +2701,11 @@ xfs_bmap_add_extent_unwritten_real(
 				new->br_startblock, new->br_blockcount,
 				&i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
 			cur->bc_rec.b.br_state = XFS_EXT_NORM;
 			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		}
 		break;
 
@@ -2737,7 +2739,7 @@ xfs_bmap_add_extent_unwritten_real(
 				PREV.br_startblock, PREV.br_blockcount,
 				&i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			/* new right extent - oldext */
 			if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
 				r[1].br_startblock, r[1].br_blockcount,
@@ -2749,7 +2751,7 @@ xfs_bmap_add_extent_unwritten_real(
 				new->br_startoff - PREV.br_startoff;
 			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			/*
 			 * Reset the cursor to the position of the new extent
 			 * we are about to insert as we can't trust it after
@@ -2759,12 +2761,12 @@ xfs_bmap_add_extent_unwritten_real(
 				new->br_startblock, new->br_blockcount,
 				&i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
 			/* new middle extent - newext */
 			cur->bc_rec.b.br_state = new->br_state;
 			if ((error = xfs_btree_insert(cur, &i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		}
 		break;
 
@@ -2944,8 +2946,8 @@ xfs_bmap_add_extent_hole_delay(
 	}
 	if (oldlen != newlen) {
 		ASSERT(oldlen > newlen);
-		xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
-			(int64_t)(oldlen - newlen), 0);
+		xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
+				 false);
 		/*
 		 * Nothing to do for disk quota accounting here.
 		 */
@@ -2968,7 +2970,9 @@ xfs_bmap_add_extent_hole_real(
 	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
 	int			rval=0;	/* return value (logging flags) */
 	int			state;	/* state bits, accessed thru macros */
+	struct xfs_mount	*mp;
 
+	mp = bma->tp ? bma->tp->t_mountp : NULL;
 	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
 
 	ASSERT(bma->idx >= 0);
@@ -3056,15 +3060,15 @@ xfs_bmap_add_extent_hole_real(
 				&i);
 		if (error)
 			goto done;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		error = xfs_btree_delete(bma->cur, &i);
 		if (error)
 			goto done;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		error = xfs_btree_decrement(bma->cur, 0, &i);
 		if (error)
 			goto done;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		error = xfs_bmbt_update(bma->cur, left.br_startoff,
 				left.br_startblock,
 				left.br_blockcount +
@@ -3097,7 +3101,7 @@ xfs_bmap_add_extent_hole_real(
 				&i);
 		if (error)
 			goto done;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		error = xfs_bmbt_update(bma->cur, left.br_startoff,
 				left.br_startblock,
 				left.br_blockcount +
@@ -3131,7 +3135,7 @@ xfs_bmap_add_extent_hole_real(
 				right.br_blockcount, &i);
 		if (error)
 			goto done;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		error = xfs_bmbt_update(bma->cur, new->br_startoff,
 				new->br_startblock,
 				new->br_blockcount +
@@ -3161,12 +3165,12 @@ xfs_bmap_add_extent_hole_real(
 				new->br_blockcount, &i);
 		if (error)
 			goto done;
-		XFS_WANT_CORRUPTED_GOTO(i == 0, done);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
 		bma->cur->bc_rec.b.br_state = new->br_state;
 		error = xfs_btree_insert(bma->cur, &i);
 		if (error)
 			goto done;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 	}
 	break;
 }
@@ -4160,18 +4164,15 @@ xfs_bmapi_reserve_delalloc(
 	ASSERT(indlen > 0);
 
 	if (rt) {
-		error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
-					  -((int64_t)extsz), 0);
+		error = xfs_mod_frextents(mp, -((int64_t)extsz));
 	} else {
-		error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
-						 -((int64_t)alen), 0);
+		error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
 	}
 
 	if (error)
 		goto out_unreserve_quota;
 
-	error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
-					 -((int64_t)indlen), 0);
+	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
 	if (error)
 		goto out_unreserve_blocks;
 
@@ -4198,9 +4199,9 @@ xfs_bmapi_reserve_delalloc(
 
 out_unreserve_blocks:
 	if (rt)
-		xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, extsz, 0);
+		xfs_mod_frextents(mp, extsz);
 	else
-		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, alen, 0);
+		xfs_mod_fdblocks(mp, alen, false);
out_unreserve_quota:
 	if (XFS_IS_QUOTA_ON(mp))
 		xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
@@ -4801,7 +4802,7 @@ xfs_bmap_del_extent(
 					got.br_startblock, got.br_blockcount,
 					&i)))
 				goto done;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		}
 		da_old = da_new = 0;
 	} else {
@@ -4835,7 +4836,7 @@ xfs_bmap_del_extent(
 		}
 		if ((error = xfs_btree_delete(cur, &i)))
 			goto done;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 		break;

 	case 2:
@@ -4935,7 +4936,8 @@ xfs_bmap_del_extent(
 						got.br_startblock,
 						temp, &i)))
 					goto done;
-				XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+				XFS_WANT_CORRUPTED_GOTO(mp,
+							i == 1, done);
 				/*
 				 * Update the btree record back
 				 * to the original value.
@@ -4956,7 +4958,7 @@ xfs_bmap_del_extent(
 					error = -ENOSPC;
 					goto done;
 				}
-				XFS_WANT_CORRUPTED_GOTO(i == 1, done);
+				XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
 			} else
 				flags |= xfs_ilog_fext(whichfork);
 			XFS_IFORK_NEXT_SET(ip, whichfork,
@@ -5012,10 +5014,8 @@ xfs_bmap_del_extent(
 	 * Nothing to do for disk quota accounting here.
 	 */
 	ASSERT(da_old >= da_new);
-	if (da_old > da_new) {
-		xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
-			(int64_t)(da_old - da_new), 0);
-	}
+	if (da_old > da_new)
+		xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
done:
 	*logflagsp = flags;
 	return error;
@@ -5284,14 +5284,13 @@ xfs_bunmapi(
 
 				rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
 				do_div(rtexts, mp->m_sb.sb_rextsize);
-				xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
-						(int64_t)rtexts, 0);
+				xfs_mod_frextents(mp, (int64_t)rtexts);
 				(void)xfs_trans_reserve_quota_nblks(NULL,
 					ip, -((long)del.br_blockcount), 0,
 					XFS_QMOPT_RES_RTBLKS);
 			} else {
-				xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
-						(int64_t)del.br_blockcount, 0);
+				xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount,
+						 false);
 				(void)xfs_trans_reserve_quota_nblks(NULL,
 					ip, -((long)del.br_blockcount), 0,
 					XFS_QMOPT_RES_REGBLKS);
@@ -5453,6 +5452,7 @@ xfs_bmse_merge(
 	struct xfs_bmbt_irec		left;
 	xfs_filblks_t			blockcount;
 	int				error, i;
+	struct xfs_mount		*mp = ip->i_mount;
 
 	xfs_bmbt_get_all(gotp, &got);
 	xfs_bmbt_get_all(leftp, &left);
@@ -5487,19 +5487,19 @@ xfs_bmse_merge(
 			       got.br_blockcount, &i);
 	if (error)
 		return error;
-	XFS_WANT_CORRUPTED_RETURN(i == 1);
+	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
 
 	error = xfs_btree_delete(cur, &i);
 	if (error)
 		return error;
-	XFS_WANT_CORRUPTED_RETURN(i == 1);
+	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
 
 	/* lookup and update size of the previous extent */
 	error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
 				   left.br_blockcount, &i);
 	if (error)
 		return error;
-	XFS_WANT_CORRUPTED_RETURN(i == 1);
+	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
 
 	left.br_blockcount = blockcount;
 
@@ -5521,6 +5521,7 @@ xfs_bmse_shift_one(
 	int				*logflags)
 {
 	struct xfs_ifork		*ifp;
+	struct xfs_mount		*mp;
 	xfs_fileoff_t			startoff;
 	struct xfs_bmbt_rec_host	*leftp;
 	struct xfs_bmbt_irec		got;
@@ -5528,13 +5529,14 @@ xfs_bmse_shift_one(
 	int				error;
 	int				i;
 
+	mp = ip->i_mount;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 
 	xfs_bmbt_get_all(gotp, &got);
 	startoff = got.br_startoff - offset_shift_fsb;
 
 	/* delalloc extents should be prevented by caller */
-	XFS_WANT_CORRUPTED_RETURN(!isnullstartblock(got.br_startblock));
+	XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
 
 	/*
 	 * Check for merge if we've got an extent to the left, otherwise make
@@ -5573,7 +5575,7 @@ xfs_bmse_shift_one(
 			       got.br_blockcount, &i);
 	if (error)
 		return error;
-	XFS_WANT_CORRUPTED_RETURN(i == 1);
+	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
 
 	got.br_startoff = startoff;
 	return xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
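All of the block accounting in this file now funnels through xfs_mod_fdblocks() (free data blocks) and xfs_mod_frextents() (free realtime extents) rather than open-coded xfs_icsb_modify_counters()/xfs_mod_incore_sb() calls. The extra bool argument to xfs_mod_fdblocks() selects whether the caller may dip into the reserved block pool; every conversion above passes false, preserving the old behaviour. A hedged sketch of the reserve/unwind pattern used by xfs_bmapi_reserve_delalloc() (the helper signatures come from this series; the surrounding control flow is illustrative, not quoted from the patch):

	xfs_extlen_t	alen = ...;	/* blocks to reserve for delalloc */
	int		error;

	/* take the blocks out of the free-space counter; fails with
	 * -ENOSPC rather than letting the counter go negative */
	error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
	if (error)
		return error;

	/* ... if a later step fails, return the blocks ... */
	xfs_mod_fdblocks(mp, (int64_t)alen, false);

Note also the NULL-mount wrinkle: xfs_bmap_add_extent_delay_real() and xfs_bmap_add_extent_hole_real() may run without a transaction, so they compute mp = bma->tp ? bma->tp->t_mountp : NULL, and the corruption-report path has to tolerate a NULL mount on those call sites.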
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 81cad433df85..c72283dd8d44 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -168,7 +168,7 @@ xfs_btree_check_lptr(
 	xfs_fsblock_t		bno,	/* btree block disk address */
 	int			level)	/* btree block level */
 {
-	XFS_WANT_CORRUPTED_RETURN(
+	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
 		level > 0 &&
 		bno != NULLFSBLOCK &&
 		XFS_FSB_SANITY_CHECK(cur->bc_mp, bno));
@@ -187,7 +187,7 @@ xfs_btree_check_sptr(
 {
 	xfs_agblock_t		agblocks = cur->bc_mp->m_sb.sb_agblocks;
 
-	XFS_WANT_CORRUPTED_RETURN(
+	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
 		level > 0 &&
 		bno != NULLAGBLOCK &&
 		bno != 0 &&
@@ -1825,7 +1825,7 @@ xfs_btree_lookup(
 			error = xfs_btree_increment(cur, 0, &i);
 			if (error)
 				goto error0;
-			XFS_WANT_CORRUPTED_RETURN(i == 1);
+			XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
 			XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
 			*stat = 1;
 			return 0;
@@ -2285,7 +2285,7 @@ xfs_btree_rshift(
 	if (error)
 		goto error0;
 	i = xfs_btree_lastrec(tcur, level);
-	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
 
 	error = xfs_btree_increment(tcur, level, &i);
 	if (error)
@@ -3138,7 +3138,7 @@ xfs_btree_insert(
 			goto error0;
 		}
 
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
 		level++;
 
 		/*
@@ -3582,15 +3582,15 @@ xfs_btree_delrec(
 		 * Actually any entry but the first would suffice.
 		 */
 		i = xfs_btree_lastrec(tcur, level);
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
 
 		error = xfs_btree_increment(tcur, level, &i);
 		if (error)
 			goto error0;
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
 
 		i = xfs_btree_lastrec(tcur, level);
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
 
 		/* Grab a pointer to the block. */
 		right = xfs_btree_get_block(tcur, level, &rbp);
@@ -3634,12 +3634,12 @@ xfs_btree_delrec(
 		rrecs = xfs_btree_get_numrecs(right);
 		if (!xfs_btree_ptr_is_null(cur, &lptr)) {
 			i = xfs_btree_firstrec(tcur, level);
-			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+			XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
 
 			error = xfs_btree_decrement(tcur, level, &i);
 			if (error)
 				goto error0;
-			XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+			XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
 		}
 	}
 
@@ -3653,13 +3653,13 @@ xfs_btree_delrec(
 		 * previous block.
 		 */
 		i = xfs_btree_firstrec(tcur, level);
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
 
 		error = xfs_btree_decrement(tcur, level, &i);
 		if (error)
 			goto error0;
 		i = xfs_btree_firstrec(tcur, level);
-		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
 
 		/* Grab a pointer to the block. */
 		left = xfs_btree_get_block(tcur, level, &lbp);
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index 5ff31be9b1cd..de1ea16f5748 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -89,7 +89,7 @@ __xfs_dir3_data_check(
89 * so just ensure that the count falls somewhere inside the 89 * so just ensure that the count falls somewhere inside the
90 * block right now. 90 * block right now.
91 */ 91 */
92 XFS_WANT_CORRUPTED_RETURN(be32_to_cpu(btp->count) < 92 XFS_WANT_CORRUPTED_RETURN(mp, be32_to_cpu(btp->count) <
93 ((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry)); 93 ((char *)btp - p) / sizeof(struct xfs_dir2_leaf_entry));
94 break; 94 break;
95 case cpu_to_be32(XFS_DIR3_DATA_MAGIC): 95 case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
@@ -107,21 +107,21 @@ __xfs_dir3_data_check(
107 bf = ops->data_bestfree_p(hdr); 107 bf = ops->data_bestfree_p(hdr);
108 count = lastfree = freeseen = 0; 108 count = lastfree = freeseen = 0;
109 if (!bf[0].length) { 109 if (!bf[0].length) {
110 XFS_WANT_CORRUPTED_RETURN(!bf[0].offset); 110 XFS_WANT_CORRUPTED_RETURN(mp, !bf[0].offset);
111 freeseen |= 1 << 0; 111 freeseen |= 1 << 0;
112 } 112 }
113 if (!bf[1].length) { 113 if (!bf[1].length) {
114 XFS_WANT_CORRUPTED_RETURN(!bf[1].offset); 114 XFS_WANT_CORRUPTED_RETURN(mp, !bf[1].offset);
115 freeseen |= 1 << 1; 115 freeseen |= 1 << 1;
116 } 116 }
117 if (!bf[2].length) { 117 if (!bf[2].length) {
118 XFS_WANT_CORRUPTED_RETURN(!bf[2].offset); 118 XFS_WANT_CORRUPTED_RETURN(mp, !bf[2].offset);
119 freeseen |= 1 << 2; 119 freeseen |= 1 << 2;
120 } 120 }
121 121
122 XFS_WANT_CORRUPTED_RETURN(be16_to_cpu(bf[0].length) >= 122 XFS_WANT_CORRUPTED_RETURN(mp, be16_to_cpu(bf[0].length) >=
123 be16_to_cpu(bf[1].length)); 123 be16_to_cpu(bf[1].length));
124 XFS_WANT_CORRUPTED_RETURN(be16_to_cpu(bf[1].length) >= 124 XFS_WANT_CORRUPTED_RETURN(mp, be16_to_cpu(bf[1].length) >=
125 be16_to_cpu(bf[2].length)); 125 be16_to_cpu(bf[2].length));
126 /* 126 /*
127 * Loop over the data/unused entries. 127 * Loop over the data/unused entries.
@@ -134,18 +134,18 @@ __xfs_dir3_data_check(
134 * doesn't need to be there. 134 * doesn't need to be there.
135 */ 135 */
136 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { 136 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
137 XFS_WANT_CORRUPTED_RETURN(lastfree == 0); 137 XFS_WANT_CORRUPTED_RETURN(mp, lastfree == 0);
138 XFS_WANT_CORRUPTED_RETURN( 138 XFS_WANT_CORRUPTED_RETURN(mp,
139 be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) == 139 be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) ==
140 (char *)dup - (char *)hdr); 140 (char *)dup - (char *)hdr);
141 dfp = xfs_dir2_data_freefind(hdr, bf, dup); 141 dfp = xfs_dir2_data_freefind(hdr, bf, dup);
142 if (dfp) { 142 if (dfp) {
143 i = (int)(dfp - bf); 143 i = (int)(dfp - bf);
144 XFS_WANT_CORRUPTED_RETURN( 144 XFS_WANT_CORRUPTED_RETURN(mp,
145 (freeseen & (1 << i)) == 0); 145 (freeseen & (1 << i)) == 0);
146 freeseen |= 1 << i; 146 freeseen |= 1 << i;
147 } else { 147 } else {
148 XFS_WANT_CORRUPTED_RETURN( 148 XFS_WANT_CORRUPTED_RETURN(mp,
149 be16_to_cpu(dup->length) <= 149 be16_to_cpu(dup->length) <=
150 be16_to_cpu(bf[2].length)); 150 be16_to_cpu(bf[2].length));
151 } 151 }
@@ -160,13 +160,13 @@ __xfs_dir3_data_check(
160 * The linear search is crude but this is DEBUG code. 160 * The linear search is crude but this is DEBUG code.
161 */ 161 */
162 dep = (xfs_dir2_data_entry_t *)p; 162 dep = (xfs_dir2_data_entry_t *)p;
163 XFS_WANT_CORRUPTED_RETURN(dep->namelen != 0); 163 XFS_WANT_CORRUPTED_RETURN(mp, dep->namelen != 0);
164 XFS_WANT_CORRUPTED_RETURN( 164 XFS_WANT_CORRUPTED_RETURN(mp,
165 !xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber))); 165 !xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)));
166 XFS_WANT_CORRUPTED_RETURN( 166 XFS_WANT_CORRUPTED_RETURN(mp,
167 be16_to_cpu(*ops->data_entry_tag_p(dep)) == 167 be16_to_cpu(*ops->data_entry_tag_p(dep)) ==
168 (char *)dep - (char *)hdr); 168 (char *)dep - (char *)hdr);
169 XFS_WANT_CORRUPTED_RETURN( 169 XFS_WANT_CORRUPTED_RETURN(mp,
170 ops->data_get_ftype(dep) < XFS_DIR3_FT_MAX); 170 ops->data_get_ftype(dep) < XFS_DIR3_FT_MAX);
171 count++; 171 count++;
172 lastfree = 0; 172 lastfree = 0;
@@ -183,14 +183,15 @@ __xfs_dir3_data_check(
183 be32_to_cpu(lep[i].hashval) == hash) 183 be32_to_cpu(lep[i].hashval) == hash)
184 break; 184 break;
185 } 185 }
186 XFS_WANT_CORRUPTED_RETURN(i < be32_to_cpu(btp->count)); 186 XFS_WANT_CORRUPTED_RETURN(mp,
187 i < be32_to_cpu(btp->count));
187 } 188 }
188 p += ops->data_entsize(dep->namelen); 189 p += ops->data_entsize(dep->namelen);
189 } 190 }
190 /* 191 /*
191 * Need to have seen all the entries and all the bestfree slots. 192 * Need to have seen all the entries and all the bestfree slots.
192 */ 193 */
193 XFS_WANT_CORRUPTED_RETURN(freeseen == 7); 194 XFS_WANT_CORRUPTED_RETURN(mp, freeseen == 7);
194 if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) || 195 if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC) ||
195 hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) { 196 hdr->magic == cpu_to_be32(XFS_DIR3_BLOCK_MAGIC)) {
196 for (i = stale = 0; i < be32_to_cpu(btp->count); i++) { 197 for (i = stale = 0; i < be32_to_cpu(btp->count); i++) {
@@ -198,13 +199,13 @@ __xfs_dir3_data_check(
198 cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) 199 cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
199 stale++; 200 stale++;
200 if (i > 0) 201 if (i > 0)
201 XFS_WANT_CORRUPTED_RETURN( 202 XFS_WANT_CORRUPTED_RETURN(mp,
202 be32_to_cpu(lep[i].hashval) >= 203 be32_to_cpu(lep[i].hashval) >=
203 be32_to_cpu(lep[i - 1].hashval)); 204 be32_to_cpu(lep[i - 1].hashval));
204 } 205 }
205 XFS_WANT_CORRUPTED_RETURN(count == 206 XFS_WANT_CORRUPTED_RETURN(mp, count ==
206 be32_to_cpu(btp->count) - be32_to_cpu(btp->stale)); 207 be32_to_cpu(btp->count) - be32_to_cpu(btp->stale));
207 XFS_WANT_CORRUPTED_RETURN(stale == be32_to_cpu(btp->stale)); 208 XFS_WANT_CORRUPTED_RETURN(mp, stale == be32_to_cpu(btp->stale));
208 } 209 }
209 return 0; 210 return 0;
210} 211}
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 8eb718979383..4daaa662337b 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -264,68 +264,6 @@ typedef struct xfs_dsb {
264 /* must be padded to 64 bit alignment */ 264 /* must be padded to 64 bit alignment */
265} xfs_dsb_t; 265} xfs_dsb_t;
266 266
267/*
268 * Sequence number values for the fields.
269 */
270typedef enum {
271 XFS_SBS_MAGICNUM, XFS_SBS_BLOCKSIZE, XFS_SBS_DBLOCKS, XFS_SBS_RBLOCKS,
272 XFS_SBS_REXTENTS, XFS_SBS_UUID, XFS_SBS_LOGSTART, XFS_SBS_ROOTINO,
273 XFS_SBS_RBMINO, XFS_SBS_RSUMINO, XFS_SBS_REXTSIZE, XFS_SBS_AGBLOCKS,
274 XFS_SBS_AGCOUNT, XFS_SBS_RBMBLOCKS, XFS_SBS_LOGBLOCKS,
275 XFS_SBS_VERSIONNUM, XFS_SBS_SECTSIZE, XFS_SBS_INODESIZE,
276 XFS_SBS_INOPBLOCK, XFS_SBS_FNAME, XFS_SBS_BLOCKLOG,
277 XFS_SBS_SECTLOG, XFS_SBS_INODELOG, XFS_SBS_INOPBLOG, XFS_SBS_AGBLKLOG,
278 XFS_SBS_REXTSLOG, XFS_SBS_INPROGRESS, XFS_SBS_IMAX_PCT, XFS_SBS_ICOUNT,
279 XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO,
280 XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN,
281 XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG,
282 XFS_SBS_LOGSECTLOG, XFS_SBS_LOGSECTSIZE, XFS_SBS_LOGSUNIT,
283 XFS_SBS_FEATURES2, XFS_SBS_BAD_FEATURES2, XFS_SBS_FEATURES_COMPAT,
284 XFS_SBS_FEATURES_RO_COMPAT, XFS_SBS_FEATURES_INCOMPAT,
285 XFS_SBS_FEATURES_LOG_INCOMPAT, XFS_SBS_CRC, XFS_SBS_PAD,
286 XFS_SBS_PQUOTINO, XFS_SBS_LSN,
287 XFS_SBS_FIELDCOUNT
288} xfs_sb_field_t;
289
290/*
291 * Mask values, defined based on the xfs_sb_field_t values.
292 * Only define the ones we're using.
293 */
294#define XFS_SB_MVAL(x) (1LL << XFS_SBS_ ## x)
295#define XFS_SB_UUID XFS_SB_MVAL(UUID)
296#define XFS_SB_FNAME XFS_SB_MVAL(FNAME)
297#define XFS_SB_ROOTINO XFS_SB_MVAL(ROOTINO)
298#define XFS_SB_RBMINO XFS_SB_MVAL(RBMINO)
299#define XFS_SB_RSUMINO XFS_SB_MVAL(RSUMINO)
300#define XFS_SB_VERSIONNUM XFS_SB_MVAL(VERSIONNUM)
301#define XFS_SB_UQUOTINO XFS_SB_MVAL(UQUOTINO)
302#define XFS_SB_GQUOTINO XFS_SB_MVAL(GQUOTINO)
303#define XFS_SB_QFLAGS XFS_SB_MVAL(QFLAGS)
304#define XFS_SB_SHARED_VN XFS_SB_MVAL(SHARED_VN)
305#define XFS_SB_UNIT XFS_SB_MVAL(UNIT)
306#define XFS_SB_WIDTH XFS_SB_MVAL(WIDTH)
307#define XFS_SB_ICOUNT XFS_SB_MVAL(ICOUNT)
308#define XFS_SB_IFREE XFS_SB_MVAL(IFREE)
309#define XFS_SB_FDBLOCKS XFS_SB_MVAL(FDBLOCKS)
310#define XFS_SB_FEATURES2 (XFS_SB_MVAL(FEATURES2) | \
311 XFS_SB_MVAL(BAD_FEATURES2))
312#define XFS_SB_FEATURES_COMPAT XFS_SB_MVAL(FEATURES_COMPAT)
313#define XFS_SB_FEATURES_RO_COMPAT XFS_SB_MVAL(FEATURES_RO_COMPAT)
314#define XFS_SB_FEATURES_INCOMPAT XFS_SB_MVAL(FEATURES_INCOMPAT)
315#define XFS_SB_FEATURES_LOG_INCOMPAT XFS_SB_MVAL(FEATURES_LOG_INCOMPAT)
316#define XFS_SB_CRC XFS_SB_MVAL(CRC)
317#define XFS_SB_PQUOTINO XFS_SB_MVAL(PQUOTINO)
318#define XFS_SB_NUM_BITS ((int)XFS_SBS_FIELDCOUNT)
319#define XFS_SB_ALL_BITS ((1LL << XFS_SB_NUM_BITS) - 1)
320#define XFS_SB_MOD_BITS \
321 (XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \
322 XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \
323 XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \
324 XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2 | \
325 XFS_SB_FEATURES_COMPAT | XFS_SB_FEATURES_RO_COMPAT | \
326 XFS_SB_FEATURES_INCOMPAT | XFS_SB_FEATURES_LOG_INCOMPAT | \
327 XFS_SB_PQUOTINO)
328
329 267
330/* 268/*
331 * Misc. Flags - warning - these will be cleared by xfs_repair unless 269 * Misc. Flags - warning - these will be cleared by xfs_repair unless
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 116ef1ddb3e3..07349a183a11 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -376,7 +376,8 @@ xfs_ialloc_ag_alloc(
376 */ 376 */
377 newlen = args.mp->m_ialloc_inos; 377 newlen = args.mp->m_ialloc_inos;
378 if (args.mp->m_maxicount && 378 if (args.mp->m_maxicount &&
379 args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount) 379 percpu_counter_read(&args.mp->m_icount) + newlen >
380 args.mp->m_maxicount)
380 return -ENOSPC; 381 return -ENOSPC;
381 args.minlen = args.maxlen = args.mp->m_ialloc_blks; 382 args.minlen = args.maxlen = args.mp->m_ialloc_blks;
382 /* 383 /*
@@ -700,7 +701,7 @@ xfs_ialloc_next_rec(
700 error = xfs_inobt_get_rec(cur, rec, &i); 701 error = xfs_inobt_get_rec(cur, rec, &i);
701 if (error) 702 if (error)
702 return error; 703 return error;
703 XFS_WANT_CORRUPTED_RETURN(i == 1); 704 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
704 } 705 }
705 706
706 return 0; 707 return 0;
@@ -724,7 +725,7 @@ xfs_ialloc_get_rec(
724 error = xfs_inobt_get_rec(cur, rec, &i); 725 error = xfs_inobt_get_rec(cur, rec, &i);
725 if (error) 726 if (error)
726 return error; 727 return error;
727 XFS_WANT_CORRUPTED_RETURN(i == 1); 728 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
728 } 729 }
729 730
730 return 0; 731 return 0;
@@ -783,12 +784,12 @@ xfs_dialloc_ag_inobt(
783 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i); 784 error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
784 if (error) 785 if (error)
785 goto error0; 786 goto error0;
786 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 787 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
787 788
788 error = xfs_inobt_get_rec(cur, &rec, &j); 789 error = xfs_inobt_get_rec(cur, &rec, &j);
789 if (error) 790 if (error)
790 goto error0; 791 goto error0;
791 XFS_WANT_CORRUPTED_GOTO(j == 1, error0); 792 XFS_WANT_CORRUPTED_GOTO(mp, j == 1, error0);
792 793
793 if (rec.ir_freecount > 0) { 794 if (rec.ir_freecount > 0) {
794 /* 795 /*
@@ -944,19 +945,19 @@ newino:
944 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 945 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
945 if (error) 946 if (error)
946 goto error0; 947 goto error0;
947 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 948 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
948 949
949 for (;;) { 950 for (;;) {
950 error = xfs_inobt_get_rec(cur, &rec, &i); 951 error = xfs_inobt_get_rec(cur, &rec, &i);
951 if (error) 952 if (error)
952 goto error0; 953 goto error0;
953 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 954 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
954 if (rec.ir_freecount > 0) 955 if (rec.ir_freecount > 0)
955 break; 956 break;
956 error = xfs_btree_increment(cur, 0, &i); 957 error = xfs_btree_increment(cur, 0, &i);
957 if (error) 958 if (error)
958 goto error0; 959 goto error0;
959 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 960 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
960 } 961 }
961 962
962alloc_inode: 963alloc_inode:
@@ -1016,7 +1017,7 @@ xfs_dialloc_ag_finobt_near(
1016 error = xfs_inobt_get_rec(lcur, rec, &i); 1017 error = xfs_inobt_get_rec(lcur, rec, &i);
1017 if (error) 1018 if (error)
1018 return error; 1019 return error;
1019 XFS_WANT_CORRUPTED_RETURN(i == 1); 1020 XFS_WANT_CORRUPTED_RETURN(lcur->bc_mp, i == 1);
1020 1021
1021 /* 1022 /*
1022 * See if we've landed in the parent inode record. The finobt 1023 * See if we've landed in the parent inode record. The finobt
@@ -1039,10 +1040,10 @@ xfs_dialloc_ag_finobt_near(
1039 error = xfs_inobt_get_rec(rcur, &rrec, &j); 1040 error = xfs_inobt_get_rec(rcur, &rrec, &j);
1040 if (error) 1041 if (error)
1041 goto error_rcur; 1042 goto error_rcur;
1042 XFS_WANT_CORRUPTED_GOTO(j == 1, error_rcur); 1043 XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, j == 1, error_rcur);
1043 } 1044 }
1044 1045
1045 XFS_WANT_CORRUPTED_GOTO(i == 1 || j == 1, error_rcur); 1046 XFS_WANT_CORRUPTED_GOTO(lcur->bc_mp, i == 1 || j == 1, error_rcur);
1046 if (i == 1 && j == 1) { 1047 if (i == 1 && j == 1) {
1047 /* 1048 /*
1048 * Both the left and right records are valid. Choose the closer 1049 * Both the left and right records are valid. Choose the closer
@@ -1095,7 +1096,7 @@ xfs_dialloc_ag_finobt_newino(
1095 error = xfs_inobt_get_rec(cur, rec, &i); 1096 error = xfs_inobt_get_rec(cur, rec, &i);
1096 if (error) 1097 if (error)
1097 return error; 1098 return error;
1098 XFS_WANT_CORRUPTED_RETURN(i == 1); 1099 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1099 return 0; 1100 return 0;
1100 } 1101 }
1101 } 1102 }
@@ -1106,12 +1107,12 @@ xfs_dialloc_ag_finobt_newino(
1106 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i); 1107 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
1107 if (error) 1108 if (error)
1108 return error; 1109 return error;
1109 XFS_WANT_CORRUPTED_RETURN(i == 1); 1110 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1110 1111
1111 error = xfs_inobt_get_rec(cur, rec, &i); 1112 error = xfs_inobt_get_rec(cur, rec, &i);
1112 if (error) 1113 if (error)
1113 return error; 1114 return error;
1114 XFS_WANT_CORRUPTED_RETURN(i == 1); 1115 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1115 1116
1116 return 0; 1117 return 0;
1117} 1118}
@@ -1133,19 +1134,19 @@ xfs_dialloc_ag_update_inobt(
1133 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i); 1134 error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
1134 if (error) 1135 if (error)
1135 return error; 1136 return error;
1136 XFS_WANT_CORRUPTED_RETURN(i == 1); 1137 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1137 1138
1138 error = xfs_inobt_get_rec(cur, &rec, &i); 1139 error = xfs_inobt_get_rec(cur, &rec, &i);
1139 if (error) 1140 if (error)
1140 return error; 1141 return error;
1141 XFS_WANT_CORRUPTED_RETURN(i == 1); 1142 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
1142 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) % 1143 ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
1143 XFS_INODES_PER_CHUNK) == 0); 1144 XFS_INODES_PER_CHUNK) == 0);
1144 1145
1145 rec.ir_free &= ~XFS_INOBT_MASK(offset); 1146 rec.ir_free &= ~XFS_INOBT_MASK(offset);
1146 rec.ir_freecount--; 1147 rec.ir_freecount--;
1147 1148
1148 XFS_WANT_CORRUPTED_RETURN((rec.ir_free == frec->ir_free) && 1149 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, (rec.ir_free == frec->ir_free) &&
1149 (rec.ir_freecount == frec->ir_freecount)); 1150 (rec.ir_freecount == frec->ir_freecount));
1150 1151
1151 return xfs_inobt_update(cur, &rec); 1152 return xfs_inobt_update(cur, &rec);
@@ -1340,7 +1341,8 @@ xfs_dialloc(
1340 * inode. 1341 * inode.
1341 */ 1342 */
1342 if (mp->m_maxicount && 1343 if (mp->m_maxicount &&
1343 mp->m_sb.sb_icount + mp->m_ialloc_inos > mp->m_maxicount) { 1344 percpu_counter_read(&mp->m_icount) + mp->m_ialloc_inos >
1345 mp->m_maxicount) {
1344 noroom = 1; 1346 noroom = 1;
1345 okalloc = 0; 1347 okalloc = 0;
1346 } 1348 }
@@ -1475,14 +1477,14 @@ xfs_difree_inobt(
1475 __func__, error); 1477 __func__, error);
1476 goto error0; 1478 goto error0;
1477 } 1479 }
1478 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1480 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1479 error = xfs_inobt_get_rec(cur, &rec, &i); 1481 error = xfs_inobt_get_rec(cur, &rec, &i);
1480 if (error) { 1482 if (error) {
1481 xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.", 1483 xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
1482 __func__, error); 1484 __func__, error);
1483 goto error0; 1485 goto error0;
1484 } 1486 }
1485 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1487 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1486 /* 1488 /*
1487 * Get the offset in the inode chunk. 1489 * Get the offset in the inode chunk.
1488 */ 1490 */
@@ -1592,7 +1594,7 @@ xfs_difree_finobt(
1592 * freed an inode in a previously fully allocated chunk. If not, 1594 * freed an inode in a previously fully allocated chunk. If not,
1593 * something is out of sync. 1595 * something is out of sync.
1594 */ 1596 */
1595 XFS_WANT_CORRUPTED_GOTO(ibtrec->ir_freecount == 1, error); 1597 XFS_WANT_CORRUPTED_GOTO(mp, ibtrec->ir_freecount == 1, error);
1596 1598
1597 error = xfs_inobt_insert_rec(cur, ibtrec->ir_freecount, 1599 error = xfs_inobt_insert_rec(cur, ibtrec->ir_freecount,
1598 ibtrec->ir_free, &i); 1600 ibtrec->ir_free, &i);
@@ -1613,12 +1615,12 @@ xfs_difree_finobt(
1613 error = xfs_inobt_get_rec(cur, &rec, &i); 1615 error = xfs_inobt_get_rec(cur, &rec, &i);
1614 if (error) 1616 if (error)
1615 goto error; 1617 goto error;
1616 XFS_WANT_CORRUPTED_GOTO(i == 1, error); 1618 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error);
1617 1619
1618 rec.ir_free |= XFS_INOBT_MASK(offset); 1620 rec.ir_free |= XFS_INOBT_MASK(offset);
1619 rec.ir_freecount++; 1621 rec.ir_freecount++;
1620 1622
1621 XFS_WANT_CORRUPTED_GOTO((rec.ir_free == ibtrec->ir_free) && 1623 XFS_WANT_CORRUPTED_GOTO(mp, (rec.ir_free == ibtrec->ir_free) &&
1622 (rec.ir_freecount == ibtrec->ir_freecount), 1624 (rec.ir_freecount == ibtrec->ir_freecount),
1623 error); 1625 error);
1624 1626
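
Both m_maxicount gates above now use percpu_counter_read(), a fast but possibly stale snapshot of the per-cpu counter. That is acceptable here because the gate is only an early -ENOSPC filter, not the authoritative accounting. A minimal sketch of the gate, with a hypothetical helper name:

	/* Hypothetical helper modelling the admission gate used above. */
	static inline bool
	xfs_icount_room(
		struct xfs_mount	*mp,
		unsigned int		nr_inos)
	{
		if (!mp->m_maxicount)
			return true;
		/* a racy read is fine for an early -ENOSPC check */
		return percpu_counter_read(&mp->m_icount) + nr_inos <=
		       mp->m_maxicount;
	}
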
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index b0a5fe95a3e2..dc4bfc5d88fc 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -111,14 +111,6 @@ xfs_mount_validate_sb(
111 bool check_inprogress, 111 bool check_inprogress,
112 bool check_version) 112 bool check_version)
113{ 113{
114
115 /*
116 * If the log device and data device have the
117 * same device number, the log is internal.
118 * Consequently, the sb_logstart should be non-zero. If
119 * we have a zero sb_logstart in this case, we may be trying to mount
120 * a volume filesystem in a non-volume manner.
121 */
122 if (sbp->sb_magicnum != XFS_SB_MAGIC) { 114 if (sbp->sb_magicnum != XFS_SB_MAGIC) {
123 xfs_warn(mp, "bad magic number"); 115 xfs_warn(mp, "bad magic number");
124 return -EWRONGFS; 116 return -EWRONGFS;
@@ -743,17 +735,15 @@ xfs_initialize_perag_data(
743 btree += pag->pagf_btreeblks; 735 btree += pag->pagf_btreeblks;
744 xfs_perag_put(pag); 736 xfs_perag_put(pag);
745 } 737 }
746 /* 738
747 * Overwrite incore superblock counters with just-read data 739 /* Overwrite incore superblock counters with just-read data */
748 */
749 spin_lock(&mp->m_sb_lock); 740 spin_lock(&mp->m_sb_lock);
750 sbp->sb_ifree = ifree; 741 sbp->sb_ifree = ifree;
751 sbp->sb_icount = ialloc; 742 sbp->sb_icount = ialloc;
752 sbp->sb_fdblocks = bfree + bfreelst + btree; 743 sbp->sb_fdblocks = bfree + bfreelst + btree;
753 spin_unlock(&mp->m_sb_lock); 744 spin_unlock(&mp->m_sb_lock);
754 745
755 /* Fixup the per-cpu counters as well. */ 746 xfs_reinit_percpu_counters(mp);
756 xfs_icsb_reinit_counters(mp);
757 747
758 return 0; 748 return 0;
759} 749}
@@ -771,6 +761,10 @@ xfs_log_sb(
771 struct xfs_mount *mp = tp->t_mountp; 761 struct xfs_mount *mp = tp->t_mountp;
772 struct xfs_buf *bp = xfs_trans_getsb(tp, mp, 0); 762 struct xfs_buf *bp = xfs_trans_getsb(tp, mp, 0);
773 763
764 mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount);
765 mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree);
766 mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);
767
774 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb); 768 xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
775 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF); 769 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
776 xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb)); 770 xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb));
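
Because the three counters are now per-cpu, the fields in mp->m_sb are mere snapshots; xfs_log_sb() has to fold exact values back in before the superblock image is serialised. percpu_counter_sum() walks every CPU and is exact but expensive, which is fine on this slow path. As a sketch, assuming only the three converted counters (the helper name is illustrative):

	/* Sketch: refresh the sb image from the authoritative per-cpu counters. */
	static void
	example_sync_sb_counters(
		struct xfs_mount	*mp)
	{
		mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount);
		mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree);
		mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);
	}
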
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 507d96a57ac7..092d652bc03d 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -537,9 +537,9 @@ xfs_buf_item_push(
537 537
538 /* has a previous flush failed due to IO errors? */ 538 /* has a previous flush failed due to IO errors? */
539 if ((bp->b_flags & XBF_WRITE_FAIL) && 539 if ((bp->b_flags & XBF_WRITE_FAIL) &&
540 ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) { 540 ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) {
541 xfs_warn(bp->b_target->bt_mount, 541 xfs_warn(bp->b_target->bt_mount,
542"Detected failing async write on buffer block 0x%llx. Retrying async write.", 542"Failing async write on buffer block 0x%llx. Retrying async write.",
543 (long long)bp->b_bn); 543 (long long)bp->b_bn);
544 } 544 }
545 545
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 799e5a2d334d..e85a9519a5ae 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -84,7 +84,7 @@ xfs_trim_extents(
84 error = xfs_alloc_get_rec(cur, &fbno, &flen, &i); 84 error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
85 if (error) 85 if (error)
86 goto out_del_cursor; 86 goto out_del_cursor;
87 XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor); 87 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_del_cursor);
88 ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest)); 88 ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));
89 89
90 /* 90 /*
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 279a76e52791..c0394ed126fc 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -40,25 +40,25 @@ extern void xfs_verifier_error(struct xfs_buf *bp);
40/* 40/*
41 * Macros to set EFSCORRUPTED & return/branch. 41 * Macros to set EFSCORRUPTED & return/branch.
42 */ 42 */
43#define XFS_WANT_CORRUPTED_GOTO(x,l) \ 43#define XFS_WANT_CORRUPTED_GOTO(mp, x, l) \
44 { \ 44 { \
45 int fs_is_ok = (x); \ 45 int fs_is_ok = (x); \
46 ASSERT(fs_is_ok); \ 46 ASSERT(fs_is_ok); \
47 if (unlikely(!fs_is_ok)) { \ 47 if (unlikely(!fs_is_ok)) { \
48 XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO", \ 48 XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO", \
49 XFS_ERRLEVEL_LOW, NULL); \ 49 XFS_ERRLEVEL_LOW, mp); \
50 error = -EFSCORRUPTED; \ 50 error = -EFSCORRUPTED; \
51 goto l; \ 51 goto l; \
52 } \ 52 } \
53 } 53 }
54 54
55#define XFS_WANT_CORRUPTED_RETURN(x) \ 55#define XFS_WANT_CORRUPTED_RETURN(mp, x) \
56 { \ 56 { \
57 int fs_is_ok = (x); \ 57 int fs_is_ok = (x); \
58 ASSERT(fs_is_ok); \ 58 ASSERT(fs_is_ok); \
59 if (unlikely(!fs_is_ok)) { \ 59 if (unlikely(!fs_is_ok)) { \
60 XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_RETURN", \ 60 XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_RETURN", \
61 XFS_ERRLEVEL_LOW, NULL); \ 61 XFS_ERRLEVEL_LOW, mp); \
62 return -EFSCORRUPTED; \ 62 return -EFSCORRUPTED; \
63 } \ 63 } \
64 } 64 }
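
With the extra argument, XFS_ERROR_REPORT() receives the mount instead of NULL, so corruption messages can name the affected filesystem. A sketch of a typical caller, modelled on the btree hunks earlier in this patch (the function name is hypothetical):

	/* Illustrative caller: cursor-based checks pass cur->bc_mp for reporting. */
	static int
	xfs_example_check_rec(
		struct xfs_btree_cur	*cur)
	{
		int			error;
		int			i;

		error = xfs_btree_increment(cur, 0, &i);
		if (error)
			return error;

		/* sets -EFSCORRUPTED and returns if the record is missing */
		XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, i == 1);
		return 0;
	}
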
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 609b5aaddd8e..b101e80f2862 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -397,7 +397,8 @@ STATIC int /* error (positive) */
397xfs_zero_last_block( 397xfs_zero_last_block(
398 struct xfs_inode *ip, 398 struct xfs_inode *ip,
399 xfs_fsize_t offset, 399 xfs_fsize_t offset,
400 xfs_fsize_t isize) 400 xfs_fsize_t isize,
401 bool *did_zeroing)
401{ 402{
402 struct xfs_mount *mp = ip->i_mount; 403 struct xfs_mount *mp = ip->i_mount;
403 xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize); 404 xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize);
@@ -425,6 +426,7 @@ xfs_zero_last_block(
425 zero_len = mp->m_sb.sb_blocksize - zero_offset; 426 zero_len = mp->m_sb.sb_blocksize - zero_offset;
426 if (isize + zero_len > offset) 427 if (isize + zero_len > offset)
427 zero_len = offset - isize; 428 zero_len = offset - isize;
429 *did_zeroing = true;
428 return xfs_iozero(ip, isize, zero_len); 430 return xfs_iozero(ip, isize, zero_len);
429} 431}
430 432
@@ -443,7 +445,8 @@ int /* error (positive) */
443xfs_zero_eof( 445xfs_zero_eof(
444 struct xfs_inode *ip, 446 struct xfs_inode *ip,
445 xfs_off_t offset, /* starting I/O offset */ 447 xfs_off_t offset, /* starting I/O offset */
446 xfs_fsize_t isize) /* current inode size */ 448 xfs_fsize_t isize, /* current inode size */
449 bool *did_zeroing)
447{ 450{
448 struct xfs_mount *mp = ip->i_mount; 451 struct xfs_mount *mp = ip->i_mount;
449 xfs_fileoff_t start_zero_fsb; 452 xfs_fileoff_t start_zero_fsb;
@@ -465,7 +468,7 @@ xfs_zero_eof(
465 * We only zero a part of that block so it is handled specially. 468 * We only zero a part of that block so it is handled specially.
466 */ 469 */
467 if (XFS_B_FSB_OFFSET(mp, isize) != 0) { 470 if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
468 error = xfs_zero_last_block(ip, offset, isize); 471 error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
469 if (error) 472 if (error)
470 return error; 473 return error;
471 } 474 }
@@ -525,6 +528,7 @@ xfs_zero_eof(
525 if (error) 528 if (error)
526 return error; 529 return error;
527 530
531 *did_zeroing = true;
528 start_zero_fsb = imap.br_startoff + imap.br_blockcount; 532 start_zero_fsb = imap.br_startoff + imap.br_blockcount;
529 ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); 533 ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
530 } 534 }
@@ -567,13 +571,15 @@ restart:
567 * having to redo all checks before. 571 * having to redo all checks before.
568 */ 572 */
569 if (*pos > i_size_read(inode)) { 573 if (*pos > i_size_read(inode)) {
574 bool zero = false;
575
570 if (*iolock == XFS_IOLOCK_SHARED) { 576 if (*iolock == XFS_IOLOCK_SHARED) {
571 xfs_rw_iunlock(ip, *iolock); 577 xfs_rw_iunlock(ip, *iolock);
572 *iolock = XFS_IOLOCK_EXCL; 578 *iolock = XFS_IOLOCK_EXCL;
573 xfs_rw_ilock(ip, *iolock); 579 xfs_rw_ilock(ip, *iolock);
574 goto restart; 580 goto restart;
575 } 581 }
576 error = xfs_zero_eof(ip, *pos, i_size_read(inode)); 582 error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero);
577 if (error) 583 if (error)
578 return error; 584 return error;
579 } 585 }
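
xfs_zero_eof() and xfs_zero_last_block() now report through *did_zeroing whether any pagecache was actually dirtied, so callers can force writeback before a new i_size becomes visible; the xfs_setattr_size() hunk below depends on exactly this. A sketch of the calling convention (the function name and flush range are assumptions):

	/* Sketch: consuming the did_zeroing flag on a file-extending path. */
	static int
	example_grow_file(
		struct xfs_inode	*ip,
		xfs_off_t		newsize,
		xfs_fsize_t		oldsize)
	{
		bool			did_zeroing = false;
		int			error;

		error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing);
		if (error)
			return error;

		/* if we zeroed anything, it must reach disk before i_size grows */
		if (did_zeroing)
			error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
							     oldsize, newsize);
		return error;
	}
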
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 74efe5b760dc..cb7e8a29dfb6 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -637,12 +637,13 @@ xfs_fs_counts(
637 xfs_mount_t *mp, 637 xfs_mount_t *mp,
638 xfs_fsop_counts_t *cnt) 638 xfs_fsop_counts_t *cnt)
639{ 639{
640 xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); 640 cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
641 cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
642 cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
643 XFS_ALLOC_SET_ASIDE(mp);
644
641 spin_lock(&mp->m_sb_lock); 645 spin_lock(&mp->m_sb_lock);
642 cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
643 cnt->freertx = mp->m_sb.sb_frextents; 646 cnt->freertx = mp->m_sb.sb_frextents;
644 cnt->freeino = mp->m_sb.sb_ifree;
645 cnt->allocino = mp->m_sb.sb_icount;
646 spin_unlock(&mp->m_sb_lock); 647 spin_unlock(&mp->m_sb_lock);
647 return 0; 648 return 0;
648} 649}
@@ -692,14 +693,9 @@ xfs_reserve_blocks(
692 * what to do. This means that the amount of free space can 693 * what to do. This means that the amount of free space can
693 * change while we do this, so we need to retry if we end up 694 * change while we do this, so we need to retry if we end up
694 * trying to reserve more space than is available. 695 * trying to reserve more space than is available.
695 *
696 * We also use the xfs_mod_incore_sb() interface so that we
697 * don't have to care about whether per cpu counter are
698 * enabled, disabled or even compiled in....
699 */ 696 */
700retry: 697retry:
701 spin_lock(&mp->m_sb_lock); 698 spin_lock(&mp->m_sb_lock);
702 xfs_icsb_sync_counters_locked(mp, 0);
703 699
704 /* 700 /*
705 * If our previous reservation was larger than the current value, 701 * If our previous reservation was larger than the current value,
@@ -716,7 +712,8 @@ retry:
716 } else { 712 } else {
717 __int64_t free; 713 __int64_t free;
718 714
719 free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); 715 free = percpu_counter_sum(&mp->m_fdblocks) -
716 XFS_ALLOC_SET_ASIDE(mp);
720 if (!free) 717 if (!free)
721 goto out; /* ENOSPC and fdblks_delta = 0 */ 718 goto out; /* ENOSPC and fdblks_delta = 0 */
722 719
@@ -755,8 +752,7 @@ out:
755 * the extra reserve blocks from the reserve..... 752 * the extra reserve blocks from the reserve.....
756 */ 753 */
757 int error; 754 int error;
758 error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, 755 error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
759 fdblks_delta, 0);
760 if (error == -ENOSPC) 756 if (error == -ENOSPC)
761 goto retry; 757 goto retry;
762 } 758 }
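
Note the split above: xfs_fs_counts() uses percpu_counter_read_positive(), a cheap clamped snapshot that is good enough for statfs-style reporting, while xfs_reserve_blocks() switches to percpu_counter_sum() because accuracy matters when sizing the reserve pool. Side by side, as a sketch:

	/* Sketch: the two read flavours used above, side by side. */
	static void
	example_counter_reads(
		struct xfs_mount	*mp)
	{
		s64			approx, exact;

		/* cheap, clamped-at-zero snapshot: fine for statfs-style output */
		approx = percpu_counter_read_positive(&mp->m_fdblocks);

		/* exact but walks every CPU: reserved for slow paths near ENOSPC */
		exact = percpu_counter_sum(&mp->m_fdblocks);

		(void)approx;
		(void)exact;
	}
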
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 9771b7ef62ed..76a9f2783282 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -439,11 +439,11 @@ again:
439 *ipp = ip; 439 *ipp = ip;
440 440
441 /* 441 /*
442 * If we have a real type for an on-disk inode, we can set ops(&unlock) 442 * If we have a real type for an on-disk inode, we can set up the inode
443 * now. If it's a new inode being created, xfs_ialloc will handle it. 443 * now. If it's a new inode being created, xfs_ialloc will handle it.
444 */ 444 */
445 if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0) 445 if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
446 xfs_setup_inode(ip); 446 xfs_setup_existing_inode(ip);
447 return 0; 447 return 0;
448 448
449out_error_or_again: 449out_error_or_again:
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index ac24818f7b2d..5a44f1cc820c 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -884,7 +884,7 @@ xfs_ialloc(
884 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 884 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
885 xfs_trans_log_inode(tp, ip, flags); 885 xfs_trans_log_inode(tp, ip, flags);
886 886
887 /* now that we have an i_mode we can setup inode ops and unlock */ 887 /* now that we have an i_mode we can set up the inode structure */
888 xfs_setup_inode(ip); 888 xfs_setup_inode(ip);
889 889
890 *ipp = ip; 890 *ipp = ip;
@@ -1301,12 +1301,14 @@ xfs_create(
1301 xfs_trans_cancel(tp, cancel_flags); 1301 xfs_trans_cancel(tp, cancel_flags);
1302 out_release_inode: 1302 out_release_inode:
1303 /* 1303 /*
1304 * Wait until after the current transaction is aborted to 1304 * Wait until after the current transaction is aborted to finish the
1305 * release the inode. This prevents recursive transactions 1305 * setup of the inode and release the inode. This prevents recursive
1306 * and deadlocks from xfs_inactive. 1306 * transactions and deadlocks from xfs_inactive.
1307 */ 1307 */
1308 if (ip) 1308 if (ip) {
1309 xfs_finish_inode_setup(ip);
1309 IRELE(ip); 1310 IRELE(ip);
1311 }
1310 1312
1311 xfs_qm_dqrele(udqp); 1313 xfs_qm_dqrele(udqp);
1312 xfs_qm_dqrele(gdqp); 1314 xfs_qm_dqrele(gdqp);
@@ -1411,12 +1413,14 @@ xfs_create_tmpfile(
1411 xfs_trans_cancel(tp, cancel_flags); 1413 xfs_trans_cancel(tp, cancel_flags);
1412 out_release_inode: 1414 out_release_inode:
1413 /* 1415 /*
1414 * Wait until after the current transaction is aborted to 1416 * Wait until after the current transaction is aborted to finish the
1415 * release the inode. This prevents recursive transactions 1417 * setup of the inode and release the inode. This prevents recursive
1416 * and deadlocks from xfs_inactive. 1418 * transactions and deadlocks from xfs_inactive.
1417 */ 1419 */
1418 if (ip) 1420 if (ip) {
1421 xfs_finish_inode_setup(ip);
1419 IRELE(ip); 1422 IRELE(ip);
1423 }
1420 1424
1421 xfs_qm_dqrele(udqp); 1425 xfs_qm_dqrele(udqp);
1422 xfs_qm_dqrele(gdqp); 1426 xfs_qm_dqrele(gdqp);
@@ -2933,6 +2937,10 @@ xfs_rename(
2933 * Handle RENAME_EXCHANGE flags 2937 * Handle RENAME_EXCHANGE flags
2934 */ 2938 */
2935 if (flags & RENAME_EXCHANGE) { 2939 if (flags & RENAME_EXCHANGE) {
2940 if (target_ip == NULL) {
2941 error = -EINVAL;
2942 goto error_return;
2943 }
2936 error = xfs_cross_rename(tp, src_dp, src_name, src_ip, 2944 error = xfs_cross_rename(tp, src_dp, src_name, src_ip,
2937 target_dp, target_name, target_ip, 2945 target_dp, target_name, target_ip,
2938 &free_list, &first_block, spaceres); 2946 &free_list, &first_block, spaceres);
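
The added check closes a hole where RENAME_EXCHANGE could reach xfs_cross_rename() with target_ip == NULL; an exchange needs two existing inodes, so a failed target lookup must be turned into -EINVAL. In isolation (the helper name is hypothetical):

	/* Illustrative: an exchange requires both source and target to exist. */
	static int
	example_check_exchange(
		unsigned int		flags,
		struct xfs_inode	*target_ip)
	{
		if ((flags & RENAME_EXCHANGE) && target_ip == NULL)
			return -EINVAL;
		return 0;
	}
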
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index f65ab8176c2f..8f22d20368d8 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -399,12 +399,35 @@ enum xfs_prealloc_flags {
399 XFS_PREALLOC_INVISIBLE = (1 << 4), 399 XFS_PREALLOC_INVISIBLE = (1 << 4),
400}; 400};
401 401
402int xfs_update_prealloc_flags(struct xfs_inode *, 402int xfs_update_prealloc_flags(struct xfs_inode *ip,
403 enum xfs_prealloc_flags); 403 enum xfs_prealloc_flags flags);
404int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t); 404int xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
405int xfs_iozero(struct xfs_inode *, loff_t, size_t); 405 xfs_fsize_t isize, bool *did_zeroing);
406int xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
406 407
407 408
409/* from xfs_iops.c */
410/*
411 * When setting up a newly allocated inode, we need to call
412 * xfs_finish_inode_setup() once the inode is fully instantiated at
413 * the VFS level to prevent the rest of the world seeing the inode
414 * before we've completed instantiation. Otherwise we can do it
415 * the moment the inode lookup is complete.
416 */
417extern void xfs_setup_inode(struct xfs_inode *ip);
418static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
419{
420 xfs_iflags_clear(ip, XFS_INEW);
421 barrier();
422 unlock_new_inode(VFS_I(ip));
423}
424
425static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
426{
427 xfs_setup_inode(ip);
428 xfs_finish_inode_setup(ip);
429}
430
408#define IHOLD(ip) \ 431#define IHOLD(ip) \
409do { \ 432do { \
410 ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \ 433 ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0) ; \
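
The comment above describes the new two-phase setup: xfs_setup_inode() initialises the VFS inode, while xfs_finish_inode_setup() is what finally clears XFS_INEW and unlocks the new inode, so creation paths control when the inode becomes globally visible. A sketch of the intended ordering on a create path (the function name is hypothetical; the real callers are patched in xfs_iops.c below):

	/* Sketch: create-path ordering with the new two-phase setup. */
	static void
	example_publish_new_inode(
		struct xfs_inode	*ip,
		struct dentry		*dentry)
	{
		/* xfs_ialloc() already ran xfs_setup_inode(); XFS_INEW still set */
		d_instantiate(dentry, VFS_I(ip));

		/* clear XFS_INEW and unlock_new_inode(): world may now see it */
		xfs_finish_inode_setup(ip);
	}
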
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index ccb1dd0d509e..38e633bad8c2 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -460,8 +460,7 @@ xfs_iomap_prealloc_size(
460 alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN), 460 alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
461 alloc_blocks); 461 alloc_blocks);
462 462
463 xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); 463 freesp = percpu_counter_read_positive(&mp->m_fdblocks);
464 freesp = mp->m_sb.sb_fdblocks;
465 if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) { 464 if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
466 shift = 2; 465 shift = 2;
467 if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT]) 466 if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 5c0c27c6fb75..8b9e6887e315 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -187,6 +187,8 @@ xfs_generic_create(
187 else 187 else
188 d_instantiate(dentry, inode); 188 d_instantiate(dentry, inode);
189 189
190 xfs_finish_inode_setup(ip);
191
190 out_free_acl: 192 out_free_acl:
191 if (default_acl) 193 if (default_acl)
192 posix_acl_release(default_acl); 194 posix_acl_release(default_acl);
@@ -195,6 +197,7 @@ xfs_generic_create(
195 return error; 197 return error;
196 198
197 out_cleanup_inode: 199 out_cleanup_inode:
200 xfs_finish_inode_setup(ip);
198 if (!tmpfile) 201 if (!tmpfile)
199 xfs_cleanup_inode(dir, inode, dentry); 202 xfs_cleanup_inode(dir, inode, dentry);
200 iput(inode); 203 iput(inode);
@@ -367,9 +370,11 @@ xfs_vn_symlink(
367 goto out_cleanup_inode; 370 goto out_cleanup_inode;
368 371
369 d_instantiate(dentry, inode); 372 d_instantiate(dentry, inode);
373 xfs_finish_inode_setup(cip);
370 return 0; 374 return 0;
371 375
372 out_cleanup_inode: 376 out_cleanup_inode:
377 xfs_finish_inode_setup(cip);
373 xfs_cleanup_inode(dir, inode, dentry); 378 xfs_cleanup_inode(dir, inode, dentry);
374 iput(inode); 379 iput(inode);
375 out: 380 out:
@@ -751,6 +756,7 @@ xfs_setattr_size(
751 int error; 756 int error;
752 uint lock_flags = 0; 757 uint lock_flags = 0;
753 uint commit_flags = 0; 758 uint commit_flags = 0;
759 bool did_zeroing = false;
754 760
755 trace_xfs_setattr(ip); 761 trace_xfs_setattr(ip);
756 762
@@ -795,20 +801,16 @@ xfs_setattr_size(
795 return error; 801 return error;
796 802
797 /* 803 /*
798 * Now we can make the changes. Before we join the inode to the 804 * File data changes must be complete before we start the transaction to
799 * transaction, take care of the part of the truncation that must be 805 * modify the inode. This needs to be done before joining the inode to
800 * done without the inode lock. This needs to be done before joining 806 * the transaction because the inode cannot be unlocked once it is a
801 * the inode to the transaction, because the inode cannot be unlocked 807 * part of the transaction.
802 * once it is a part of the transaction. 808 *
809 * Start with zeroing any data block beyond EOF that we may expose on
810 * file extension.
803 */ 811 */
804 if (newsize > oldsize) { 812 if (newsize > oldsize) {
805 /* 813 error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing);
806 * Do the first part of growing a file: zero any data in the
807 * last block that is beyond the old EOF. We need to do this
808 * before the inode is joined to the transaction to modify
809 * i_size.
810 */
811 error = xfs_zero_eof(ip, newsize, oldsize);
812 if (error) 814 if (error)
813 return error; 815 return error;
814 } 816 }
@@ -818,23 +820,18 @@ xfs_setattr_size(
818 * any previous writes that are beyond the on disk EOF and the new 820 * any previous writes that are beyond the on disk EOF and the new
819 * EOF that have not been written out need to be written here. If we 821 * EOF that have not been written out need to be written here. If we
820 * do not write the data out, we expose ourselves to the null files 822 * do not write the data out, we expose ourselves to the null files
821 * problem. 823 * problem. Note that this includes any block zeroing we did above;
822 * 824 * otherwise those blocks may not be zeroed after a crash.
823 * Only flush from the on disk size to the smaller of the in memory
824 * file size or the new size as that's the range we really care about
825 * here and prevents waiting for other data not within the range we
826 * care about here.
827 */ 825 */
828 if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) { 826 if (newsize > ip->i_d.di_size &&
827 (oldsize != ip->i_d.di_size || did_zeroing)) {
829 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, 828 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
830 ip->i_d.di_size, newsize); 829 ip->i_d.di_size, newsize);
831 if (error) 830 if (error)
832 return error; 831 return error;
833 } 832 }
834 833
835 /* 834 /* Now wait for all direct I/O to complete. */
836 * Wait for all direct I/O to complete.
837 */
838 inode_dio_wait(inode); 835 inode_dio_wait(inode);
839 836
840 /* 837 /*
@@ -1213,16 +1210,12 @@ xfs_diflags_to_iflags(
1213} 1210}
1214 1211
1215/* 1212/*
1216 * Initialize the Linux inode, set up the operation vectors and 1213 * Initialize the Linux inode and set up the operation vectors.
1217 * unlock the inode.
1218 *
1219 * When reading existing inodes from disk this is called directly
1220 * from xfs_iget, when creating a new inode it is called from
1221 * xfs_ialloc after setting up the inode.
1222 * 1214 *
1223 * We are always called with an uninitialised linux inode here. 1215 * When reading existing inodes from disk this is called directly from xfs_iget;
1224 * We need to initialise the necessary fields and take a reference 1216 * when creating a new inode it is called from xfs_ialloc after setting up the
1225 * on it. 1217 * inode. These callers have different criteria for clearing XFS_INEW, so leave
1218 * it up to the caller to deal with unlocking the inode appropriately.
1226 */ 1219 */
1227void 1220void
1228xfs_setup_inode( 1221xfs_setup_inode(
@@ -1309,9 +1302,4 @@ xfs_setup_inode(
1309 inode_has_no_xattr(inode); 1302 inode_has_no_xattr(inode);
1310 cache_no_acl(inode); 1303 cache_no_acl(inode);
1311 } 1304 }
1312
1313 xfs_iflags_clear(ip, XFS_INEW);
1314 barrier();
1315
1316 unlock_new_inode(inode);
1317} 1305}
diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h
index ea7a98e9cb70..a0f84abb0d09 100644
--- a/fs/xfs/xfs_iops.h
+++ b/fs/xfs/xfs_iops.h
@@ -25,8 +25,6 @@ extern const struct file_operations xfs_dir_file_operations;
25 25
26extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size); 26extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size);
27 27
28extern void xfs_setup_inode(struct xfs_inode *);
29
30/* 28/*
31 * Internal setattr interfaces. 29 * Internal setattr interfaces.
32 */ 30 */
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 82e314258f73..80429891dc9b 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -229,7 +229,7 @@ xfs_bulkstat_grab_ichunk(
229 error = xfs_inobt_get_rec(cur, irec, &stat); 229 error = xfs_inobt_get_rec(cur, irec, &stat);
230 if (error) 230 if (error)
231 return error; 231 return error;
232 XFS_WANT_CORRUPTED_RETURN(stat == 1); 232 XFS_WANT_CORRUPTED_RETURN(cur->bc_mp, stat == 1);
233 233
234 /* Check if the record contains the inode in request */ 234 /* Check if the record contains the inode in request */
235 if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) { 235 if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index c31d2c2eadc4..7c7842c85a08 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -116,15 +116,6 @@ typedef __uint64_t __psunsigned_t;
116#undef XFS_NATIVE_HOST 116#undef XFS_NATIVE_HOST
117#endif 117#endif
118 118
119/*
120 * Feature macros (disable/enable)
121 */
122#ifdef CONFIG_SMP
123#define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
124#else
125#undef HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
126#endif
127
128#define irix_sgid_inherit xfs_params.sgid_inherit.val 119#define irix_sgid_inherit xfs_params.sgid_inherit.val
129#define irix_symlink_mode xfs_params.symlink_mode.val 120#define irix_symlink_mode xfs_params.symlink_mode.val
130#define xfs_panic_mask xfs_params.panic_mask.val 121#define xfs_panic_mask xfs_params.panic_mask.val
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index a5a945fc3bdc..4f5784f85a5b 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -4463,10 +4463,10 @@ xlog_do_recover(
4463 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); 4463 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
4464 ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC); 4464 ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
4465 ASSERT(xfs_sb_good_version(sbp)); 4465 ASSERT(xfs_sb_good_version(sbp));
4466 xfs_reinit_percpu_counters(log->l_mp);
4467
4466 xfs_buf_relse(bp); 4468 xfs_buf_relse(bp);
4467 4469
4468 /* We've re-read the superblock so re-initialize per-cpu counters */
4469 xfs_icsb_reinit_counters(log->l_mp);
4470 4470
4471 xlog_recover_check_summary(log); 4471 xlog_recover_check_summary(log);
4472 4472
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 4fa80e63eea2..2ce7ee3b4ec1 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -43,18 +43,6 @@
43#include "xfs_sysfs.h" 43#include "xfs_sysfs.h"
44 44
45 45
46#ifdef HAVE_PERCPU_SB
47STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
48 int);
49STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
50 int);
51STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
52#else
53
54#define xfs_icsb_balance_counter(mp, a, b) do { } while (0)
55#define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0)
56#endif
57
58static DEFINE_MUTEX(xfs_uuid_table_mutex); 46static DEFINE_MUTEX(xfs_uuid_table_mutex);
59static int xfs_uuid_table_size; 47static int xfs_uuid_table_size;
60static uuid_t *xfs_uuid_table; 48static uuid_t *xfs_uuid_table;
@@ -347,8 +335,7 @@ reread:
347 goto reread; 335 goto reread;
348 } 336 }
349 337
350 /* Initialize per-cpu counters */ 338 xfs_reinit_percpu_counters(mp);
351 xfs_icsb_reinit_counters(mp);
352 339
353 /* no need to be quiet anymore, so reset the buf ops */ 340 /* no need to be quiet anymore, so reset the buf ops */
354 bp->b_ops = &xfs_sb_buf_ops; 341 bp->b_ops = &xfs_sb_buf_ops;
@@ -1087,8 +1074,6 @@ xfs_log_sbcount(xfs_mount_t *mp)
1087 if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE)) 1074 if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
1088 return 0; 1075 return 0;
1089 1076
1090 xfs_icsb_sync_counters(mp, 0);
1091
1092 /* 1077 /*
1093 * we don't need to do this if we are updating the superblock 1078 * we don't need to do this if we are updating the superblock
1094 * counters on every modification. 1079 * counters on every modification.
@@ -1099,253 +1084,136 @@ xfs_log_sbcount(xfs_mount_t *mp)
1099 return xfs_sync_sb(mp, true); 1084 return xfs_sync_sb(mp, true);
1100} 1085}
1101 1086
1102/* 1087int
1103 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply 1088xfs_mod_icount(
1104 * a delta to a specified field in the in-core superblock. Simply 1089 struct xfs_mount *mp,
1105 * switch on the field indicated and apply the delta to that field. 1090 int64_t delta)
1106 * Fields are not allowed to dip below zero, so if the delta would
1107 * do this do not apply it and return EINVAL.
1108 *
1109 * The m_sb_lock must be held when this routine is called.
1110 */
1111STATIC int
1112xfs_mod_incore_sb_unlocked(
1113 xfs_mount_t *mp,
1114 xfs_sb_field_t field,
1115 int64_t delta,
1116 int rsvd)
1117{ 1091{
1118 int scounter; /* short counter for 32 bit fields */ 1092 /* deltas are +/-64, hence the large batch size of 128. */
1119 long long lcounter; /* long counter for 64 bit fields */ 1093 __percpu_counter_add(&mp->m_icount, delta, 128);
1120 long long res_used, rem; 1094 if (percpu_counter_compare(&mp->m_icount, 0) < 0) {
1121
1122 /*
1123 * With the in-core superblock spin lock held, switch
1124 * on the indicated field. Apply the delta to the
1125 * proper field. If the fields value would dip below
1126 * 0, then do not apply the delta and return EINVAL.
1127 */
1128 switch (field) {
1129 case XFS_SBS_ICOUNT:
1130 lcounter = (long long)mp->m_sb.sb_icount;
1131 lcounter += delta;
1132 if (lcounter < 0) {
1133 ASSERT(0);
1134 return -EINVAL;
1135 }
1136 mp->m_sb.sb_icount = lcounter;
1137 return 0;
1138 case XFS_SBS_IFREE:
1139 lcounter = (long long)mp->m_sb.sb_ifree;
1140 lcounter += delta;
1141 if (lcounter < 0) {
1142 ASSERT(0);
1143 return -EINVAL;
1144 }
1145 mp->m_sb.sb_ifree = lcounter;
1146 return 0;
1147 case XFS_SBS_FDBLOCKS:
1148 lcounter = (long long)
1149 mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1150 res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
1151
1152 if (delta > 0) { /* Putting blocks back */
1153 if (res_used > delta) {
1154 mp->m_resblks_avail += delta;
1155 } else {
1156 rem = delta - res_used;
1157 mp->m_resblks_avail = mp->m_resblks;
1158 lcounter += rem;
1159 }
1160 } else { /* Taking blocks away */
1161 lcounter += delta;
1162 if (lcounter >= 0) {
1163 mp->m_sb.sb_fdblocks = lcounter +
1164 XFS_ALLOC_SET_ASIDE(mp);
1165 return 0;
1166 }
1167
1168 /*
1169 * We are out of blocks, use any available reserved
1170 * blocks if were allowed to.
1171 */
1172 if (!rsvd)
1173 return -ENOSPC;
1174
1175 lcounter = (long long)mp->m_resblks_avail + delta;
1176 if (lcounter >= 0) {
1177 mp->m_resblks_avail = lcounter;
1178 return 0;
1179 }
1180 printk_once(KERN_WARNING
1181 "Filesystem \"%s\": reserve blocks depleted! "
1182 "Consider increasing reserve pool size.",
1183 mp->m_fsname);
1184 return -ENOSPC;
1185 }
1186
1187 mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
1188 return 0;
1189 case XFS_SBS_FREXTENTS:
1190 lcounter = (long long)mp->m_sb.sb_frextents;
1191 lcounter += delta;
1192 if (lcounter < 0) {
1193 return -ENOSPC;
1194 }
1195 mp->m_sb.sb_frextents = lcounter;
1196 return 0;
1197 case XFS_SBS_DBLOCKS:
1198 lcounter = (long long)mp->m_sb.sb_dblocks;
1199 lcounter += delta;
1200 if (lcounter < 0) {
1201 ASSERT(0);
1202 return -EINVAL;
1203 }
1204 mp->m_sb.sb_dblocks = lcounter;
1205 return 0;
1206 case XFS_SBS_AGCOUNT:
1207 scounter = mp->m_sb.sb_agcount;
1208 scounter += delta;
1209 if (scounter < 0) {
1210 ASSERT(0);
1211 return -EINVAL;
1212 }
1213 mp->m_sb.sb_agcount = scounter;
1214 return 0;
1215 case XFS_SBS_IMAX_PCT:
1216 scounter = mp->m_sb.sb_imax_pct;
1217 scounter += delta;
1218 if (scounter < 0) {
1219 ASSERT(0);
1220 return -EINVAL;
1221 }
1222 mp->m_sb.sb_imax_pct = scounter;
1223 return 0;
1224 case XFS_SBS_REXTSIZE:
1225 scounter = mp->m_sb.sb_rextsize;
1226 scounter += delta;
1227 if (scounter < 0) {
1228 ASSERT(0);
1229 return -EINVAL;
1230 }
1231 mp->m_sb.sb_rextsize = scounter;
1232 return 0;
1233 case XFS_SBS_RBMBLOCKS:
1234 scounter = mp->m_sb.sb_rbmblocks;
1235 scounter += delta;
1236 if (scounter < 0) {
1237 ASSERT(0);
1238 return -EINVAL;
1239 }
1240 mp->m_sb.sb_rbmblocks = scounter;
1241 return 0;
1242 case XFS_SBS_RBLOCKS:
1243 lcounter = (long long)mp->m_sb.sb_rblocks;
1244 lcounter += delta;
1245 if (lcounter < 0) {
1246 ASSERT(0);
1247 return -EINVAL;
1248 }
1249 mp->m_sb.sb_rblocks = lcounter;
1250 return 0;
1251 case XFS_SBS_REXTENTS:
1252 lcounter = (long long)mp->m_sb.sb_rextents;
1253 lcounter += delta;
1254 if (lcounter < 0) {
1255 ASSERT(0);
1256 return -EINVAL;
1257 }
1258 mp->m_sb.sb_rextents = lcounter;
1259 return 0;
1260 case XFS_SBS_REXTSLOG:
1261 scounter = mp->m_sb.sb_rextslog;
1262 scounter += delta;
1263 if (scounter < 0) {
1264 ASSERT(0);
1265 return -EINVAL;
1266 }
1267 mp->m_sb.sb_rextslog = scounter;
1268 return 0;
1269 default:
1270 ASSERT(0); 1095 ASSERT(0);
1096 percpu_counter_add(&mp->m_icount, -delta);
1271 return -EINVAL; 1097 return -EINVAL;
1272 } 1098 }
1099 return 0;
1273} 1100}
1274 1101
1275/*
1276 * xfs_mod_incore_sb() is used to change a field in the in-core
1277 * superblock structure by the specified delta. This modification
1278 * is protected by the m_sb_lock. Just use the xfs_mod_incore_sb_unlocked()
1279 * routine to do the work.
1280 */
1281int 1102int
1282xfs_mod_incore_sb( 1103xfs_mod_ifree(
1283 struct xfs_mount *mp, 1104 struct xfs_mount *mp,
1284 xfs_sb_field_t field, 1105 int64_t delta)
1285 int64_t delta,
1286 int rsvd)
1287{ 1106{
1288 int status; 1107 percpu_counter_add(&mp->m_ifree, delta);
1289 1108 if (percpu_counter_compare(&mp->m_ifree, 0) < 0) {
1290#ifdef HAVE_PERCPU_SB 1109 ASSERT(0);
1291 ASSERT(field < XFS_SBS_ICOUNT || field > XFS_SBS_FDBLOCKS); 1110 percpu_counter_add(&mp->m_ifree, -delta);
1292#endif 1111 return -EINVAL;
1293 spin_lock(&mp->m_sb_lock); 1112 }
1294 status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); 1113 return 0;
1295 spin_unlock(&mp->m_sb_lock);
1296
1297 return status;
1298} 1114}
1299 1115
1300/*
1301 * Change more than one field in the in-core superblock structure at a time.
1302 *
1303 * The fields and changes to those fields are specified in the array of
1304 * xfs_mod_sb structures passed in. Either all of the specified deltas
1305 * will be applied or none of them will. If any modified field dips below 0,
1306 * then all modifications will be backed out and EINVAL will be returned.
1307 *
1308 * Note that this function may not be used for the superblock values that
1309 * are tracked with the in-memory per-cpu counters - a direct call to
1310 * xfs_icsb_modify_counters is required for these.
1311 */
1312int 1116int
1313xfs_mod_incore_sb_batch( 1117xfs_mod_fdblocks(
1314 struct xfs_mount *mp, 1118 struct xfs_mount *mp,
1315 xfs_mod_sb_t *msb, 1119 int64_t delta,
1316 uint nmsb, 1120 bool rsvd)
1317 int rsvd)
1318{ 1121{
1319 xfs_mod_sb_t *msbp; 1122 int64_t lcounter;
1320 int error = 0; 1123 long long res_used;
1124 s32 batch;
1125
1126 if (delta > 0) {
1127 /*
1128 * If the reserve pool is depleted, put blocks back into it
1129 * first. Most of the time the pool is full.
1130 */
1131 if (likely(mp->m_resblks == mp->m_resblks_avail)) {
1132 percpu_counter_add(&mp->m_fdblocks, delta);
1133 return 0;
1134 }
1135
1136 spin_lock(&mp->m_sb_lock);
1137 res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
1138
1139 if (res_used > delta) {
1140 mp->m_resblks_avail += delta;
1141 } else {
1142 delta -= res_used;
1143 mp->m_resblks_avail = mp->m_resblks;
1144 percpu_counter_add(&mp->m_fdblocks, delta);
1145 }
1146 spin_unlock(&mp->m_sb_lock);
1147 return 0;
1148 }
1321 1149
1322 /* 1150 /*
1323 * Loop through the array of mod structures and apply each individually. 1151 * Taking blocks away, need to be more accurate the closer we
1324 * If any fail, then back out all those which have already been applied. 1152 * are to zero.
1325 * Do all of this within the scope of the m_sb_lock so that all of the 1153 *
1326 * changes will be atomic. 1154 * batch size is set to a maximum of 1024 blocks - if we are
1155 * allocating or freeing extents larger than this, then we aren't
1156 * going to be hammering the counter lock so a lock per update
1157 * is not a problem.
1158 *
1159 * If the counter has a value of less than 2 * max batch size,
1160 * then make everything serialise as we are really close to
1161 * ENOSPC.
1162 */
1163#define __BATCH 1024
1164 if (percpu_counter_compare(&mp->m_fdblocks, 2 * __BATCH) < 0)
1165 batch = 1;
1166 else
1167 batch = __BATCH;
1168#undef __BATCH
1169
1170 __percpu_counter_add(&mp->m_fdblocks, delta, batch);
1171 if (percpu_counter_compare(&mp->m_fdblocks,
1172 XFS_ALLOC_SET_ASIDE(mp)) >= 0) {
1173 /* we had space! */
1174 return 0;
1175 }
1176
1177 /*
1178 * lock up the sb for dipping into reserves before releasing the space
1179 * that took us to ENOSPC.
1327 */ 1180 */
1328 spin_lock(&mp->m_sb_lock); 1181 spin_lock(&mp->m_sb_lock);
1329 for (msbp = msb; msbp < (msb + nmsb); msbp++) { 1182 percpu_counter_add(&mp->m_fdblocks, -delta);
1330 ASSERT(msbp->msb_field < XFS_SBS_ICOUNT || 1183 if (!rsvd)
1331 msbp->msb_field > XFS_SBS_FDBLOCKS); 1184 goto fdblocks_enospc;
1332 1185
1333 error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, 1186 lcounter = (long long)mp->m_resblks_avail + delta;
1334 msbp->msb_delta, rsvd); 1187 if (lcounter >= 0) {
1335 if (error) 1188 mp->m_resblks_avail = lcounter;
1336 goto unwind; 1189 spin_unlock(&mp->m_sb_lock);
1190 return 0;
1337 } 1191 }
1192 printk_once(KERN_WARNING
1193 "Filesystem \"%s\": reserve blocks depleted! "
1194 "Consider increasing reserve pool size.",
1195 mp->m_fsname);
1196fdblocks_enospc:
1338 spin_unlock(&mp->m_sb_lock); 1197 spin_unlock(&mp->m_sb_lock);
1339 return 0; 1198 return -ENOSPC;
1199}
1340 1200
1341unwind: 1201int
1342 while (--msbp >= msb) { 1202xfs_mod_frextents(
1343 error = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field, 1203 struct xfs_mount *mp,
1344 -msbp->msb_delta, rsvd); 1204 int64_t delta)
1345 ASSERT(error == 0); 1205{
1346 } 1206 int64_t lcounter;
1207 int ret = 0;
1208
1209 spin_lock(&mp->m_sb_lock);
1210 lcounter = mp->m_sb.sb_frextents + delta;
1211 if (lcounter < 0)
1212 ret = -ENOSPC;
1213 else
1214 mp->m_sb.sb_frextents = lcounter;
1347 spin_unlock(&mp->m_sb_lock); 1215 spin_unlock(&mp->m_sb_lock);
1348 return error; 1216 return ret;
1349} 1217}
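
The new scheme boils down to one pattern: a generic percpu counter with an adaptive batch, taking m_sb_lock only when the reserve pool or the ENOSPC edge is involved. A condensed sketch of the negative-delta path follows. It is illustrative only: demo_mod_free() is a made-up name, the reserve-pool refill on the positive-delta side is omitted, and it uses only the generic percpu counter calls already visible in the hunk above.

	/* Sketch of the xfs_mod_fdblocks() fast/slow split; illustrative only. */
	static int
	demo_mod_free(
		struct xfs_mount	*mp,
		int64_t			delta)	/* delta < 0: allocating blocks */
	{
		s32	batch = 1024;		/* same cap as __BATCH above */

		/*
		 * Near ENOSPC the per-cpu drift (up to batch * nr_cpus blocks)
		 * matters, so force every update to fold into the global count.
		 */
		if (percpu_counter_compare(&mp->m_fdblocks, 2 * batch) < 0)
			batch = 1;

		__percpu_counter_add(&mp->m_fdblocks, delta, batch);
		if (percpu_counter_compare(&mp->m_fdblocks,
					   XFS_ALLOC_SET_ASIDE(mp)) >= 0)
			return 0;		/* still above the floor */

		/* Went below the set-aside floor: undo under m_sb_lock and fail. */
		spin_lock(&mp->m_sb_lock);
		percpu_counter_add(&mp->m_fdblocks, -delta);
		spin_unlock(&mp->m_sb_lock);
		return -ENOSPC;
	}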
1350 1218
1351/* 1219/*
@@ -1407,573 +1275,3 @@ xfs_dev_is_read_only(
1407 } 1275 }
1408 return 0; 1276 return 0;
1409} 1277}
1410
1411#ifdef HAVE_PERCPU_SB
1412/*
1413 * Per-cpu incore superblock counters
1414 *
1415 * Simple concept, difficult implementation
1416 *
1417 * Basically, replace the incore superblock counters with a distributed per cpu
1418 * counter for contended fields (e.g. free block count).
1419 *
1420 * Difficulties arise in that the incore sb is used for ENOSPC checking, and
1421 * hence needs to be accurately read when we are running low on space. Hence
1422 * there is a method to enable and disable the per-cpu counters based on how
1423 * much "stuff" is available in them.
1424 *
1425 * Basically, a counter is enabled if there is enough free resource to justify
1426 * running a per-cpu fast-path. If the per-cpu counter runs out (i.e. a local
1427 * ENOSPC), then we disable the counters to synchronise all callers and
1428 * re-distribute the available resources.
1429 *
1430 * If, once we redistributed the available resources, we still get a failure,
1431 * we disable the per-cpu counter and go through the slow path.
1432 *
1433 * The slow path is the current xfs_mod_incore_sb() function. This means that
1434 * when we disable a per-cpu counter, we need to drain its resources back to
1435 * the global superblock. We do this after disabling the counter to prevent
1436 * more threads from queueing up on the counter.
1437 *
1438 * Essentially, this means that we still need a lock in the fast path to enable
1439 * synchronisation between the global counters and the per-cpu counters. This
1440 * is not a problem because the lock will be local to a CPU almost all the time
1441 * and have little contention except when we get to ENOSPC conditions.
1442 *
1443 * Basically, this lock becomes a barrier that enables us to lock out the fast
1444 * path while we do things like enabling and disabling counters and
1445 * synchronising the counters.
1446 *
1447 * Locking rules:
1448 *
1449 * 1. m_sb_lock before picking up per-cpu locks
1450 * 2. per-cpu locks always picked up via for_each_online_cpu() order
1451 * 3. accurate counter sync requires m_sb_lock + per cpu locks
1452 * 4. modifying per-cpu counters requires holding per-cpu lock
1453 * 5. modifying global counters requires holding m_sb_lock
1454 * 6. enabling or disabling a counter requires holding the m_sb_lock
1455 * and _none_ of the per-cpu locks.
1456 *
1457 * Disabled counters are only ever re-enabled by a balance operation
1458 * that results in more free resources per CPU than a given threshold.
1459 * To ensure counters don't remain disabled, they are rebalanced when
1460 * the global resource goes above a higher threshold (i.e. some hysteresis
1461 * is present to prevent thrashing).
1462 */
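
The protocol this comment describes is easier to follow restated as a small state machine. The block below is a reading aid for the (removed) functions that follow, written as pseudocode; it is not code from the patch:

	/*
	 * Pseudocode restatement of the per-cpu counter protocol (reading aid):
	 *
	 *	modify(field, delta):
	 *		if counter disabled: take the m_sb_lock slow path
	 *		lock this cpu's counter
	 *		if applying delta would go negative locally:
	 *			unlock, balance(field), retry from the top
	 *		apply delta, unlock
	 *
	 *	disable(field):
	 *		set the disabled bit, fold every cpu's value into m_sb
	 *
	 *	balance(field):
	 *		disable(field), then split the m_sb value evenly across
	 *		online cpus and re-enable only if each cpu's share clears
	 *		the re-enable threshold
	 */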
1463
1464#ifdef CONFIG_HOTPLUG_CPU
1465/*
1466 * hot-plug CPU notifier support.
1467 *
1468 * We need a notifier per filesystem as we need to be able to identify
1469 * the filesystem to balance the counters out. This is achieved by
1470 * having a notifier block embedded in the xfs_mount_t and doing pointer
1471 * magic to get the mount pointer from the notifier block address.
1472 */
1473STATIC int
1474xfs_icsb_cpu_notify(
1475 struct notifier_block *nfb,
1476 unsigned long action,
1477 void *hcpu)
1478{
1479 xfs_icsb_cnts_t *cntp;
1480 xfs_mount_t *mp;
1481
1482 mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier);
1483 cntp = (xfs_icsb_cnts_t *)
1484 per_cpu_ptr(mp->m_sb_cnts, (unsigned long)hcpu);
1485 switch (action) {
1486 case CPU_UP_PREPARE:
1487 case CPU_UP_PREPARE_FROZEN:
1488 /* Easy Case - initialize the area and locks, and
1489 * then rebalance when online does everything else for us. */
1490 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1491 break;
1492 case CPU_ONLINE:
1493 case CPU_ONLINE_FROZEN:
1494 xfs_icsb_lock(mp);
1495 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
1496 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
1497 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
1498 xfs_icsb_unlock(mp);
1499 break;
1500 case CPU_DEAD:
1501 case CPU_DEAD_FROZEN:
1502 /* Disable all the counters, then fold the dead cpu's
1503 * count into the total on the global superblock and
1504 * re-enable the counters. */
1505 xfs_icsb_lock(mp);
1506 spin_lock(&mp->m_sb_lock);
1507 xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT);
1508 xfs_icsb_disable_counter(mp, XFS_SBS_IFREE);
1509 xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
1510
1511 mp->m_sb.sb_icount += cntp->icsb_icount;
1512 mp->m_sb.sb_ifree += cntp->icsb_ifree;
1513 mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
1514
1515 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1516
1517 xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
1518 xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
1519 xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
1520 spin_unlock(&mp->m_sb_lock);
1521 xfs_icsb_unlock(mp);
1522 break;
1523 }
1524
1525 return NOTIFY_OK;
1526}
1527#endif /* CONFIG_HOTPLUG_CPU */
1528
1529int
1530xfs_icsb_init_counters(
1531 xfs_mount_t *mp)
1532{
1533 xfs_icsb_cnts_t *cntp;
1534 int i;
1535
1536 mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
1537 if (mp->m_sb_cnts == NULL)
1538 return -ENOMEM;
1539
1540 for_each_online_cpu(i) {
1541 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1542 memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
1543 }
1544
1545 mutex_init(&mp->m_icsb_mutex);
1546
1547 /*
1548 * start with all counters disabled so that the
1549 * initial balance kicks us off correctly
1550 */
1551 mp->m_icsb_counters = -1;
1552
1553#ifdef CONFIG_HOTPLUG_CPU
1554 mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
1555 mp->m_icsb_notifier.priority = 0;
1556 register_hotcpu_notifier(&mp->m_icsb_notifier);
1557#endif /* CONFIG_HOTPLUG_CPU */
1558
1559 return 0;
1560}
1561
1562void
1563xfs_icsb_reinit_counters(
1564 xfs_mount_t *mp)
1565{
1566 xfs_icsb_lock(mp);
1567 /*
1568 * start with all counters disabled so that the
1569 * initial balance kicks us off correctly
1570 */
1571 mp->m_icsb_counters = -1;
1572 xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
1573 xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
1574 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
1575 xfs_icsb_unlock(mp);
1576}
1577
1578void
1579xfs_icsb_destroy_counters(
1580 xfs_mount_t *mp)
1581{
1582 if (mp->m_sb_cnts) {
1583 unregister_hotcpu_notifier(&mp->m_icsb_notifier);
1584 free_percpu(mp->m_sb_cnts);
1585 }
1586 mutex_destroy(&mp->m_icsb_mutex);
1587}
1588
1589STATIC void
1590xfs_icsb_lock_cntr(
1591 xfs_icsb_cnts_t *icsbp)
1592{
1593 while (test_and_set_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags)) {
1594 ndelay(1000);
1595 }
1596}
1597
1598STATIC void
1599xfs_icsb_unlock_cntr(
1600 xfs_icsb_cnts_t *icsbp)
1601{
1602 clear_bit(XFS_ICSB_FLAG_LOCK, &icsbp->icsb_flags);
1603}
1604
1605
1606STATIC void
1607xfs_icsb_lock_all_counters(
1608 xfs_mount_t *mp)
1609{
1610 xfs_icsb_cnts_t *cntp;
1611 int i;
1612
1613 for_each_online_cpu(i) {
1614 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1615 xfs_icsb_lock_cntr(cntp);
1616 }
1617}
1618
1619STATIC void
1620xfs_icsb_unlock_all_counters(
1621 xfs_mount_t *mp)
1622{
1623 xfs_icsb_cnts_t *cntp;
1624 int i;
1625
1626 for_each_online_cpu(i) {
1627 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1628 xfs_icsb_unlock_cntr(cntp);
1629 }
1630}
1631
1632STATIC void
1633xfs_icsb_count(
1634 xfs_mount_t *mp,
1635 xfs_icsb_cnts_t *cnt,
1636 int flags)
1637{
1638 xfs_icsb_cnts_t *cntp;
1639 int i;
1640
1641 memset(cnt, 0, sizeof(xfs_icsb_cnts_t));
1642
1643 if (!(flags & XFS_ICSB_LAZY_COUNT))
1644 xfs_icsb_lock_all_counters(mp);
1645
1646 for_each_online_cpu(i) {
1647 cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
1648 cnt->icsb_icount += cntp->icsb_icount;
1649 cnt->icsb_ifree += cntp->icsb_ifree;
1650 cnt->icsb_fdblocks += cntp->icsb_fdblocks;
1651 }
1652
1653 if (!(flags & XFS_ICSB_LAZY_COUNT))
1654 xfs_icsb_unlock_all_counters(mp);
1655}
1656
1657STATIC int
1658xfs_icsb_counter_disabled(
1659 xfs_mount_t *mp,
1660 xfs_sb_field_t field)
1661{
1662 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
1663 return test_bit(field, &mp->m_icsb_counters);
1664}
1665
1666STATIC void
1667xfs_icsb_disable_counter(
1668 xfs_mount_t *mp,
1669 xfs_sb_field_t field)
1670{
1671 xfs_icsb_cnts_t cnt;
1672
1673 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
1674
1675 /*
1676 * If we are already disabled, then there is nothing to do
1677 * here. We check before locking all the counters to avoid
1678 * the expensive lock operation when being called in the
1679 * slow path and the counter is already disabled. This is
1680 * safe because the only time we set or clear this state is under
1681 * the m_icsb_mutex.
1682 */
1683 if (xfs_icsb_counter_disabled(mp, field))
1684 return;
1685
1686 xfs_icsb_lock_all_counters(mp);
1687 if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
1688 /* drain back to superblock */
1689
1690 xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
1691 switch(field) {
1692 case XFS_SBS_ICOUNT:
1693 mp->m_sb.sb_icount = cnt.icsb_icount;
1694 break;
1695 case XFS_SBS_IFREE:
1696 mp->m_sb.sb_ifree = cnt.icsb_ifree;
1697 break;
1698 case XFS_SBS_FDBLOCKS:
1699 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
1700 break;
1701 default:
1702 BUG();
1703 }
1704 }
1705
1706 xfs_icsb_unlock_all_counters(mp);
1707}
1708
1709STATIC void
1710xfs_icsb_enable_counter(
1711 xfs_mount_t *mp,
1712 xfs_sb_field_t field,
1713 uint64_t count,
1714 uint64_t resid)
1715{
1716 xfs_icsb_cnts_t *cntp;
1717 int i;
1718
1719 ASSERT((field >= XFS_SBS_ICOUNT) && (field <= XFS_SBS_FDBLOCKS));
1720
1721 xfs_icsb_lock_all_counters(mp);
1722 for_each_online_cpu(i) {
1723 cntp = per_cpu_ptr(mp->m_sb_cnts, i);
1724 switch (field) {
1725 case XFS_SBS_ICOUNT:
1726 cntp->icsb_icount = count + resid;
1727 break;
1728 case XFS_SBS_IFREE:
1729 cntp->icsb_ifree = count + resid;
1730 break;
1731 case XFS_SBS_FDBLOCKS:
1732 cntp->icsb_fdblocks = count + resid;
1733 break;
1734 default:
1735 BUG();
1736 break;
1737 }
1738 resid = 0;
1739 }
1740 clear_bit(field, &mp->m_icsb_counters);
1741 xfs_icsb_unlock_all_counters(mp);
1742}
1743
1744void
1745xfs_icsb_sync_counters_locked(
1746 xfs_mount_t *mp,
1747 int flags)
1748{
1749 xfs_icsb_cnts_t cnt;
1750
1751 xfs_icsb_count(mp, &cnt, flags);
1752
1753 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
1754 mp->m_sb.sb_icount = cnt.icsb_icount;
1755 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
1756 mp->m_sb.sb_ifree = cnt.icsb_ifree;
1757 if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
1758 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
1759}
1760
1761/*
1762 * Accurate update of per-cpu counters to incore superblock
1763 */
1764void
1765xfs_icsb_sync_counters(
1766 xfs_mount_t *mp,
1767 int flags)
1768{
1769 spin_lock(&mp->m_sb_lock);
1770 xfs_icsb_sync_counters_locked(mp, flags);
1771 spin_unlock(&mp->m_sb_lock);
1772}
1773
1774/*
1775 * Balance and enable/disable counters as necessary.
1776 *
 1777 * Thresholds for re-enabling counters are somewhat magic. Inode counts are
 1778 * chosen to be the same number as a single on-disk allocation chunk per CPU, and
 1779 * free blocks is set far enough from zero that we aren't going to thrash when we
1780 * get near ENOSPC. We also need to supply a minimum we require per cpu to
1781 * prevent looping endlessly when xfs_alloc_space asks for more than will
1782 * be distributed to a single CPU but each CPU has enough blocks to be
1783 * reenabled.
1784 *
1785 * Note that we can be called when counters are already disabled.
1786 * xfs_icsb_disable_counter() optimises the counter locking in this case to
1787 * prevent locking every per-cpu counter needlessly.
1788 */
1789
1790#define XFS_ICSB_INO_CNTR_REENABLE (uint64_t)64
1791#define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
1792 (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
1793STATIC void
1794xfs_icsb_balance_counter_locked(
1795 xfs_mount_t *mp,
1796 xfs_sb_field_t field,
1797 int min_per_cpu)
1798{
1799 uint64_t count, resid;
1800 int weight = num_online_cpus();
1801 uint64_t min = (uint64_t)min_per_cpu;
1802
1803 /* disable counter and sync counter */
1804 xfs_icsb_disable_counter(mp, field);
1805
 1806 /* update counters - first CPU gets residual */
1807 switch (field) {
1808 case XFS_SBS_ICOUNT:
1809 count = mp->m_sb.sb_icount;
1810 resid = do_div(count, weight);
1811 if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
1812 return;
1813 break;
1814 case XFS_SBS_IFREE:
1815 count = mp->m_sb.sb_ifree;
1816 resid = do_div(count, weight);
1817 if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
1818 return;
1819 break;
1820 case XFS_SBS_FDBLOCKS:
1821 count = mp->m_sb.sb_fdblocks;
1822 resid = do_div(count, weight);
1823 if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
1824 return;
1825 break;
1826 default:
1827 BUG();
1828 count = resid = 0; /* quiet, gcc */
1829 break;
1830 }
1831
1832 xfs_icsb_enable_counter(mp, field, count, resid);
1833}
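
The distribution math above is compact enough that a worked example helps; the numbers below are hypothetical:

	/*
	 * Worked example of the split above (hypothetical numbers):
	 * sb_fdblocks = 10001 with 4 cpus online gives
	 *
	 *	count = 10001;
	 *	resid = do_div(count, 4);	-> count = 2500, resid = 1
	 *
	 * Re-enabling requires count >= max(min, 512 + XFS_ALLOC_SET_ASIDE(mp)).
	 * If that holds, cpu 0 is seeded with count + resid = 2501 and the
	 * other cpus with 2500 each, so the per-cpu values still sum to
	 * exactly 10001.
	 */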
1834
1835STATIC void
1836xfs_icsb_balance_counter(
1837 xfs_mount_t *mp,
1838 xfs_sb_field_t fields,
1839 int min_per_cpu)
1840{
1841 spin_lock(&mp->m_sb_lock);
1842 xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
1843 spin_unlock(&mp->m_sb_lock);
1844}
1845
1846int
1847xfs_icsb_modify_counters(
1848 xfs_mount_t *mp,
1849 xfs_sb_field_t field,
1850 int64_t delta,
1851 int rsvd)
1852{
1853 xfs_icsb_cnts_t *icsbp;
1854 long long lcounter; /* long counter for 64 bit fields */
1855 int ret = 0;
1856
1857 might_sleep();
1858again:
1859 preempt_disable();
1860 icsbp = this_cpu_ptr(mp->m_sb_cnts);
1861
1862 /*
1863 * if the counter is disabled, go to slow path
1864 */
1865 if (unlikely(xfs_icsb_counter_disabled(mp, field)))
1866 goto slow_path;
1867 xfs_icsb_lock_cntr(icsbp);
1868 if (unlikely(xfs_icsb_counter_disabled(mp, field))) {
1869 xfs_icsb_unlock_cntr(icsbp);
1870 goto slow_path;
1871 }
1872
1873 switch (field) {
1874 case XFS_SBS_ICOUNT:
1875 lcounter = icsbp->icsb_icount;
1876 lcounter += delta;
1877 if (unlikely(lcounter < 0))
1878 goto balance_counter;
1879 icsbp->icsb_icount = lcounter;
1880 break;
1881
1882 case XFS_SBS_IFREE:
1883 lcounter = icsbp->icsb_ifree;
1884 lcounter += delta;
1885 if (unlikely(lcounter < 0))
1886 goto balance_counter;
1887 icsbp->icsb_ifree = lcounter;
1888 break;
1889
1890 case XFS_SBS_FDBLOCKS:
1891 BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
1892
1893 lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1894 lcounter += delta;
1895 if (unlikely(lcounter < 0))
1896 goto balance_counter;
1897 icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
1898 break;
1899 default:
1900 BUG();
1901 break;
1902 }
1903 xfs_icsb_unlock_cntr(icsbp);
1904 preempt_enable();
1905 return 0;
1906
1907slow_path:
1908 preempt_enable();
1909
1910 /*
1911 * serialise with a mutex so we don't burn lots of cpu on
1912 * the superblock lock. We still need to hold the superblock
1913 * lock, however, when we modify the global structures.
1914 */
1915 xfs_icsb_lock(mp);
1916
1917 /*
1918 * Now running atomically.
1919 *
1920 * If the counter is enabled, someone has beaten us to rebalancing.
1921 * Drop the lock and try again in the fast path....
1922 */
1923 if (!(xfs_icsb_counter_disabled(mp, field))) {
1924 xfs_icsb_unlock(mp);
1925 goto again;
1926 }
1927
1928 /*
1929 * The counter is currently disabled. Because we are
1930 * running atomically here, we know a rebalance cannot
1931 * be in progress. Hence we can go straight to operating
1932 * on the global superblock. We do not call xfs_mod_incore_sb()
1933 * here even though we need to get the m_sb_lock. Doing so
1934 * will cause us to re-enter this function and deadlock.
1935 * Hence we get the m_sb_lock ourselves and then call
1936 * xfs_mod_incore_sb_unlocked() as the unlocked path operates
1937 * directly on the global counters.
1938 */
1939 spin_lock(&mp->m_sb_lock);
1940 ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
1941 spin_unlock(&mp->m_sb_lock);
1942
1943 /*
1944 * Now that we've modified the global superblock, we
1945 * may be able to re-enable the distributed counters
1946 * (e.g. lots of space just got freed). After that
1947 * we are done.
1948 */
1949 if (ret != -ENOSPC)
1950 xfs_icsb_balance_counter(mp, field, 0);
1951 xfs_icsb_unlock(mp);
1952 return ret;
1953
1954balance_counter:
1955 xfs_icsb_unlock_cntr(icsbp);
1956 preempt_enable();
1957
1958 /*
1959 * We may have multiple threads here if multiple per-cpu
1960 * counters run dry at the same time. This will mean we can
1961 * do more balances than strictly necessary but it is not
1962 * the common slowpath case.
1963 */
1964 xfs_icsb_lock(mp);
1965
1966 /*
1967 * running atomically.
1968 *
1969 * This will leave the counter in the correct state for future
1970 * accesses. After the rebalance, we simply try again and our retry
1971 * will either succeed through the fast path or slow path without
1972 * another balance operation being required.
1973 */
1974 xfs_icsb_balance_counter(mp, field, delta);
1975 xfs_icsb_unlock(mp);
1976 goto again;
1977}
1978
1979#endif
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 0d8abd6364d9..8c995a2ccb6f 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -18,8 +18,6 @@
18#ifndef __XFS_MOUNT_H__ 18#ifndef __XFS_MOUNT_H__
19#define __XFS_MOUNT_H__ 19#define __XFS_MOUNT_H__
20 20
21#ifdef __KERNEL__
22
23struct xlog; 21struct xlog;
24struct xfs_inode; 22struct xfs_inode;
25struct xfs_mru_cache; 23struct xfs_mru_cache;
@@ -29,44 +27,6 @@ struct xfs_quotainfo;
29struct xfs_dir_ops; 27struct xfs_dir_ops;
30struct xfs_da_geometry; 28struct xfs_da_geometry;
31 29
32#ifdef HAVE_PERCPU_SB
33
34/*
35 * Valid per-cpu incore superblock counters. Note that if you add new counters,
36 * you may need to define new counter disabled bit field descriptors as there
 37 * are more possible fields in the superblock than can fit in a bitfield on a
 38 * 32 bit platform. The XFS_SBS_* values for the current counters just
39 * fit.
40 */
41typedef struct xfs_icsb_cnts {
42 uint64_t icsb_fdblocks;
43 uint64_t icsb_ifree;
44 uint64_t icsb_icount;
45 unsigned long icsb_flags;
46} xfs_icsb_cnts_t;
47
48#define XFS_ICSB_FLAG_LOCK (1 << 0) /* counter lock bit */
49
50#define XFS_ICSB_LAZY_COUNT (1 << 1) /* accuracy not needed */
51
52extern int xfs_icsb_init_counters(struct xfs_mount *);
53extern void xfs_icsb_reinit_counters(struct xfs_mount *);
54extern void xfs_icsb_destroy_counters(struct xfs_mount *);
55extern void xfs_icsb_sync_counters(struct xfs_mount *, int);
56extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int);
57extern int xfs_icsb_modify_counters(struct xfs_mount *, xfs_sb_field_t,
58 int64_t, int);
59
60#else
61#define xfs_icsb_init_counters(mp) (0)
62#define xfs_icsb_destroy_counters(mp) do { } while (0)
63#define xfs_icsb_reinit_counters(mp) do { } while (0)
64#define xfs_icsb_sync_counters(mp, flags) do { } while (0)
65#define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0)
66#define xfs_icsb_modify_counters(mp, field, delta, rsvd) \
67 xfs_mod_incore_sb(mp, field, delta, rsvd)
68#endif
69
70/* dynamic preallocation free space thresholds, 5% down to 1% */ 30/* dynamic preallocation free space thresholds, 5% down to 1% */
71enum { 31enum {
72 XFS_LOWSP_1_PCNT = 0, 32 XFS_LOWSP_1_PCNT = 0,
@@ -81,8 +41,13 @@ typedef struct xfs_mount {
81 struct super_block *m_super; 41 struct super_block *m_super;
82 xfs_tid_t m_tid; /* next unused tid for fs */ 42 xfs_tid_t m_tid; /* next unused tid for fs */
83 struct xfs_ail *m_ail; /* fs active log item list */ 43 struct xfs_ail *m_ail; /* fs active log item list */
84 xfs_sb_t m_sb; /* copy of fs superblock */ 44
45 struct xfs_sb m_sb; /* copy of fs superblock */
85 spinlock_t m_sb_lock; /* sb counter lock */ 46 spinlock_t m_sb_lock; /* sb counter lock */
47 struct percpu_counter m_icount; /* allocated inodes counter */
48 struct percpu_counter m_ifree; /* free inodes counter */
49 struct percpu_counter m_fdblocks; /* free block counter */
50
86 struct xfs_buf *m_sb_bp; /* buffer for superblock */ 51 struct xfs_buf *m_sb_bp; /* buffer for superblock */
87 char *m_fsname; /* filesystem name */ 52 char *m_fsname; /* filesystem name */
88 int m_fsname_len; /* strlen of fs name */ 53 int m_fsname_len; /* strlen of fs name */
@@ -152,12 +117,6 @@ typedef struct xfs_mount {
152 const struct xfs_dir_ops *m_nondir_inode_ops; /* !dir inode ops */ 117 const struct xfs_dir_ops *m_nondir_inode_ops; /* !dir inode ops */
153 uint m_chsize; /* size of next field */ 118 uint m_chsize; /* size of next field */
154 atomic_t m_active_trans; /* number trans frozen */ 119 atomic_t m_active_trans; /* number trans frozen */
155#ifdef HAVE_PERCPU_SB
156 xfs_icsb_cnts_t __percpu *m_sb_cnts; /* per-cpu superblock counters */
157 unsigned long m_icsb_counters; /* disabled per-cpu counters */
158 struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */
159 struct mutex m_icsb_mutex; /* balancer sync lock */
160#endif
161 struct xfs_mru_cache *m_filestream; /* per-mount filestream data */ 120 struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
162 struct delayed_work m_reclaim_work; /* background inode reclaim */ 121 struct delayed_work m_reclaim_work; /* background inode reclaim */
163 struct delayed_work m_eofblocks_work; /* background eof blocks 122 struct delayed_work m_eofblocks_work; /* background eof blocks
@@ -301,35 +260,6 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
301} 260}
302 261
303/* 262/*
304 * Per-cpu superblock locking functions
305 */
306#ifdef HAVE_PERCPU_SB
307static inline void
308xfs_icsb_lock(xfs_mount_t *mp)
309{
310 mutex_lock(&mp->m_icsb_mutex);
311}
312
313static inline void
314xfs_icsb_unlock(xfs_mount_t *mp)
315{
316 mutex_unlock(&mp->m_icsb_mutex);
317}
318#else
319#define xfs_icsb_lock(mp)
320#define xfs_icsb_unlock(mp)
321#endif
322
323/*
324 * This structure is for use by the xfs_mod_incore_sb_batch() routine.
 325 * xfs_growfs can specify a few fields whose deltas exceed the int limit
326 */
327typedef struct xfs_mod_sb {
328 xfs_sb_field_t msb_field; /* Field to modify, see below */
329 int64_t msb_delta; /* Change to make to specified field */
330} xfs_mod_sb_t;
331
332/*
333 * Per-ag incore structure, copies of information in agf and agi, to improve the 263 * Per-ag incore structure, copies of information in agf and agi, to improve the
334 * performance of allocation group selection. 264 * performance of allocation group selection.
335 */ 265 */
@@ -383,11 +313,14 @@ extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
383extern int xfs_mountfs(xfs_mount_t *mp); 313extern int xfs_mountfs(xfs_mount_t *mp);
384extern int xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount, 314extern int xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
385 xfs_agnumber_t *maxagi); 315 xfs_agnumber_t *maxagi);
386
387extern void xfs_unmountfs(xfs_mount_t *); 316extern void xfs_unmountfs(xfs_mount_t *);
388extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int); 317
389extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *, 318extern int xfs_mod_icount(struct xfs_mount *mp, int64_t delta);
390 uint, int); 319extern int xfs_mod_ifree(struct xfs_mount *mp, int64_t delta);
320extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
321 bool reserved);
322extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
323
391extern int xfs_mount_log_sb(xfs_mount_t *); 324extern int xfs_mount_log_sb(xfs_mount_t *);
392extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); 325extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
393extern int xfs_readsb(xfs_mount_t *, int); 326extern int xfs_readsb(xfs_mount_t *, int);
@@ -399,6 +332,4 @@ extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
399 332
400extern void xfs_set_low_space_thresholds(struct xfs_mount *); 333extern void xfs_set_low_space_thresholds(struct xfs_mount *);
401 334
402#endif /* __KERNEL__ */
403
404#endif /* __XFS_MOUNT_H__ */ 335#endif /* __XFS_MOUNT_H__ */
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 4b33ef112400..365dd57ea760 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -300,8 +300,10 @@ xfs_fs_commit_blocks(
300 300
301 tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); 301 tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
302 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0); 302 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
303 if (error) 303 if (error) {
304 xfs_trans_cancel(tp, 0);
304 goto out_drop_iolock; 305 goto out_drop_iolock;
306 }
305 307
306 xfs_ilock(ip, XFS_ILOCK_EXCL); 308 xfs_ilock(ip, XFS_ILOCK_EXCL);
307 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 309 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 53cc2aaf8d2b..5538468c7f63 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -719,6 +719,7 @@ xfs_qm_qino_alloc(
719 xfs_trans_t *tp; 719 xfs_trans_t *tp;
720 int error; 720 int error;
721 int committed; 721 int committed;
722 bool need_alloc = true;
722 723
723 *ip = NULL; 724 *ip = NULL;
724 /* 725 /*
@@ -747,6 +748,7 @@ xfs_qm_qino_alloc(
747 return error; 748 return error;
748 mp->m_sb.sb_gquotino = NULLFSINO; 749 mp->m_sb.sb_gquotino = NULLFSINO;
749 mp->m_sb.sb_pquotino = NULLFSINO; 750 mp->m_sb.sb_pquotino = NULLFSINO;
751 need_alloc = false;
750 } 752 }
751 } 753 }
752 754
@@ -758,7 +760,7 @@ xfs_qm_qino_alloc(
758 return error; 760 return error;
759 } 761 }
760 762
761 if (!*ip) { 763 if (need_alloc) {
762 error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, 764 error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
763 &committed); 765 &committed);
764 if (error) { 766 if (error) {
@@ -794,11 +796,14 @@ xfs_qm_qino_alloc(
794 spin_unlock(&mp->m_sb_lock); 796 spin_unlock(&mp->m_sb_lock);
795 xfs_log_sb(tp); 797 xfs_log_sb(tp);
796 798
797 if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { 799 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
800 if (error) {
801 ASSERT(XFS_FORCED_SHUTDOWN(mp));
798 xfs_alert(mp, "%s failed (error %d)!", __func__, error); 802 xfs_alert(mp, "%s failed (error %d)!", __func__, error);
799 return error;
800 } 803 }
801 return 0; 804 if (need_alloc)
805 xfs_finish_inode_setup(*ip);
806 return error;
802} 807}
803 808
804 809
@@ -836,6 +841,11 @@ xfs_qm_reset_dqcounts(
836 */ 841 */
837 xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR, 842 xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
838 "xfs_quotacheck"); 843 "xfs_quotacheck");
844 /*
845 * Reset type in case we are reusing group quota file for
846 * project quotas or vice versa
847 */
848 ddq->d_flags = type;
839 ddq->d_bcount = 0; 849 ddq->d_bcount = 0;
840 ddq->d_icount = 0; 850 ddq->d_icount = 0;
841 ddq->d_rtbcount = 0; 851 ddq->d_rtbcount = 0;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 50c6fd3bd362..3ad0b17885f1 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -109,8 +109,6 @@ static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
109#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ 109#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
110#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ 110#define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
111#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ 111#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
112#define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */
113#define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */
114#define MNTOPT_DISCARD "discard" /* Discard unused blocks */ 112#define MNTOPT_DISCARD "discard" /* Discard unused blocks */
115#define MNTOPT_NODISCARD "nodiscard" /* Do not discard unused blocks */ 113#define MNTOPT_NODISCARD "nodiscard" /* Do not discard unused blocks */
116 114
@@ -361,28 +359,10 @@ xfs_parseargs(
361 } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { 359 } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
362 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE); 360 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
363 mp->m_qflags &= ~XFS_GQUOTA_ENFD; 361 mp->m_qflags &= ~XFS_GQUOTA_ENFD;
364 } else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
365 xfs_warn(mp,
366 "delaylog is the default now, option is deprecated.");
367 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
368 xfs_warn(mp,
369 "nodelaylog support has been removed, option is deprecated.");
370 } else if (!strcmp(this_char, MNTOPT_DISCARD)) { 362 } else if (!strcmp(this_char, MNTOPT_DISCARD)) {
371 mp->m_flags |= XFS_MOUNT_DISCARD; 363 mp->m_flags |= XFS_MOUNT_DISCARD;
372 } else if (!strcmp(this_char, MNTOPT_NODISCARD)) { 364 } else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
373 mp->m_flags &= ~XFS_MOUNT_DISCARD; 365 mp->m_flags &= ~XFS_MOUNT_DISCARD;
374 } else if (!strcmp(this_char, "ihashsize")) {
375 xfs_warn(mp,
376 "ihashsize no longer used, option is deprecated.");
377 } else if (!strcmp(this_char, "osyncisdsync")) {
378 xfs_warn(mp,
379 "osyncisdsync has no effect, option is deprecated.");
380 } else if (!strcmp(this_char, "osyncisosync")) {
381 xfs_warn(mp,
382 "osyncisosync has no effect, option is deprecated.");
383 } else if (!strcmp(this_char, "irixsgid")) {
384 xfs_warn(mp,
385 "irixsgid is now a sysctl(2) variable, option is deprecated.");
386 } else { 366 } else {
387 xfs_warn(mp, "unknown mount option [%s].", this_char); 367 xfs_warn(mp, "unknown mount option [%s].", this_char);
388 return -EINVAL; 368 return -EINVAL;
@@ -1035,23 +1015,6 @@ xfs_free_fsname(
1035 kfree(mp->m_logname); 1015 kfree(mp->m_logname);
1036} 1016}
1037 1017
1038STATIC void
1039xfs_fs_put_super(
1040 struct super_block *sb)
1041{
1042 struct xfs_mount *mp = XFS_M(sb);
1043
1044 xfs_filestream_unmount(mp);
1045 xfs_unmountfs(mp);
1046
1047 xfs_freesb(mp);
1048 xfs_icsb_destroy_counters(mp);
1049 xfs_destroy_mount_workqueues(mp);
1050 xfs_close_devices(mp);
1051 xfs_free_fsname(mp);
1052 kfree(mp);
1053}
1054
1055STATIC int 1018STATIC int
1056xfs_fs_sync_fs( 1019xfs_fs_sync_fs(
1057 struct super_block *sb, 1020 struct super_block *sb,
@@ -1087,6 +1050,9 @@ xfs_fs_statfs(
1087 xfs_sb_t *sbp = &mp->m_sb; 1050 xfs_sb_t *sbp = &mp->m_sb;
1088 struct xfs_inode *ip = XFS_I(dentry->d_inode); 1051 struct xfs_inode *ip = XFS_I(dentry->d_inode);
1089 __uint64_t fakeinos, id; 1052 __uint64_t fakeinos, id;
1053 __uint64_t icount;
1054 __uint64_t ifree;
1055 __uint64_t fdblocks;
1090 xfs_extlen_t lsize; 1056 xfs_extlen_t lsize;
1091 __int64_t ffree; 1057 __int64_t ffree;
1092 1058
@@ -1097,17 +1063,21 @@ xfs_fs_statfs(
1097 statp->f_fsid.val[0] = (u32)id; 1063 statp->f_fsid.val[0] = (u32)id;
1098 statp->f_fsid.val[1] = (u32)(id >> 32); 1064 statp->f_fsid.val[1] = (u32)(id >> 32);
1099 1065
1100 xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); 1066 icount = percpu_counter_sum(&mp->m_icount);
1067 ifree = percpu_counter_sum(&mp->m_ifree);
1068 fdblocks = percpu_counter_sum(&mp->m_fdblocks);
1101 1069
1102 spin_lock(&mp->m_sb_lock); 1070 spin_lock(&mp->m_sb_lock);
1103 statp->f_bsize = sbp->sb_blocksize; 1071 statp->f_bsize = sbp->sb_blocksize;
1104 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; 1072 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
1105 statp->f_blocks = sbp->sb_dblocks - lsize; 1073 statp->f_blocks = sbp->sb_dblocks - lsize;
1106 statp->f_bfree = statp->f_bavail = 1074 spin_unlock(&mp->m_sb_lock);
1107 sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); 1075
1076 statp->f_bfree = fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1077 statp->f_bavail = statp->f_bfree;
1078
1108 fakeinos = statp->f_bfree << sbp->sb_inopblog; 1079 fakeinos = statp->f_bfree << sbp->sb_inopblog;
1109 statp->f_files = 1080 statp->f_files = MIN(icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
1110 MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
1111 if (mp->m_maxicount) 1081 if (mp->m_maxicount)
1112 statp->f_files = min_t(typeof(statp->f_files), 1082 statp->f_files = min_t(typeof(statp->f_files),
1113 statp->f_files, 1083 statp->f_files,
@@ -1119,10 +1089,9 @@ xfs_fs_statfs(
1119 sbp->sb_icount); 1089 sbp->sb_icount);
1120 1090
1121 /* make sure statp->f_ffree does not underflow */ 1091 /* make sure statp->f_ffree does not underflow */
1122 ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); 1092 ffree = statp->f_files - (icount - ifree);
1123 statp->f_ffree = max_t(__int64_t, ffree, 0); 1093 statp->f_ffree = max_t(__int64_t, ffree, 0);
1124 1094
1125 spin_unlock(&mp->m_sb_lock);
1126 1095
1127 if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && 1096 if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1128 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) == 1097 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
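
statfs is the one consumer that needs precise values, so the percpu counters are summed once with percpu_counter_sum() before m_sb_lock is taken, and the lock now covers only fields still read from m_sb. Below is a minimal sketch of how that snapshot drives the derived fields; demo_fill_statfs() and its inopblog parameter are illustrative, but the field names and helpers are the ones visible in this hunk.

	/* Illustrative only: how the counter snapshot feeds the statfs fields. */
	static void
	demo_fill_statfs(
		struct xfs_mount	*mp,
		struct kstatfs		*statp,
		uint8_t			inopblog)
	{
		uint64_t	icount = percpu_counter_sum(&mp->m_icount);
		uint64_t	ifree = percpu_counter_sum(&mp->m_ifree);
		uint64_t	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
		uint64_t	fakeinos;
		int64_t		ffree;

		statp->f_bfree = fdblocks - XFS_ALLOC_SET_ASIDE(mp);
		statp->f_bavail = statp->f_bfree;

		/* inodes that could still be created from the free space */
		fakeinos = statp->f_bfree << inopblog;
		statp->f_files = min_t(uint64_t, icount + fakeinos,
				       XFS_MAXINUMBER);

		/* guard against underflow if icount and ifree drift apart */
		ffree = statp->f_files - (icount - ifree);
		statp->f_ffree = max_t(int64_t, ffree, 0);
	}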
@@ -1403,6 +1372,51 @@ xfs_finish_flags(
1403 return 0; 1372 return 0;
1404} 1373}
1405 1374
1375static int
1376xfs_init_percpu_counters(
1377 struct xfs_mount *mp)
1378{
1379 int error;
1380
1381 error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1382 if (error)
 1383 return -ENOMEM;
1384
1385 error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1386 if (error)
1387 goto free_icount;
1388
1389 error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1390 if (error)
1391 goto free_ifree;
1392
1393 return 0;
1394
1395free_ifree:
1396 percpu_counter_destroy(&mp->m_ifree);
1397free_icount:
1398 percpu_counter_destroy(&mp->m_icount);
1399 return -ENOMEM;
1400}
1401
1402void
1403xfs_reinit_percpu_counters(
1404 struct xfs_mount *mp)
1405{
1406 percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1407 percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1408 percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1409}
1410
1411static void
1412xfs_destroy_percpu_counters(
1413 struct xfs_mount *mp)
1414{
1415 percpu_counter_destroy(&mp->m_icount);
1416 percpu_counter_destroy(&mp->m_ifree);
1417 percpu_counter_destroy(&mp->m_fdblocks);
1418}
1419
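
The counters are created before the superblock has been read, so they necessarily start at zero and are seeded later. A hedged sketch of the lifecycle implied by these helpers; the ordering is inferred from this hunk and the fill_super changes below, not spelled out in the patch:

	/*
	 * Inferred lifecycle of the percpu counters (illustrative ordering):
	 *
	 *	xfs_init_percpu_counters(mp);	  counters exist, all zero
	 *	xfs_readsb(mp, flags);		  m_sb now holds on-disk values
	 *	xfs_reinit_percpu_counters(mp);	  counters seeded from m_sb
	 *	...
	 *	xfs_destroy_percpu_counters(mp);  unmount, or unwind on failure
	 *
	 * xfs_reinit_percpu_counters() is also made visible to the rest of
	 * XFS (see the xfs_super.h hunk below), presumably so log recovery
	 * can re-seed the counters after replay; xfs_log_recover.c is
	 * touched by this patch.
	 */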
1406STATIC int 1420STATIC int
1407xfs_fs_fill_super( 1421xfs_fs_fill_super(
1408 struct super_block *sb, 1422 struct super_block *sb,
@@ -1451,7 +1465,7 @@ xfs_fs_fill_super(
1451 if (error) 1465 if (error)
1452 goto out_close_devices; 1466 goto out_close_devices;
1453 1467
1454 error = xfs_icsb_init_counters(mp); 1468 error = xfs_init_percpu_counters(mp);
1455 if (error) 1469 if (error)
1456 goto out_destroy_workqueues; 1470 goto out_destroy_workqueues;
1457 1471
@@ -1509,7 +1523,7 @@ xfs_fs_fill_super(
1509 out_free_sb: 1523 out_free_sb:
1510 xfs_freesb(mp); 1524 xfs_freesb(mp);
1511 out_destroy_counters: 1525 out_destroy_counters:
1512 xfs_icsb_destroy_counters(mp); 1526 xfs_destroy_percpu_counters(mp);
1513out_destroy_workqueues: 1527out_destroy_workqueues:
1514 xfs_destroy_mount_workqueues(mp); 1528 xfs_destroy_mount_workqueues(mp);
1515 out_close_devices: 1529 out_close_devices:
@@ -1526,6 +1540,24 @@ out_destroy_workqueues:
1526 goto out_free_sb; 1540 goto out_free_sb;
1527} 1541}
1528 1542
1543STATIC void
1544xfs_fs_put_super(
1545 struct super_block *sb)
1546{
1547 struct xfs_mount *mp = XFS_M(sb);
1548
1549 xfs_notice(mp, "Unmounting Filesystem");
1550 xfs_filestream_unmount(mp);
1551 xfs_unmountfs(mp);
1552
1553 xfs_freesb(mp);
1554 xfs_destroy_percpu_counters(mp);
1555 xfs_destroy_mount_workqueues(mp);
1556 xfs_close_devices(mp);
1557 xfs_free_fsname(mp);
1558 kfree(mp);
1559}
1560
1529STATIC struct dentry * 1561STATIC struct dentry *
1530xfs_fs_mount( 1562xfs_fs_mount(
1531 struct file_system_type *fs_type, 1563 struct file_system_type *fs_type,
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
index 2b830c2f322e..499058fea303 100644
--- a/fs/xfs/xfs_super.h
+++ b/fs/xfs/xfs_super.h
@@ -72,6 +72,8 @@ extern const struct export_operations xfs_export_operations;
72extern const struct xattr_handler *xfs_xattr_handlers[]; 72extern const struct xattr_handler *xfs_xattr_handlers[];
73extern const struct quotactl_ops xfs_quotactl_operations; 73extern const struct quotactl_ops xfs_quotactl_operations;
74 74
75extern void xfs_reinit_percpu_counters(struct xfs_mount *mp);
76
75#define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info)) 77#define XFS_M(sb) ((struct xfs_mount *)((sb)->s_fs_info))
76 78
77#endif /* __XFS_SUPER_H__ */ 79#endif /* __XFS_SUPER_H__ */
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 25791df6f638..3df411eadb86 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -177,7 +177,7 @@ xfs_symlink(
177 int pathlen; 177 int pathlen;
178 struct xfs_bmap_free free_list; 178 struct xfs_bmap_free free_list;
179 xfs_fsblock_t first_block; 179 xfs_fsblock_t first_block;
180 bool unlock_dp_on_error = false; 180 bool unlock_dp_on_error = false;
181 uint cancel_flags; 181 uint cancel_flags;
182 int committed; 182 int committed;
183 xfs_fileoff_t first_fsb; 183 xfs_fileoff_t first_fsb;
@@ -221,7 +221,7 @@ xfs_symlink(
221 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, 221 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
222 &udqp, &gdqp, &pdqp); 222 &udqp, &gdqp, &pdqp);
223 if (error) 223 if (error)
224 goto std_return; 224 return error;
225 225
226 tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK); 226 tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK);
227 cancel_flags = XFS_TRANS_RELEASE_LOG_RES; 227 cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
@@ -241,7 +241,7 @@ xfs_symlink(
241 } 241 }
242 if (error) { 242 if (error) {
243 cancel_flags = 0; 243 cancel_flags = 0;
244 goto error_return; 244 goto out_trans_cancel;
245 } 245 }
246 246
247 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT); 247 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
@@ -252,7 +252,7 @@ xfs_symlink(
252 */ 252 */
253 if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) { 253 if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
254 error = -EPERM; 254 error = -EPERM;
255 goto error_return; 255 goto out_trans_cancel;
256 } 256 }
257 257
258 /* 258 /*
@@ -261,7 +261,7 @@ xfs_symlink(
261 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, 261 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
262 pdqp, resblks, 1, 0); 262 pdqp, resblks, 1, 0);
263 if (error) 263 if (error)
264 goto error_return; 264 goto out_trans_cancel;
265 265
266 /* 266 /*
267 * Check for ability to enter directory entry, if no space reserved. 267 * Check for ability to enter directory entry, if no space reserved.
@@ -269,7 +269,7 @@ xfs_symlink(
269 if (!resblks) { 269 if (!resblks) {
270 error = xfs_dir_canenter(tp, dp, link_name); 270 error = xfs_dir_canenter(tp, dp, link_name);
271 if (error) 271 if (error)
272 goto error_return; 272 goto out_trans_cancel;
273 } 273 }
274 /* 274 /*
275 * Initialize the bmap freelist prior to calling either 275 * Initialize the bmap freelist prior to calling either
@@ -282,15 +282,14 @@ xfs_symlink(
282 */ 282 */
283 error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0, 283 error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
284 prid, resblks > 0, &ip, NULL); 284 prid, resblks > 0, &ip, NULL);
285 if (error) { 285 if (error)
286 if (error == -ENOSPC) 286 goto out_trans_cancel;
287 goto error_return;
288 goto error1;
289 }
290 287
291 /* 288 /*
292 * An error after we've joined dp to the transaction will result in the 289 * Now we join the directory inode to the transaction. We do not do it
293 * transaction cancel unlocking dp so don't do it explicitly in the 290 * earlier because xfs_dir_ialloc might commit the previous transaction
291 * (and release all the locks). An error from here on will result in
292 * the transaction cancel unlocking dp so don't do it explicitly in the
294 * error path. 293 * error path.
295 */ 294 */
296 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); 295 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
@@ -330,7 +329,7 @@ xfs_symlink(
330 XFS_BMAPI_METADATA, &first_block, resblks, 329 XFS_BMAPI_METADATA, &first_block, resblks,
331 mval, &nmaps, &free_list); 330 mval, &nmaps, &free_list);
332 if (error) 331 if (error)
333 goto error2; 332 goto out_bmap_cancel;
334 333
335 if (resblks) 334 if (resblks)
336 resblks -= fs_blocks; 335 resblks -= fs_blocks;
@@ -348,7 +347,7 @@ xfs_symlink(
348 BTOBB(byte_cnt), 0); 347 BTOBB(byte_cnt), 0);
349 if (!bp) { 348 if (!bp) {
350 error = -ENOMEM; 349 error = -ENOMEM;
351 goto error2; 350 goto out_bmap_cancel;
352 } 351 }
353 bp->b_ops = &xfs_symlink_buf_ops; 352 bp->b_ops = &xfs_symlink_buf_ops;
354 353
@@ -378,7 +377,7 @@ xfs_symlink(
378 error = xfs_dir_createname(tp, dp, link_name, ip->i_ino, 377 error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
379 &first_block, &free_list, resblks); 378 &first_block, &free_list, resblks);
380 if (error) 379 if (error)
381 goto error2; 380 goto out_bmap_cancel;
382 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 381 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
383 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 382 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
384 383
@@ -392,10 +391,13 @@ xfs_symlink(
392 } 391 }
393 392
394 error = xfs_bmap_finish(&tp, &free_list, &committed); 393 error = xfs_bmap_finish(&tp, &free_list, &committed);
395 if (error) { 394 if (error)
396 goto error2; 395 goto out_bmap_cancel;
397 } 396
398 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 397 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
398 if (error)
399 goto out_release_inode;
400
399 xfs_qm_dqrele(udqp); 401 xfs_qm_dqrele(udqp);
400 xfs_qm_dqrele(gdqp); 402 xfs_qm_dqrele(gdqp);
401 xfs_qm_dqrele(pdqp); 403 xfs_qm_dqrele(pdqp);
@@ -403,20 +405,28 @@ xfs_symlink(
403 *ipp = ip; 405 *ipp = ip;
404 return 0; 406 return 0;
405 407
406 error2: 408out_bmap_cancel:
407 IRELE(ip);
408 error1:
409 xfs_bmap_cancel(&free_list); 409 xfs_bmap_cancel(&free_list);
410 cancel_flags |= XFS_TRANS_ABORT; 410 cancel_flags |= XFS_TRANS_ABORT;
411 error_return: 411out_trans_cancel:
412 xfs_trans_cancel(tp, cancel_flags); 412 xfs_trans_cancel(tp, cancel_flags);
413out_release_inode:
414 /*
415 * Wait until after the current transaction is aborted to finish the
416 * setup of the inode and release the inode. This prevents recursive
417 * transactions and deadlocks from xfs_inactive.
418 */
419 if (ip) {
420 xfs_finish_inode_setup(ip);
421 IRELE(ip);
422 }
423
413 xfs_qm_dqrele(udqp); 424 xfs_qm_dqrele(udqp);
414 xfs_qm_dqrele(gdqp); 425 xfs_qm_dqrele(gdqp);
415 xfs_qm_dqrele(pdqp); 426 xfs_qm_dqrele(pdqp);
416 427
417 if (unlock_dp_on_error) 428 if (unlock_dp_on_error)
418 xfs_iunlock(dp, XFS_ILOCK_EXCL); 429 xfs_iunlock(dp, XFS_ILOCK_EXCL);
419 std_return:
420 return error; 430 return error;
421} 431}
422 432
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index eb90cd59a0ec..220ef2c906b2 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -173,7 +173,7 @@ xfs_trans_reserve(
173 uint rtextents) 173 uint rtextents)
174{ 174{
175 int error = 0; 175 int error = 0;
176 int rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; 176 bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
177 177
178 /* Mark this thread as being in a transaction */ 178 /* Mark this thread as being in a transaction */
179 current_set_flags_nested(&tp->t_pflags, PF_FSTRANS); 179 current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
@@ -184,8 +184,7 @@ xfs_trans_reserve(
184 * fail if the count would go below zero. 184 * fail if the count would go below zero.
185 */ 185 */
186 if (blocks > 0) { 186 if (blocks > 0) {
187 error = xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS, 187 error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
188 -((int64_t)blocks), rsvd);
189 if (error != 0) { 188 if (error != 0) {
190 current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); 189 current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
191 return -ENOSPC; 190 return -ENOSPC;
@@ -236,8 +235,7 @@ xfs_trans_reserve(
236 * fail if the count would go below zero. 235 * fail if the count would go below zero.
237 */ 236 */
238 if (rtextents > 0) { 237 if (rtextents > 0) {
239 error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS, 238 error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
240 -((int64_t)rtextents), rsvd);
241 if (error) { 239 if (error) {
242 error = -ENOSPC; 240 error = -ENOSPC;
243 goto undo_log; 241 goto undo_log;
@@ -268,8 +266,7 @@ undo_log:
268 266
269undo_blocks: 267undo_blocks:
270 if (blocks > 0) { 268 if (blocks > 0) {
271 xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS, 269 xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
272 (int64_t)blocks, rsvd);
273 tp->t_blk_res = 0; 270 tp->t_blk_res = 0;
274 } 271 }
275 272
@@ -488,6 +485,54 @@ xfs_trans_apply_sb_deltas(
488 sizeof(sbp->sb_frextents) - 1); 485 sizeof(sbp->sb_frextents) - 1);
489} 486}
490 487
488STATIC int
489xfs_sb_mod8(
490 uint8_t *field,
491 int8_t delta)
492{
493 int8_t counter = *field;
494
495 counter += delta;
496 if (counter < 0) {
497 ASSERT(0);
498 return -EINVAL;
499 }
500 *field = counter;
501 return 0;
502}
503
504STATIC int
505xfs_sb_mod32(
506 uint32_t *field,
507 int32_t delta)
508{
509 int32_t counter = *field;
510
511 counter += delta;
512 if (counter < 0) {
513 ASSERT(0);
514 return -EINVAL;
515 }
516 *field = counter;
517 return 0;
518}
519
520STATIC int
521xfs_sb_mod64(
522 uint64_t *field,
523 int64_t delta)
524{
525 int64_t counter = *field;
526
527 counter += delta;
528 if (counter < 0) {
529 ASSERT(0);
530 return -EINVAL;
531 }
532 *field = counter;
533 return 0;
534}
535
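
All three helpers rely on the same trick: copy the unsigned superblock field into a signed intermediate of the same width, so an underflow shows up as a negative value instead of silently wrapping. A worked example with hypothetical values:

	/*
	 * Why the signed intermediate catches underflow (hypothetical values):
	 *
	 *	uint8_t field = 25;		  e.g. sb_imax_pct
	 *	int8_t  counter = field;	  counter = 25
	 *	counter += -30;			  counter = -5
	 *
	 * Applied directly to the uint8_t, the same delta would wrap to 251
	 * and corrupt the in-core superblock copy; the signed intermediate
	 * trips the (counter < 0) check and the helper returns -EINVAL
	 * instead.
	 */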
491/* 536/*
492 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations 537 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
493 * and apply superblock counter changes to the in-core superblock. The 538 * and apply superblock counter changes to the in-core superblock. The
@@ -495,13 +540,6 @@ xfs_trans_apply_sb_deltas(
495 * applied to the in-core superblock. The idea is that that has already been 540 * applied to the in-core superblock. The idea is that that has already been
496 * done. 541 * done.
497 * 542 *
498 * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
 499 * However, we have to ensure that we modify each superblock field only
 500 * once because the application of the delta values may not be atomic. That can
 501 * lead to ENOSPC races occurring if we have two separate modifications of the
502 * free space counter to put back the entire reservation and then take away
503 * what we used.
504 *
505 * If we are not logging superblock counters, then the inode allocated/free and 543 * If we are not logging superblock counters, then the inode allocated/free and
506 * used block counts are not updated in the on disk superblock. In this case, 544 * used block counts are not updated in the on disk superblock. In this case,
507 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we 545 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
@@ -509,21 +547,15 @@ xfs_trans_apply_sb_deltas(
509 */ 547 */
510void 548void
511xfs_trans_unreserve_and_mod_sb( 549xfs_trans_unreserve_and_mod_sb(
512 xfs_trans_t *tp) 550 struct xfs_trans *tp)
513{ 551{
514 xfs_mod_sb_t msb[9]; /* If you add cases, add entries */ 552 struct xfs_mount *mp = tp->t_mountp;
515 xfs_mod_sb_t *msbp; 553 bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
516 xfs_mount_t *mp = tp->t_mountp; 554 int64_t blkdelta = 0;
517 /* REFERENCED */ 555 int64_t rtxdelta = 0;
518 int error; 556 int64_t idelta = 0;
519 int rsvd; 557 int64_t ifreedelta = 0;
520 int64_t blkdelta = 0; 558 int error;
521 int64_t rtxdelta = 0;
522 int64_t idelta = 0;
523 int64_t ifreedelta = 0;
524
525 msbp = msb;
526 rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
527 559
528 /* calculate deltas */ 560 /* calculate deltas */
529 if (tp->t_blk_res > 0) 561 if (tp->t_blk_res > 0)
@@ -547,97 +579,115 @@ xfs_trans_unreserve_and_mod_sb(
547 579
548 /* apply the per-cpu counters */ 580 /* apply the per-cpu counters */
549 if (blkdelta) { 581 if (blkdelta) {
550 error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, 582 error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
551 blkdelta, rsvd);
552 if (error) 583 if (error)
553 goto out; 584 goto out;
554 } 585 }
555 586
556 if (idelta) { 587 if (idelta) {
557 error = xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, 588 error = xfs_mod_icount(mp, idelta);
558 idelta, rsvd);
559 if (error) 589 if (error)
560 goto out_undo_fdblocks; 590 goto out_undo_fdblocks;
561 } 591 }
562 592
563 if (ifreedelta) { 593 if (ifreedelta) {
564 error = xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, 594 error = xfs_mod_ifree(mp, ifreedelta);
565 ifreedelta, rsvd);
566 if (error) 595 if (error)
567 goto out_undo_icount; 596 goto out_undo_icount;
568 } 597 }
569 598
599 if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
600 return;
601
570 /* apply remaining deltas */ 602 /* apply remaining deltas */
571 if (rtxdelta != 0) { 603 spin_lock(&mp->m_sb_lock);
572 msbp->msb_field = XFS_SBS_FREXTENTS; 604 if (rtxdelta) {
573 msbp->msb_delta = rtxdelta; 605 error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
574 msbp++; 606 if (error)
607 goto out_undo_ifree;
575 } 608 }
576 609
577 if (tp->t_flags & XFS_TRANS_SB_DIRTY) { 610 if (tp->t_dblocks_delta != 0) {
578 if (tp->t_dblocks_delta != 0) { 611 error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
579 msbp->msb_field = XFS_SBS_DBLOCKS; 612 if (error)
580 msbp->msb_delta = tp->t_dblocks_delta; 613 goto out_undo_frextents;
581 msbp++;
582 }
583 if (tp->t_agcount_delta != 0) {
584 msbp->msb_field = XFS_SBS_AGCOUNT;
585 msbp->msb_delta = tp->t_agcount_delta;
586 msbp++;
587 }
588 if (tp->t_imaxpct_delta != 0) {
589 msbp->msb_field = XFS_SBS_IMAX_PCT;
590 msbp->msb_delta = tp->t_imaxpct_delta;
591 msbp++;
592 }
593 if (tp->t_rextsize_delta != 0) {
594 msbp->msb_field = XFS_SBS_REXTSIZE;
595 msbp->msb_delta = tp->t_rextsize_delta;
596 msbp++;
597 }
598 if (tp->t_rbmblocks_delta != 0) {
599 msbp->msb_field = XFS_SBS_RBMBLOCKS;
600 msbp->msb_delta = tp->t_rbmblocks_delta;
601 msbp++;
602 }
603 if (tp->t_rblocks_delta != 0) {
604 msbp->msb_field = XFS_SBS_RBLOCKS;
605 msbp->msb_delta = tp->t_rblocks_delta;
606 msbp++;
607 }
608 if (tp->t_rextents_delta != 0) {
609 msbp->msb_field = XFS_SBS_REXTENTS;
610 msbp->msb_delta = tp->t_rextents_delta;
611 msbp++;
612 }
613 if (tp->t_rextslog_delta != 0) {
614 msbp->msb_field = XFS_SBS_REXTSLOG;
615 msbp->msb_delta = tp->t_rextslog_delta;
616 msbp++;
617 }
618 } 614 }
619 615 if (tp->t_agcount_delta != 0) {
620 /* 616 error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
621 * If we need to change anything, do it.
622 */
623 if (msbp > msb) {
624 error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
625 (uint)(msbp - msb), rsvd);
626 if (error) 617 if (error)
627 goto out_undo_ifreecount; 618 goto out_undo_dblocks;
628 } 619 }
629 620 if (tp->t_imaxpct_delta != 0) {
621 error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
622 if (error)
623 goto out_undo_agcount;
624 }
625 if (tp->t_rextsize_delta != 0) {
626 error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
627 tp->t_rextsize_delta);
628 if (error)
629 goto out_undo_imaxpct;
630 }
631 if (tp->t_rbmblocks_delta != 0) {
632 error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
633 tp->t_rbmblocks_delta);
634 if (error)
635 goto out_undo_rextsize;
636 }
637 if (tp->t_rblocks_delta != 0) {
638 error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
639 if (error)
640 goto out_undo_rbmblocks;
641 }
642 if (tp->t_rextents_delta != 0) {
643 error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
644 tp->t_rextents_delta);
645 if (error)
646 goto out_undo_rblocks;
647 }
648 if (tp->t_rextslog_delta != 0) {
649 error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
650 tp->t_rextslog_delta);
651 if (error)
652 goto out_undo_rextents;
653 }
654 spin_unlock(&mp->m_sb_lock);
630 return; 655 return;
631 656
632out_undo_ifreecount: 657out_undo_rextents:
658 if (tp->t_rextents_delta)
659 xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
660out_undo_rblocks:
661 if (tp->t_rblocks_delta)
662 xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
663out_undo_rbmblocks:
664 if (tp->t_rbmblocks_delta)
665 xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
666out_undo_rextsize:
667 if (tp->t_rextsize_delta)
668 xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
669out_undo_imaxpct:
 670 if (tp->t_imaxpct_delta)
671 xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
672out_undo_agcount:
673 if (tp->t_agcount_delta)
674 xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
675out_undo_dblocks:
676 if (tp->t_dblocks_delta)
677 xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
678out_undo_frextents:
679 if (rtxdelta)
680 xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
681out_undo_ifree:
682 spin_unlock(&mp->m_sb_lock);
633 if (ifreedelta) 683 if (ifreedelta)
634 xfs_icsb_modify_counters(mp, XFS_SBS_IFREE, -ifreedelta, rsvd); 684 xfs_mod_ifree(mp, -ifreedelta);
635out_undo_icount: 685out_undo_icount:
636 if (idelta) 686 if (idelta)
637 xfs_icsb_modify_counters(mp, XFS_SBS_ICOUNT, -idelta, rsvd); 687 xfs_mod_icount(mp, -idelta);
638out_undo_fdblocks: 688out_undo_fdblocks:
639 if (blkdelta) 689 if (blkdelta)
640 xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd); 690 xfs_mod_fdblocks(mp, -blkdelta, rsvd);
641out: 691out:
642 ASSERT(error == 0); 692 ASSERT(error == 0);
643 return; 693 return;
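
Note how the error labels above unwind in strict reverse order of application, under the same m_sb_lock hold, so a failure at any step leaves m_sb exactly as it was on entry. The same discipline in miniature; demo_apply_two() and its deltas are illustrative, not from the patch:

	/* Illustrative only: the reverse-order rollback pattern in miniature. */
	static int
	demo_apply_two(
		struct xfs_mount	*mp,
		int64_t			dblocks_delta,
		int32_t			agcount_delta)
	{
		int	error;

		spin_lock(&mp->m_sb_lock);
		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, dblocks_delta);
		if (error)
			goto out_unlock;
		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, agcount_delta);
		if (error)	/* undo step 1 before dropping the lock */
			xfs_sb_mod64(&mp->m_sb.sb_dblocks, -dblocks_delta);
	out_unlock:
		spin_unlock(&mp->m_sb_lock);
		return error;
	}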