author     Christoph Hellwig <hch@infradead.org>   2011-09-18 16:41:04 -0400
committer  Alex Elder <aelder@sgi.com>             2011-10-11 22:15:07 -0400
commit     572a4cf04ac7f46e9206aabfef03dae602812341 (patch)
tree       c548aecf511c86a4d8b47ef12c6a44a8d416aa5c /fs/xfs
parent     c315c90b7d530d1ec3c226052e153b0cffa512c8 (diff)
xfs: pass bmalloca to xfs_bmap_add_extent_delay_real
All the parameters passed to xfs_bmap_add_extent_delay_real() are in the
xfs_bmalloca structure now.  Just pass the bmalloca parameter to the
function instead of 8 separate parameters.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
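The shape of the cleanup is easier to see outside the kernel tree: instead of threading eight arguments (several of them pointers used as out-parameters) through every call, the caller fills in one allocation-context structure and passes that. The following is a minimal standalone sketch of the pattern, not XFS code; struct bmalloca_sketch and both helper functions are hypothetical stand-ins whose field names merely mirror the xfs_bmalloca members this patch relies on (tp, ip, idx, cur, got, firstblock, flist, logflags).

/*
 * Standalone sketch of the refactoring pattern in this commit: collapse a
 * long parameter list into a single allocation-context argument.  The
 * struct below is a stand-in, not the real struct xfs_bmalloca.
 */
#include <stdio.h>

struct bmalloca_sketch {
	void	*tp;		/* transaction pointer */
	void	*ip;		/* incore inode pointer */
	int	idx;		/* extent index to update/insert */
	void	*cur;		/* btree cursor, may be NULL */
	int	got;		/* stand-in for the new extent (bma->got) */
	void	*firstblock;	/* first allocated block */
	void	*flist;		/* list of extents to be freed */
	int	logflags;	/* accumulated inode logging flags */
};

/* Before: eight separate parameters, several of them out-pointers. */
static int add_extent_old(void *tp, void *ip, int *idx, void **curp,
			  int *new, void *first, void *flist, int *logflagsp)
{
	*logflagsp |= 1;	/* pretend some inode fields were logged */
	return 0;
}

/* After: the whole context travels as one argument, updated in place. */
static int add_extent_new(struct bmalloca_sketch *bma)
{
	int *new = &bma->got;		/* mirrors "new = &bma->got" */

	bma->logflags |= (*new != 0);	/* pretend some fields were logged */
	return 0;
}

int main(void)
{
	struct bmalloca_sketch bma = { .idx = 0, .got = 42 };
	int logflags = 0;

	add_extent_old(bma.tp, bma.ip, &bma.idx, &bma.cur, &bma.got,
		       bma.firstblock, bma.flist, &logflags);
	add_extent_new(&bma);
	printf("old-style logflags=%d, new-style bma.logflags=%d\n",
	       logflags, bma.logflags);
	return 0;
}

One practical payoff visible in the diff below is the handling of the logging flags: they now accumulate on the structure (bma->logflags |= ...) instead of being returned through a separate *logflagsp out-parameter.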
Diffstat (limited to 'fs/xfs')
-rw-r--r--   fs/xfs/xfs_bmap.c   332
1 file changed, 171 insertions(+), 161 deletions(-)
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index ed094749851b..c8b9c4ec9f6f 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -375,16 +375,9 @@ xfs_bmap_add_attrfork_local(
  */
 STATIC int				/* error */
 xfs_bmap_add_extent_delay_real(
-	struct xfs_trans	*tp,	/* transaction pointer */
-	xfs_inode_t		*ip,	/* incore inode pointer */
-	xfs_extnum_t		*idx,	/* extent number to update/insert */
-	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
-	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
-	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
-	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
-	int			*logflagsp) /* inode logging flags */
+	struct xfs_bmalloca	*bma)
 {
-	xfs_btree_cur_t		*cur;	/* btree cursor */
+	struct xfs_bmbt_irec	*new = &bma->got;
 	int			diff;	/* temp value */
 	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
 	int			error;	/* error return value */
@@ -401,18 +394,16 @@ xfs_bmap_add_extent_delay_real(
 	xfs_filblks_t		temp2=0;/* value for da_new calculations */
 	int			tmp_rval;	/* partial logging flags */
 
-	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
-	cur = *curp;
+	ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK);
 
-	ASSERT(*idx >= 0);
-	ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
+	ASSERT(bma->idx >= 0);
+	ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
 	ASSERT(!isnullstartblock(new->br_startblock));
-	ASSERT(!cur || (cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
+	ASSERT(!bma->cur ||
+	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
 
 	XFS_STATS_INC(xs_add_exlist);
 
-	*logflagsp = 0;
-
 #define	LEFT		r[0]
 #define	RIGHT		r[1]
 #define	PREV		r[2]
@@ -420,7 +411,7 @@ xfs_bmap_add_extent_delay_real(
 	/*
 	 * Set up a bunch of variables to make the tests simpler.
 	 */
-	ep = xfs_iext_get_ext(ifp, *idx);
+	ep = xfs_iext_get_ext(ifp, bma->idx);
 	xfs_bmbt_get_all(ep, &PREV);
 	new_endoff = new->br_startoff + new->br_blockcount;
 	ASSERT(PREV.br_startoff <= new->br_startoff);
@@ -442,9 +433,9 @@ xfs_bmap_add_extent_delay_real(
 	 * Check and set flags if this segment has a left neighbor.
 	 * Don't set contiguous if the combined extent would be too large.
 	 */
-	if (*idx > 0) {
+	if (bma->idx > 0) {
 		state |= BMAP_LEFT_VALID;
-		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
+		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
 
 		if (isnullstartblock(LEFT.br_startblock))
 			state |= BMAP_LEFT_DELAY;
@@ -462,9 +453,9 @@ xfs_bmap_add_extent_delay_real(
 	 * Don't set contiguous if the combined extent would be too large.
 	 * Also check for all-three-contiguous being too large.
 	 */
-	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+	if (bma->idx < bma->ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
 		state |= BMAP_RIGHT_VALID;
-		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
+		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
 
 		if (isnullstartblock(RIGHT.br_startblock))
 			state |= BMAP_RIGHT_DELAY;
@@ -495,35 +486,39 @@ xfs_bmap_add_extent_delay_real(
 		 * Filling in all of a previously delayed allocation extent.
 		 * The left and right neighbors are both contiguous with new.
 		 */
-		--*idx;
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
-		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
+		bma->idx--;
+		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
 			LEFT.br_blockcount + PREV.br_blockcount +
 			RIGHT.br_blockcount);
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 
-		xfs_iext_remove(ip, *idx + 1, 2, state);
-		ip->i_d.di_nextents--;
-		if (cur == NULL)
+		xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
+		bma->ip->i_d.di_nextents--;
+		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
 			rval = XFS_ILOG_CORE;
-			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
 					RIGHT.br_startblock,
-					RIGHT.br_blockcount, &i)))
+					RIGHT.br_blockcount, &i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
-			if ((error = xfs_btree_delete(cur, &i)))
+			error = xfs_btree_delete(bma->cur, &i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
-			if ((error = xfs_btree_decrement(cur, 0, &i)))
+			error = xfs_btree_decrement(bma->cur, 0, &i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
-			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
 					LEFT.br_startblock,
 					LEFT.br_blockcount +
 					PREV.br_blockcount +
-					RIGHT.br_blockcount, LEFT.br_state)))
+					RIGHT.br_blockcount, LEFT.br_state);
+			if (error)
 				goto done;
 		}
 		break;
@@ -533,27 +528,29 @@ xfs_bmap_add_extent_delay_real(
 		 * Filling in all of a previously delayed allocation extent.
 		 * The left neighbor is contiguous, the right is not.
 		 */
-		--*idx;
+		bma->idx--;
 
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
-		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
+		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
+		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
 			LEFT.br_blockcount + PREV.br_blockcount);
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 
-		xfs_iext_remove(ip, *idx + 1, 1, state);
-		if (cur == NULL)
+		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
+		if (bma->cur == NULL)
 			rval = XFS_ILOG_DEXT;
 		else {
 			rval = 0;
-			if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
+			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
 					LEFT.br_startblock, LEFT.br_blockcount,
-					&i)))
+					&i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
-			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
 					LEFT.br_startblock,
 					LEFT.br_blockcount +
-					PREV.br_blockcount, LEFT.br_state)))
+					PREV.br_blockcount, LEFT.br_state);
+			if (error)
 				goto done;
 		}
 		break;
@@ -563,26 +560,28 @@ xfs_bmap_add_extent_delay_real(
 		 * Filling in all of a previously delayed allocation extent.
 		 * The right neighbor is contiguous, the left is not.
 		 */
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 		xfs_bmbt_set_startblock(ep, new->br_startblock);
 		xfs_bmbt_set_blockcount(ep,
 			PREV.br_blockcount + RIGHT.br_blockcount);
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 
-		xfs_iext_remove(ip, *idx + 1, 1, state);
-		if (cur == NULL)
+		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
+		if (bma->cur == NULL)
 			rval = XFS_ILOG_DEXT;
 		else {
 			rval = 0;
-			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
 					RIGHT.br_startblock,
-					RIGHT.br_blockcount, &i)))
+					RIGHT.br_blockcount, &i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
-			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
+			error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
 					new->br_startblock,
 					PREV.br_blockcount +
-					RIGHT.br_blockcount, PREV.br_state)))
+					RIGHT.br_blockcount, PREV.br_state);
+			if (error)
 				goto done;
 		}
 		break;
@@ -593,22 +592,24 @@ xfs_bmap_add_extent_delay_real(
 		 * Neither the left nor right neighbors are contiguous with
 		 * the new one.
 		 */
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 		xfs_bmbt_set_startblock(ep, new->br_startblock);
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 
-		ip->i_d.di_nextents++;
-		if (cur == NULL)
+		bma->ip->i_d.di_nextents++;
+		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
 			rval = XFS_ILOG_CORE;
-			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
 					new->br_startblock, new->br_blockcount,
-					&i)))
+					&i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
-			cur->bc_rec.b.br_state = XFS_EXT_NORM;
-			if ((error = xfs_btree_insert(cur, &i)))
+			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
+			error = xfs_btree_insert(bma->cur, &i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 		}
@@ -619,38 +620,40 @@ xfs_bmap_add_extent_delay_real(
 		 * Filling in the first part of a previous delayed allocation.
 		 * The left neighbor is contiguous.
 		 */
-		trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
-		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
+		trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
+		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
 			LEFT.br_blockcount + new->br_blockcount);
 		xfs_bmbt_set_startoff(ep,
 			PREV.br_startoff + new->br_blockcount);
-		trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
+		trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
 
 		temp = PREV.br_blockcount - new->br_blockcount;
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 		xfs_bmbt_set_blockcount(ep, temp);
-		if (cur == NULL)
+		if (bma->cur == NULL)
 			rval = XFS_ILOG_DEXT;
 		else {
 			rval = 0;
-			if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
+			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
 					LEFT.br_startblock, LEFT.br_blockcount,
-					&i)))
+					&i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
-			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
+			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
 					LEFT.br_startblock,
 					LEFT.br_blockcount +
 					new->br_blockcount,
-					LEFT.br_state)))
+					LEFT.br_state);
+			if (error)
 				goto done;
 		}
-		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
 			startblockval(PREV.br_startblock));
 		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 
-		--*idx;
+		bma->idx--;
 		break;
 
 	case BMAP_LEFT_FILLING:
@@ -658,41 +661,43 @@ xfs_bmap_add_extent_delay_real(
 		 * Filling in the first part of a previous delayed allocation.
 		 * The left neighbor is not contiguous.
 		 */
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 		xfs_bmbt_set_startoff(ep, new_endoff);
 		temp = PREV.br_blockcount - new->br_blockcount;
 		xfs_bmbt_set_blockcount(ep, temp);
-		xfs_iext_insert(ip, *idx, 1, new, state);
-		ip->i_d.di_nextents++;
-		if (cur == NULL)
+		xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
+		bma->ip->i_d.di_nextents++;
+		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
 			rval = XFS_ILOG_CORE;
-			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
 					new->br_startblock, new->br_blockcount,
-					&i)))
+					&i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
-			cur->bc_rec.b.br_state = XFS_EXT_NORM;
-			if ((error = xfs_btree_insert(cur, &i)))
+			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
+			error = xfs_btree_insert(bma->cur, &i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 		}
-		if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-		    ip->i_d.di_nextents > ip->i_df.if_ext_max) {
-			error = xfs_bmap_extents_to_btree(tp, ip,
-					first, flist, &cur, 1, &tmp_rval,
-					XFS_DATA_FORK);
+		if (bma->ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+		    bma->ip->i_d.di_nextents > bma->ip->i_df.if_ext_max) {
+			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+					bma->firstblock, bma->flist,
+					&bma->cur, 1, &tmp_rval, XFS_DATA_FORK);
 			rval |= tmp_rval;
 			if (error)
 				goto done;
 		}
-		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
 			startblockval(PREV.br_startblock) -
-			(cur ? cur->bc_private.b.allocated : 0));
-		ep = xfs_iext_get_ext(ifp, *idx + 1);
+			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
+		ep = xfs_iext_get_ext(ifp, bma->idx + 1);
 		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
-		trace_xfs_bmap_post_update(ip, *idx + 1, state, _THIS_IP_);
+		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
 		break;
 
 	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -701,37 +706,39 @@ xfs_bmap_add_extent_delay_real(
 		 * The right neighbor is contiguous with the new allocation.
 		 */
 		temp = PREV.br_blockcount - new->br_blockcount;
-		trace_xfs_bmap_pre_update(ip, *idx + 1, state, _THIS_IP_);
+		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
 		xfs_bmbt_set_blockcount(ep, temp);
-		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx + 1),
+		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
 			new->br_startoff, new->br_startblock,
 			new->br_blockcount + RIGHT.br_blockcount,
 			RIGHT.br_state);
-		trace_xfs_bmap_post_update(ip, *idx + 1, state, _THIS_IP_);
-		if (cur == NULL)
+		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
+		if (bma->cur == NULL)
 			rval = XFS_ILOG_DEXT;
 		else {
 			rval = 0;
-			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
+			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
 					RIGHT.br_startblock,
-					RIGHT.br_blockcount, &i)))
+					RIGHT.br_blockcount, &i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
-			if ((error = xfs_bmbt_update(cur, new->br_startoff,
+			error = xfs_bmbt_update(bma->cur, new->br_startoff,
 					new->br_startblock,
 					new->br_blockcount +
 					RIGHT.br_blockcount,
-					RIGHT.br_state)))
+					RIGHT.br_state);
+			if (error)
 				goto done;
 		}
 
-		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
 			startblockval(PREV.br_startblock));
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 
-		++*idx;
+		bma->idx++;
 		break;
 
 	case BMAP_RIGHT_FILLING:
@@ -740,41 +747,43 @@ xfs_bmap_add_extent_delay_real(
 		 * The right neighbor is not contiguous.
 		 */
 		temp = PREV.br_blockcount - new->br_blockcount;
-		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 		xfs_bmbt_set_blockcount(ep, temp);
-		xfs_iext_insert(ip, *idx + 1, 1, new, state);
-		ip->i_d.di_nextents++;
-		if (cur == NULL)
+		xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
+		bma->ip->i_d.di_nextents++;
+		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
 			rval = XFS_ILOG_CORE;
-			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
 					new->br_startblock, new->br_blockcount,
-					&i)))
+					&i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
-			cur->bc_rec.b.br_state = XFS_EXT_NORM;
-			if ((error = xfs_btree_insert(cur, &i)))
+			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
+			error = xfs_btree_insert(bma->cur, &i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 		}
-		if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-		    ip->i_d.di_nextents > ip->i_df.if_ext_max) {
-			error = xfs_bmap_extents_to_btree(tp, ip,
-					first, flist, &cur, 1, &tmp_rval,
-					XFS_DATA_FORK);
+		if (bma->ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+		    bma->ip->i_d.di_nextents > bma->ip->i_df.if_ext_max) {
+			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+					bma->firstblock, bma->flist, &bma->cur, 1,
+					&tmp_rval, XFS_DATA_FORK);
 			rval |= tmp_rval;
 			if (error)
 				goto done;
 		}
-		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
 			startblockval(PREV.br_startblock) -
-			(cur ? cur->bc_private.b.allocated : 0));
-		ep = xfs_iext_get_ext(ifp, *idx);
+			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
+		ep = xfs_iext_get_ext(ifp, bma->idx);
 		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 
-		++*idx;
+		bma->idx++;
 		break;
 
 	case 0:
@@ -800,46 +809,48 @@ xfs_bmap_add_extent_delay_real(
 		 */
 		temp = new->br_startoff - PREV.br_startoff;
 		temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
-		trace_xfs_bmap_pre_update(ip, *idx, 0, _THIS_IP_);
+		trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
 		xfs_bmbt_set_blockcount(ep, temp);	/* truncate PREV */
 		LEFT = *new;
 		RIGHT.br_state = PREV.br_state;
 		RIGHT.br_startblock = nullstartblock(
-				(int)xfs_bmap_worst_indlen(ip, temp2));
+				(int)xfs_bmap_worst_indlen(bma->ip, temp2));
 		RIGHT.br_startoff = new_endoff;
 		RIGHT.br_blockcount = temp2;
 		/* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
-		xfs_iext_insert(ip, *idx + 1, 2, &LEFT, state);
-		ip->i_d.di_nextents++;
-		if (cur == NULL)
+		xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
+		bma->ip->i_d.di_nextents++;
+		if (bma->cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
 			rval = XFS_ILOG_CORE;
-			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
+			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
 					new->br_startblock, new->br_blockcount,
-					&i)))
+					&i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
-			cur->bc_rec.b.br_state = XFS_EXT_NORM;
-			if ((error = xfs_btree_insert(cur, &i)))
+			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
+			error = xfs_btree_insert(bma->cur, &i);
+			if (error)
 				goto done;
 			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 		}
-		if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-		    ip->i_d.di_nextents > ip->i_df.if_ext_max) {
-			error = xfs_bmap_extents_to_btree(tp, ip,
-					first, flist, &cur, 1, &tmp_rval,
-					XFS_DATA_FORK);
+		if (bma->ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+		    bma->ip->i_d.di_nextents > bma->ip->i_df.if_ext_max) {
+			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+					bma->firstblock, bma->flist, &bma->cur,
+					1, &tmp_rval, XFS_DATA_FORK);
 			rval |= tmp_rval;
 			if (error)
 				goto done;
 		}
-		temp = xfs_bmap_worst_indlen(ip, temp);
-		temp2 = xfs_bmap_worst_indlen(ip, temp2);
+		temp = xfs_bmap_worst_indlen(bma->ip, temp);
+		temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
 		diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
-			(cur ? cur->bc_private.b.allocated : 0));
+			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
 		if (diff > 0) {
-			error = xfs_icsb_modify_counters(ip->i_mount,
+			error = xfs_icsb_modify_counters(bma->ip->i_mount,
 					XFS_SBS_FDBLOCKS,
 					-((int64_t)diff), 0);
 			ASSERT(!error);
@@ -847,15 +858,15 @@ xfs_bmap_add_extent_delay_real(
 				goto done;
 		}
 
-		ep = xfs_iext_get_ext(ifp, *idx);
+		ep = xfs_iext_get_ext(ifp, bma->idx);
 		xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
-		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
-		trace_xfs_bmap_pre_update(ip, *idx + 2, state, _THIS_IP_);
-		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx + 2),
+		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
+		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
+		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
 			nullstartblock((int)temp2));
-		trace_xfs_bmap_post_update(ip, *idx + 2, state, _THIS_IP_);
+		trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
 
-		++*idx;
+		bma->idx++;
 		da_new = temp + temp2;
 		break;
 
@@ -873,14 +884,15 @@ xfs_bmap_add_extent_delay_real(
 	}
 
 	/* convert to a btree if necessary */
-	if (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) == XFS_DINODE_FMT_EXTENTS &&
-	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > ifp->if_ext_max) {
+	if (XFS_IFORK_FORMAT(bma->ip, XFS_DATA_FORK) == XFS_DINODE_FMT_EXTENTS &&
+	    XFS_IFORK_NEXTENTS(bma->ip, XFS_DATA_FORK) > ifp->if_ext_max) {
 		int	tmp_logflags;	/* partial log flag return val */
 
-		ASSERT(cur == NULL);
-		error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
+		ASSERT(bma->cur == NULL);
+		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
+				bma->firstblock, bma->flist, &bma->cur,
 				da_old > 0, &tmp_logflags, XFS_DATA_FORK);
-		*logflagsp |= tmp_logflags;
+		bma->logflags |= tmp_logflags;
 		if (error)
 			goto done;
 	}
@@ -888,22 +900,22 @@ xfs_bmap_add_extent_delay_real(
 	/* adjust for changes in reserved delayed indirect blocks */
 	if (da_old || da_new) {
 		temp = da_new;
-		if (cur)
-			temp += cur->bc_private.b.allocated;
+		if (bma->cur)
+			temp += bma->cur->bc_private.b.allocated;
 		ASSERT(temp <= da_old);
 		if (temp < da_old)
-			xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
-				(int64_t)(da_old - temp), 0);
+			xfs_icsb_modify_counters(bma->ip->i_mount,
+					XFS_SBS_FDBLOCKS,
+					(int64_t)(da_old - temp), 0);
 	}
 
 	/* clear out the allocated field, done with it now in any case. */
-	if (cur) {
-		cur->bc_private.b.allocated = 0;
-		*curp = cur;
-	}
-	xfs_bmap_check_leaf_extents(cur, ip, XFS_DATA_FORK);
+	if (bma->cur)
+		bma->cur->bc_private.b.allocated = 0;
+
+	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, XFS_DATA_FORK);
 done:
-	*logflagsp |= rval;
+	bma->logflags |= rval;
 	return error;
 #undef	LEFT
 #undef	RIGHT
@@ -4698,9 +4710,7 @@ xfs_bmapi_allocate(
 		bma->got.br_state = XFS_EXT_UNWRITTEN;
 
 	if (bma->wasdel) {
-		error = xfs_bmap_add_extent_delay_real(bma->tp, bma->ip,
-				&bma->idx, &bma->cur, &bma->got,
-				bma->firstblock, bma->flist, &tmp_logflags);
+		error = xfs_bmap_add_extent_delay_real(bma);
 	} else {
 		error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
 				&bma->idx, &bma->cur, &bma->got,