Diffstat (limited to 'fs/xfs/xfs_trans_ail.c')
-rw-r--r--   fs/xfs/xfs_trans_ail.c | 232
1 file changed, 122 insertions(+), 110 deletions(-)
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index dc9069568ff7..c5bbbc45db91 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,8 +28,8 @@
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 
-STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *);
-STATIC xfs_log_item_t * xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
+STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
+STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
 STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
 STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
 
@@ -449,129 +449,152 @@ xfs_trans_unlocked_item(
 	xfs_log_move_tail(ailp->xa_mount, 1);
 }	/* xfs_trans_unlocked_item */
 
-
 /*
- * Update the position of the item in the AIL with the new
- * lsn.  If it is not yet in the AIL, add it.  Otherwise, move
- * it to its new position by removing it and re-adding it.
+ * xfs_trans_ail_update - bulk AIL insertion operation.
+ *
+ * @xfs_trans_ail_update takes an array of log items that all need to be
+ * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
+ * be added. Otherwise, it will be repositioned by removing it and re-adding
+ * it to the AIL. If we move the first item in the AIL, update the log tail to
+ * match the new minimum LSN in the AIL.
  *
- * Wakeup anyone with an lsn less than the item's lsn.  If the item
- * we move in the AIL is the minimum one, update the tail lsn in the
- * log manager.
+ * This function takes the AIL lock once to execute the update operations on
+ * all the items in the array, and as such should not be called with the AIL
+ * lock held. As a result, once we have the AIL lock, we need to check each log
+ * item LSN to confirm it needs to be moved forward in the AIL.
  *
- * This function must be called with the AIL lock held.  The lock
- * is dropped before returning.
+ * To optimise the insert operation, we delete all the items from the AIL in
+ * the first pass, moving them into a temporary list, then splice the temporary
+ * list into the correct position in the AIL. This avoids needing to do an
+ * insert operation on every item.
+ *
+ * This function must be called with the AIL lock held.  The lock is dropped
+ * before returning.
  */
 void
-xfs_trans_ail_update(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip,
-	xfs_lsn_t	lsn) __releases(ailp->xa_lock)
+xfs_trans_ail_update_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items,
+	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
 {
-	xfs_log_item_t		*dlip = NULL;
-	xfs_log_item_t		*mlip;	/* ptr to minimum lip */
+	xfs_log_item_t		*mlip;
 	xfs_lsn_t		tail_lsn;
+	int			mlip_changed = 0;
+	int			i;
+	LIST_HEAD(tmp);
 
 	mlip = xfs_ail_min(ailp);
 
-	if (lip->li_flags & XFS_LI_IN_AIL) {
-		dlip = xfs_ail_delete(ailp, lip);
-		ASSERT(dlip == lip);
-		xfs_trans_ail_cursor_clear(ailp, dlip);
-	} else {
-		lip->li_flags |= XFS_LI_IN_AIL;
+	for (i = 0; i < nr_items; i++) {
+		struct xfs_log_item *lip = log_items[i];
+		if (lip->li_flags & XFS_LI_IN_AIL) {
+			/* check if we really need to move the item */
+			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
+				continue;
+
+			xfs_ail_delete(ailp, lip);
+			if (mlip == lip)
+				mlip_changed = 1;
+		} else {
+			lip->li_flags |= XFS_LI_IN_AIL;
+		}
+		lip->li_lsn = lsn;
+		list_add(&lip->li_ail, &tmp);
 	}
 
-	lip->li_lsn = lsn;
-	xfs_ail_insert(ailp, lip);
+	xfs_ail_splice(ailp, &tmp, lsn);
 
-	if (mlip == dlip) {
-		mlip = xfs_ail_min(ailp);
-		/*
-		 * It is not safe to access mlip after the AIL lock is
-		 * dropped, so we must get a copy of li_lsn before we do
-		 * so.  This is especially important on 32-bit platforms
-		 * where accessing and updating 64-bit values like li_lsn
-		 * is not atomic.
-		 */
-		tail_lsn = mlip->li_lsn;
-		spin_unlock(&ailp->xa_lock);
-		xfs_log_move_tail(ailp->xa_mount, tail_lsn);
-	} else {
+	if (!mlip_changed) {
 		spin_unlock(&ailp->xa_lock);
+		return;
 	}
 
-
-}	/* xfs_trans_update_ail */
+	/*
+	 * It is not safe to access mlip after the AIL lock is dropped, so we
+	 * must get a copy of li_lsn before we do so. This is especially
+	 * important on 32-bit platforms where accessing and updating 64-bit
+	 * values like li_lsn is not atomic.
+	 */
+	mlip = xfs_ail_min(ailp);
+	tail_lsn = mlip->li_lsn;
+	spin_unlock(&ailp->xa_lock);
+	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
+}
 
 /*
- * Delete the given item from the AIL.  It must already be in
- * the AIL.
+ * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
  *
- * Wakeup anyone with an lsn less than item's lsn.  If the item
- * we delete in the AIL is the minimum one, update the tail lsn in the
- * log manager.
+ * @xfs_trans_ail_delete_bulk takes an array of log items that all need to
+ * removed from the AIL. The caller is already holding the AIL lock, and done
+ * all the checks necessary to ensure the items passed in via @log_items are
+ * ready for deletion. This includes checking that the items are in the AIL.
  *
- * Clear the IN_AIL flag from the item, reset its lsn to 0, and
- * bump the AIL's generation count to indicate that the tree
- * has changed.
+ * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
+ * flag from the item and reset the item's lsn to 0. If we remove the first
+ * item in the AIL, update the log tail to match the new minimum LSN in the
+ * AIL.
  *
- * This function must be called with the AIL lock held.  The lock
- * is dropped before returning.
+ * This function will not drop the AIL lock until all items are removed from
+ * the AIL to minimise the amount of lock traffic on the AIL. This does not
+ * greatly increase the AIL hold time, but does significantly reduce the amount
+ * of traffic on the lock, especially during IO completion.
+ *
+ * This function must be called with the AIL lock held.  The lock is dropped
+ * before returning.
  */
 void
-xfs_trans_ail_delete(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip) __releases(ailp->xa_lock)
+xfs_trans_ail_delete_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items) __releases(ailp->xa_lock)
 {
-	xfs_log_item_t		*dlip;
 	xfs_log_item_t		*mlip;
 	xfs_lsn_t		tail_lsn;
+	int			mlip_changed = 0;
+	int			i;
 
-	if (lip->li_flags & XFS_LI_IN_AIL) {
-		mlip = xfs_ail_min(ailp);
-		dlip = xfs_ail_delete(ailp, lip);
-		ASSERT(dlip == lip);
-		xfs_trans_ail_cursor_clear(ailp, dlip);
-
+	mlip = xfs_ail_min(ailp);
 
-		lip->li_flags &= ~XFS_LI_IN_AIL;
-		lip->li_lsn = 0;
+	for (i = 0; i < nr_items; i++) {
+		struct xfs_log_item *lip = log_items[i];
+		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
+			struct xfs_mount	*mp = ailp->xa_mount;
 
-		if (mlip == dlip) {
-			mlip = xfs_ail_min(ailp);
-			/*
-			 * It is not safe to access mlip after the AIL lock
-			 * is dropped, so we must get a copy of li_lsn
-			 * before we do so.  This is especially important
-			 * on 32-bit platforms where accessing and updating
-			 * 64-bit values like li_lsn is not atomic.
-			 */
-			tail_lsn = mlip ? mlip->li_lsn : 0;
-			spin_unlock(&ailp->xa_lock);
-			xfs_log_move_tail(ailp->xa_mount, tail_lsn);
-		} else {
 			spin_unlock(&ailp->xa_lock);
+			if (!XFS_FORCED_SHUTDOWN(mp)) {
+				xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
+		"%s: attempting to delete a log item that is not in the AIL",
+						__func__);
+				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+			}
+			return;
 		}
+
+		xfs_ail_delete(ailp, lip);
+		lip->li_flags &= ~XFS_LI_IN_AIL;
+		lip->li_lsn = 0;
+		if (mlip == lip)
+			mlip_changed = 1;
 	}
-	else {
-		/*
-		 * If the file system is not being shutdown, we are in
-		 * serious trouble if we get to this stage.
-		 */
-		struct xfs_mount *mp = ailp->xa_mount;
 
+	if (!mlip_changed) {
 		spin_unlock(&ailp->xa_lock);
-		if (!XFS_FORCED_SHUTDOWN(mp)) {
-			xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
-		"%s: attempting to delete a log item that is not in the AIL",
-					__func__);
-			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-		}
+		return;
 	}
-}
-
 
+	/*
+	 * It is not safe to access mlip after the AIL lock is dropped, so we
+	 * must get a copy of li_lsn before we do so. This is especially
+	 * important on 32-bit platforms where accessing and updating 64-bit
+	 * values like li_lsn is not atomic. It is possible we've emptied the
+	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
+	 */
+	mlip = xfs_ail_min(ailp);
+	tail_lsn = mlip ? mlip->li_lsn : 0;
+	spin_unlock(&ailp->xa_lock);
+	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
+}
 
 /*
  * The active item list (AIL) is a doubly linked list of log
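
A minimal caller sketch for the new bulk insert API, assuming the usual XFS
headers and the locking rule documented above; the wrapper function and the
batch constant are hypothetical and not part of this patch:

/*
 * Hypothetical example: a commit-completion path that has pinned a set of
 * log items at 'commit_lsn' positions them all in the AIL with one lock
 * round trip per batch instead of one per item.
 */
#define EXAMPLE_AIL_BATCH	32

static void
example_ail_insert_committed(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**items,
	int			count,
	xfs_lsn_t		commit_lsn)
{
	int			i;

	for (i = 0; i < count; i += EXAMPLE_AIL_BATCH) {
		int	nr = count - i;

		if (nr > EXAMPLE_AIL_BATCH)
			nr = EXAMPLE_AIL_BATCH;

		/* xfs_trans_ail_update_bulk() expects xa_lock held and drops it */
		spin_lock(&ailp->xa_lock);
		xfs_trans_ail_update_bulk(ailp, &items[i], nr, commit_lsn);
	}
}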
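
A matching sketch for the bulk delete side, again hypothetical and not part of
this patch; per the comment above, the caller must already have verified that
every item it passes in is in the AIL:

/*
 * Hypothetical example: an I/O completion handler retires all the log
 * items attached to a completed buffer with a single call, keeping AIL
 * lock traffic to one acquire/release per batch.
 */
static void
example_ail_remove_completed(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**done_items,
	int			nr_done)
{
	/* xfs_trans_ail_delete_bulk() expects xa_lock held and drops it */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete_bulk(ailp, done_items, nr_done);
}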
@@ -623,16 +646,13 @@ xfs_trans_ail_destroy(
 }
 
 /*
- * Insert the given log item into the AIL.
- * We almost always insert at the end of the list, so on inserts
- * we search from the end of the list to find where the
- * new item belongs.
+ * splice the log item list into the AIL at the given LSN.
  */
 STATIC void
-xfs_ail_insert(
+xfs_ail_splice(
 	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip)
-/* ARGSUSED */
+	struct list_head	*list,
+	xfs_lsn_t		lsn)
 {
 	xfs_log_item_t	*next_lip;
 
@@ -640,39 +660,33 @@ xfs_ail_insert(
 	 * If the list is empty, just insert the item.
 	 */
 	if (list_empty(&ailp->xa_ail)) {
-		list_add(&lip->li_ail, &ailp->xa_ail);
+		list_splice(list, &ailp->xa_ail);
 		return;
 	}
 
 	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
-		if (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)
+		if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
 			break;
 	}
 
 	ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
-	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0));
-
-	list_add(&lip->li_ail, &next_lip->li_ail);
+	       (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
 
-	xfs_ail_check(ailp, lip);
+	list_splice_init(list, &next_lip->li_ail);
 	return;
 }
 
 /*
  * Delete the given item from the AIL.  Return a pointer to the item.
  */
-/*ARGSUSED*/
-STATIC xfs_log_item_t *
+STATIC void
 xfs_ail_delete(
 	struct xfs_ail	*ailp,
 	xfs_log_item_t	*lip)
-/* ARGSUSED */
 {
 	xfs_ail_check(ailp, lip);
-
 	list_del(&lip->li_ail);
-
-	return lip;
+	xfs_trans_ail_cursor_clear(ailp, lip);
 }
 
 /*
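
The delete-into-a-temporary-list-then-splice pattern used above generalises to
any kernel list. A standalone sketch with a hypothetical structure, showing why
one splice replaces many ordered inserts (relative order inside the batch does
not matter because every entry carries the same key):

#include <linux/list.h>

struct example_node {
	struct list_head	link;
	unsigned long		key;
};

static void
example_batch_reposition(
	struct list_head	*insert_after,	/* position found by a reverse scan */
	struct example_node	**batch,
	int			nr)
{
	LIST_HEAD(tmp);
	int			i;

	for (i = 0; i < nr; i++) {
		/* unlink from wherever the entry currently sits */
		list_del(&batch[i]->link);
		/* collect the whole batch on a private list */
		list_add(&batch[i]->link, &tmp);
	}

	/* one splice instead of nr individual ordered insertions */
	list_splice_init(&tmp, insert_after);
}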
@@ -682,7 +696,6 @@ xfs_ail_delete(
 STATIC xfs_log_item_t *
 xfs_ail_min(
 	struct xfs_ail	*ailp)
-/* ARGSUSED */
 {
 	if (list_empty(&ailp->xa_ail))
 		return NULL;
@@ -699,7 +712,6 @@ STATIC xfs_log_item_t *
 xfs_ail_next(
 	struct xfs_ail	*ailp,
 	xfs_log_item_t	*lip)
-/* ARGSUSED */
 {
 	if (lip->li_ail.next == &ailp->xa_ail)
 		return NULL;