path: root/fs/xfs/xfs_trans_ail.c
author		Dave Chinner <dchinner@redhat.com>	2010-12-19 20:02:19 -0500
committer	Dave Chinner <david@fromorbit.com>	2010-12-19 20:02:19 -0500
commit		0e57f6a36f9be03e5abb755f524ee91c4aebe854
tree		4de7d393444171525ca8c6b73026493ffa8e1b12	/fs/xfs/xfs_trans_ail.c
parent		eb3efa1249b6413be930bdf13d10b6238028a440
xfs: bulk AIL insertion during transaction commit
When inserting items into the AIL from the transaction committed callbacks, we take the AIL lock for every single item that is to be inserted. For a CIL checkpoint commit, this can be tens of thousands of individual inserts, yet almost all of the items will be inserted at the same point in the AIL because they have the same index.

To reduce the overhead and contention on the AIL lock for such operations, introduce a "bulk insert" operation which allows a list of log items with the same LSN to be inserted in a single operation via a list splice. To do this, we need to pre-sort the log items being committed into a temporary list for insertion.

The complexity is that not every log item will end up with the same LSN, and not every item is actually inserted into the AIL. Items that don't match the commit LSN will be inserted and unpinned as per the current one-at-a-time method (relatively rare), while items that are not to be inserted will be unpinned and freed immediately. Items that are to be inserted at the given commit LSN are placed in a temporary array and inserted into the AIL in bulk each time the array fills up.

As a result of this, we trade off AIL lock hold time for a significant reduction in lock traffic. lock_stat output shows that the worst-case hold time is unchanged, but contention from AIL inserts drops by an order of magnitude and the number of lock traversals decreases significantly.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
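[Editor's illustration] A minimal sketch of the caller-side batching the message describes. The real caller-side changes live in fs/xfs/xfs_trans.c and are not part of this file's diff; the helper names, the batch size, and the items[]/nr iteration source below are hypothetical, and the usual XFS headers (xfs_trans_priv.h and friends) are assumed to be in scope. xfs_trans_ail_update_bulk() itself is the function added by this patch, and it expects the caller to hold xa_lock and drops it before returning.

/* Hypothetical batch size, for illustration only. */
#define EXAMPLE_LOG_ITEM_BATCH	32

/*
 * Push a batch of log items that all share @commit_lsn into the AIL with a
 * single AIL lock round trip.
 */
static void
example_ail_insert_batch(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**batch,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	if (!nr_items)
		return;
	spin_lock(&ailp->xa_lock);
	/* xfs_trans_ail_update_bulk() drops xa_lock before returning */
	xfs_trans_ail_update_bulk(ailp, batch, nr_items, commit_lsn);
}

/*
 * Accumulate same-LSN items into an on-stack array and flush it to the AIL
 * each time it fills, as the commit message describes.
 */
static void
example_commit_insert_items(
	struct xfs_ail		*ailp,
	struct xfs_log_item	**items,
	int			nr,
	xfs_lsn_t		commit_lsn)
{
	struct xfs_log_item	*batch[EXAMPLE_LOG_ITEM_BATCH];
	int			i, used = 0;

	for (i = 0; i < nr; i++) {
		batch[used++] = items[i];
		if (used == EXAMPLE_LOG_ITEM_BATCH) {
			example_ail_insert_batch(ailp, batch, used, commit_lsn);
			used = 0;
		}
	}
	example_ail_insert_batch(ailp, batch, used, commit_lsn);
}

In the actual commit, items whose LSN differs from the commit LSN and items that are not inserted at all are handled separately, as noted above.
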
Diffstat (limited to 'fs/xfs/xfs_trans_ail.c')
-rw-r--r--	fs/xfs/xfs_trans_ail.c	109
1 file changed, 107 insertions(+), 2 deletions(-)
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 645928cab42d..fe991a76bf14 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -29,6 +29,7 @@
 #include "xfs_error.h"
 
 STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *);
+STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
 STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
 STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
 STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
@@ -502,6 +503,79 @@ xfs_trans_ail_update(
 }	/* xfs_trans_update_ail */
 
 /*
+ * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
+ *
+ * xfs_trans_ail_update_bulk() takes an array of log items that all need to
+ * be positioned at the same LSN in the AIL. If an item is not in the AIL, it
+ * will be added. Otherwise, it will be repositioned by removing it and
+ * re-adding it to the AIL. If we move the first item in the AIL, update the
+ * log tail to match the new minimum LSN in the AIL.
+ *
+ * This function executes the update operations on all the items in the array
+ * under a single hold of the AIL lock rather than cycling the lock for each
+ * item. Once we have the AIL lock, we still need to check each log item's LSN
+ * to confirm it actually needs to be moved forward in the AIL.
+ *
+ * To optimise the insert operation, we delete all the items from the AIL in
+ * the first pass, moving them into a temporary list, then splice the temporary
+ * list into the correct position in the AIL. This avoids needing to do an
+ * insert operation on every item.
+ *
+ * This function must be called with the AIL lock held. The lock is dropped
+ * before returning.
+ */
+void
+xfs_trans_ail_update_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items,
+	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
+{
+	xfs_log_item_t		*mlip;
+	xfs_lsn_t		tail_lsn;
+	int			mlip_changed = 0;
+	int			i;
+	LIST_HEAD(tmp);
+
+	mlip = xfs_ail_min(ailp);
+
+	for (i = 0; i < nr_items; i++) {
+		struct xfs_log_item *lip = log_items[i];
+		if (lip->li_flags & XFS_LI_IN_AIL) {
+			/* check if we really need to move the item */
+			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
+				continue;
+
+			xfs_ail_delete(ailp, lip);
+			if (mlip == lip)
+				mlip_changed = 1;
+		} else {
+			lip->li_flags |= XFS_LI_IN_AIL;
+		}
+		lip->li_lsn = lsn;
+		list_add(&lip->li_ail, &tmp);
+	}
+
+	xfs_ail_splice(ailp, &tmp, lsn);
+
+	if (!mlip_changed) {
+		spin_unlock(&ailp->xa_lock);
+		return;
+	}
+
+	/*
+	 * It is not safe to access mlip after the AIL lock is dropped, so we
+	 * must get a copy of li_lsn before we do so. This is especially
+	 * important on 32-bit platforms where accessing and updating 64-bit
+	 * values like li_lsn is not atomic.
+	 */
+	mlip = xfs_ail_min(ailp);
+	tail_lsn = mlip->li_lsn;
+	spin_unlock(&ailp->xa_lock);
+	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
+}
+
+/*
  * Delete the given item from the AIL. It must already be in
  * the AIL.
  *
@@ -642,8 +716,8 @@ xfs_ail_insert(
 			break;
 	}
 
-	ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
-	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0));
+	ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
+	       XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0);
 
 	list_add(&lip->li_ail, &next_lip->li_ail);
 
@@ -652,6 +726,37 @@ xfs_ail_insert(
 }
 
 /*
+ * splice the log item list into the AIL at the given LSN.
+ */
+STATIC void
+xfs_ail_splice(
+	struct xfs_ail	*ailp,
+	struct list_head *list,
+	xfs_lsn_t	lsn)
+{
+	xfs_log_item_t	*next_lip;
+
+	/*
+	 * If the list is empty, just insert the item.
+	 */
+	if (list_empty(&ailp->xa_ail)) {
+		list_splice(list, &ailp->xa_ail);
+		return;
+	}
+
+	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
+		if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
+			break;
+	}
+
+	ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
+	       (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
+
+	list_splice_init(list, &next_lip->li_ail);
+	return;
+}
+
+/*
  * Delete the given item from the AIL. Return a pointer to the item.
  */
 STATIC void
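
[Editor's illustration] A self-contained restatement of the splice technique xfs_ail_splice() uses above, for readers less familiar with the <linux/list.h> helpers. The struct and function names here are illustrative, not part of the patch; only the generic list API is relied on.

#include <linux/list.h>
#include <linux/types.h>

struct example_item {
	struct list_head	list;
	u64			lsn;	/* stand-in for the xfs_lsn_t ordering key */
};

/*
 * Walk the LSN-sorted destination list backwards to find the last entry whose
 * key is <= @lsn, then splice the whole batch in after it. The splice itself
 * is O(1) regardless of how many entries are on @batch, which is what makes
 * the bulk insert cheaper than repeated single insertions.
 */
static void
example_splice_batch(
	struct list_head	*dst,
	struct list_head	*batch,
	u64			lsn)
{
	struct example_item	*pos;

	if (list_empty(dst)) {
		list_splice_init(batch, dst);
		return;
	}

	list_for_each_entry_reverse(pos, dst, list) {
		if (pos->lsn <= lsn)
			break;
	}

	/*
	 * If no entry matched, the loop ran off the front and &pos->list is
	 * the list head itself, so the batch lands at the front of @dst and
	 * the sort order is preserved either way.
	 */
	list_splice_init(batch, &pos->list);
}

The reverse walk mirrors the AIL case: newly committed items usually carry the highest LSN, so the search normally terminates after inspecting only the tail of the list.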