author		Dave Chinner <dchinner@redhat.com>	2010-12-19 20:02:19 -0500
committer	Dave Chinner <david@fromorbit.com>	2010-12-19 20:02:19 -0500
commit		0e57f6a36f9be03e5abb755f524ee91c4aebe854 (patch)
tree		4de7d393444171525ca8c6b73026493ffa8e1b12 /fs/xfs
parent		eb3efa1249b6413be930bdf13d10b6238028a440 (diff)
xfs: bulk AIL insertion during transaction commit
When inserting items into the AIL from the transaction committed callbacks, we take the AIL lock for every single item that is to be inserted. For a CIL checkpoint commit, this can be tens of thousands of individual inserts, yet almost all of the items will be inserted at the same point in the AIL because they have the same index.

To reduce the overhead and contention on the AIL lock for such operations, introduce a "bulk insert" operation which allows a list of log items with the same LSN to be inserted in a single operation via a list splice. To do this, we need to pre-sort the log items being committed into a temporary list for insertion.

The complexity is that not every log item will end up with the same LSN, and not every item is actually inserted into the AIL. Items that don't match the commit LSN will be inserted and unpinned as per the current one-at-a-time method (relatively rare), while items that are not to be inserted will be unpinned and freed immediately. Items that are to be inserted at the given commit LSN are placed in a temporary array and inserted into the AIL in bulk each time the array fills up.

As a result of this, we trade off AIL lock hold time for a significant reduction in AIL lock traffic. lock_stat output shows that the worst-case hold time is unchanged, but contention from AIL inserts drops by an order of magnitude and the number of lock traversals decreases significantly.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
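As an aside for readers unfamiliar with the pattern, here is a minimal, stand-alone C sketch of the batch-and-flush idea the message describes. It is illustration only, not the XFS code: struct item, insert_bulk(), commit_items() and the tiny BATCH_SIZE are hypothetical names. Items whose LSN matches the commit LSN are gathered into a fixed-size array and handed off in one call each time the array fills, with a final flush for any partial batch; items with an unusual LSN fall back to a one-at-a-time path.

#include <stdio.h>

#define BATCH_SIZE	4	/* kept small so the demo flushes more than once */

struct item {
	long	lsn;
};

/* Stand-in for "take the AIL lock once, insert nr items, drop the lock". */
static void insert_bulk(struct item **items, int nr, long lsn)
{
	(void)items;
	printf("bulk insert of %d item(s) at lsn %ld\n", nr, lsn);
}

static void commit_items(struct item **all, int count, long commit_lsn)
{
	struct item	*batch[BATCH_SIZE];
	int		n = 0;

	for (int i = 0; i < count; i++) {
		if (all[i]->lsn != commit_lsn) {
			/* unusual LSN: fall back to a one-at-a-time insert */
			insert_bulk(&all[i], 1, all[i]->lsn);
			continue;
		}
		batch[n++] = all[i];
		if (n == BATCH_SIZE) {		/* array full: flush it in one go */
			insert_bulk(batch, n, commit_lsn);
			n = 0;
		}
	}
	if (n)					/* don't forget the partial final batch */
		insert_bulk(batch, n, commit_lsn);
}

int main(void)
{
	struct item	items[10];
	struct item	*ptrs[10];

	for (int i = 0; i < 10; i++) {
		items[i].lsn = (i == 3) ? 99 : 42;	/* one item with an odd LSN */
		ptrs[i] = &items[i];
	}
	commit_items(ptrs, 10, 42);
	return 0;
}

In the real patch, the role of insert_bulk() on the matching-LSN path is played by xfs_log_item_batch_insert(), which takes the AIL lock once for the whole batch.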
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/xfs_log_cil.c	|   9
-rw-r--r--	fs/xfs/xfs_trans.c	|  79
-rw-r--r--	fs/xfs/xfs_trans_ail.c	| 109
-rw-r--r--	fs/xfs/xfs_trans_priv.h	|  10
4 files changed, 195 insertions(+), 12 deletions(-)
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 23d6ceb5e97b..f36f1a2f4dc1 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -361,15 +361,10 @@ xlog_cil_committed(
 	int			abort)
 {
 	struct xfs_cil_ctx	*ctx = args;
-	struct xfs_log_vec	*lv;
-	int			abortflag = abort ? XFS_LI_ABORTED : 0;
 	struct xfs_busy_extent	*busyp, *n;
 
-	/* unpin all the log items */
-	for (lv = ctx->lv_chain; lv; lv = lv->lv_next ) {
-		xfs_trans_item_committed(lv->lv_item, ctx->start_lsn,
-							abortflag);
-	}
+	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
+					ctx->start_lsn, abort);
 
 	list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list)
 		xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index f6d956b7711e..f80a067a4658 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1350,7 +1350,7 @@ xfs_trans_fill_vecs(
  * they could be immediately flushed and we'd have to race with the flusher
  * trying to pull the item from the AIL as we add it.
  */
-void
+static void
 xfs_trans_item_committed(
 	struct xfs_log_item	*lip,
 	xfs_lsn_t		commit_lsn,
@@ -1425,6 +1425,83 @@ xfs_trans_committed(
 	xfs_trans_free(tp);
 }
 
+static inline void
+xfs_log_item_batch_insert(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items,
+	xfs_lsn_t		commit_lsn)
+{
+	int	i;
+
+	spin_lock(&ailp->xa_lock);
+	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
+	xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
+
+	for (i = 0; i < nr_items; i++)
+		IOP_UNPIN(log_items[i], 0);
+}
+
+/*
+ * Bulk operation version of xfs_trans_committed that takes a log vector of
+ * items to insert into the AIL. This uses bulk AIL insertion techniques to
+ * minimise lock traffic.
+ */
+void
+xfs_trans_committed_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_vec	*log_vector,
+	xfs_lsn_t		commit_lsn,
+	int			aborted)
+{
+#define LOG_ITEM_BATCH_SIZE	32
+	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
+	struct xfs_log_vec	*lv;
+	int			i = 0;
+
+	/* unpin all the log items */
+	for (lv = log_vector; lv; lv = lv->lv_next) {
+		struct xfs_log_item	*lip = lv->lv_item;
+		xfs_lsn_t		item_lsn;
+
+		if (aborted)
+			lip->li_flags |= XFS_LI_ABORTED;
+		item_lsn = IOP_COMMITTED(lip, commit_lsn);
+
+		/* item_lsn of -1 means the item was freed */
+		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
+			continue;
+
+		if (item_lsn != commit_lsn) {
+			/*
+			 * Not a bulk update candidate due to an unusual
+			 * item_lsn. Push it into the AIL immediately,
+			 * rechecking the LSN once we have the AIL lock,
+			 * then unpin the item.
+			 */
+			spin_lock(&ailp->xa_lock);
+			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
+				xfs_trans_ail_update(ailp, lip, item_lsn);
+			else
+				spin_unlock(&ailp->xa_lock);
+			IOP_UNPIN(lip, 0);
+			continue;
+		}
+
+		/* Item is a candidate for bulk AIL insert. */
+		log_items[i++] = lv->lv_item;
+		if (i >= LOG_ITEM_BATCH_SIZE) {
+			xfs_log_item_batch_insert(ailp, log_items,
+					LOG_ITEM_BATCH_SIZE, commit_lsn);
+			i = 0;
+		}
+	}
+
+	/* make sure we insert the remainder! */
+	if (i)
+		xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
+}
+
 /*
  * Called from the trans_commit code when we notice that
  * the filesystem is in the middle of a forced shutdown.
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 645928cab42d..fe991a76bf14 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -29,6 +29,7 @@
 #include "xfs_error.h"
 
 STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *);
+STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
 STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
 STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
 STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
@@ -502,6 +503,79 @@ xfs_trans_ail_update(
 } /* xfs_trans_update_ail */
 
 /*
+ * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
+ *
+ * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
+ * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
+ * be added. Otherwise, it will be repositioned by removing it and re-adding
+ * it to the AIL. If we move the first item in the AIL, update the log tail to
+ * match the new minimum LSN in the AIL.
+ *
+ * This function takes the AIL lock once to execute the update operations on
+ * all the items in the array. As a result, once we have the AIL lock, we need
+ * to check each log item's LSN to confirm that it actually needs to be moved
+ * forward in the AIL.
+ *
+ * To optimise the insert operation, we delete all the items from the AIL in
+ * the first pass, moving them into a temporary list, then splice the temporary
+ * list into the correct position in the AIL. This avoids needing to do an
+ * insert operation on every item.
+ *
+ * This function must be called with the AIL lock held. The lock is dropped
+ * before returning.
+ */
+void
+xfs_trans_ail_update_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items,
+	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
+{
+	xfs_log_item_t		*mlip;
+	xfs_lsn_t		tail_lsn;
+	int			mlip_changed = 0;
+	int			i;
+	LIST_HEAD(tmp);
+
+	mlip = xfs_ail_min(ailp);
+
+	for (i = 0; i < nr_items; i++) {
+		struct xfs_log_item *lip = log_items[i];
+		if (lip->li_flags & XFS_LI_IN_AIL) {
+			/* check if we really need to move the item */
+			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
+				continue;
+
+			xfs_ail_delete(ailp, lip);
+			if (mlip == lip)
+				mlip_changed = 1;
+		} else {
+			lip->li_flags |= XFS_LI_IN_AIL;
+		}
+		lip->li_lsn = lsn;
+		list_add(&lip->li_ail, &tmp);
+	}
+
+	xfs_ail_splice(ailp, &tmp, lsn);
+
+	if (!mlip_changed) {
+		spin_unlock(&ailp->xa_lock);
+		return;
+	}
+
+	/*
+	 * It is not safe to access mlip after the AIL lock is dropped, so we
+	 * must get a copy of li_lsn before we do so. This is especially
+	 * important on 32-bit platforms where accessing and updating 64-bit
+	 * values like li_lsn is not atomic.
+	 */
+	mlip = xfs_ail_min(ailp);
+	tail_lsn = mlip->li_lsn;
+	spin_unlock(&ailp->xa_lock);
+	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
+}
+
+/*
  * Delete the given item from the AIL. It must already be in
  * the AIL.
  *
@@ -642,8 +716,8 @@ xfs_ail_insert(
 		break;
 	}
 
-	ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
-	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0));
+	ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
+	       XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0);
 
 	list_add(&lip->li_ail, &next_lip->li_ail);
 
@@ -652,6 +726,37 @@ xfs_ail_insert(
 }
 
 /*
+ * splice the log item list into the AIL at the given LSN.
+ */
+STATIC void
+xfs_ail_splice(
+	struct xfs_ail	*ailp,
+	struct list_head *list,
+	xfs_lsn_t	lsn)
+{
+	xfs_log_item_t	*next_lip;
+
+	/*
+	 * If the AIL is empty, just splice the list in at the head.
+	 */
+	if (list_empty(&ailp->xa_ail)) {
+		list_splice(list, &ailp->xa_ail);
+		return;
+	}
+
+	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
+		if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
+			break;
+	}
+
+	ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
+	       (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
+
+	list_splice_init(list, &next_lip->li_ail);
+	return;
+}
+
+/*
  * Delete the given item from the AIL. Return a pointer to the item.
  */
 STATIC void
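The constant-time splice used by xfs_ail_splice() above is the heart of the optimisation: linking a whole pre-sorted chain in after a single AIL entry costs a handful of pointer updates, regardless of how many items are in the chain. The following is a rough userspace sketch of that one step; the hand-rolled list type and splice_after() are hypothetical stand-ins for the kernel's struct list_head and list_splice(), not the kernel implementation.

#include <stdio.h>

/* Minimal stand-in for the kernel's circular doubly-linked list head. */
struct list_head {
	struct list_head	*next, *prev;
};

static void list_init(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/* Link every entry of 'list' in after 'pos'; O(1) whatever the list length. */
static void splice_after(struct list_head *list, struct list_head *pos)
{
	if (list->next == list)
		return;				/* nothing to splice */

	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	first->prev = pos;
	last->next = pos->next;
	pos->next->prev = last;
	pos->next = first;
	list_init(list);			/* source list is now empty */
}

int main(void)
{
	struct list_head ail, tmp, nodes[6];
	int n = 0;

	list_init(&ail);
	list_init(&tmp);
	list_add_tail(&nodes[0], &ail);		/* existing AIL entry */
	for (int i = 1; i < 6; i++)		/* batch to be spliced in */
		list_add_tail(&nodes[i], &tmp);

	splice_after(&tmp, &nodes[0]);		/* one fix-up, not five inserts */

	for (struct list_head *p = ail.next; p != &ail; p = p->next)
		n++;
	printf("AIL now holds %d entries\n", n);	/* prints 6 */
	return 0;
}

This is why the patch only needs one insertion-point search per batch: the reverse walk in xfs_ail_splice() finds where the batch belongs, and the splice attaches all of it in a single step.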
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 62da86c90de5..e039729186e9 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -22,15 +22,17 @@ struct xfs_log_item;
 struct xfs_log_item_desc;
 struct xfs_mount;
 struct xfs_trans;
+struct xfs_ail;
+struct xfs_log_vec;
 
 void	xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
 void	xfs_trans_del_item(struct xfs_log_item *);
 void	xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
 				int flags);
-void	xfs_trans_item_committed(struct xfs_log_item *lip,
-					xfs_lsn_t commit_lsn, int aborted);
 void	xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp);
 
+void	xfs_trans_committed_bulk(struct xfs_ail *ailp, struct xfs_log_vec *lv,
+				xfs_lsn_t commit_lsn, int aborted);
 /*
  * AIL traversal cursor.
  *
@@ -76,6 +78,10 @@ struct xfs_ail {
 void			xfs_trans_ail_update(struct xfs_ail *ailp,
 					struct xfs_log_item *lip, xfs_lsn_t lsn)
 					__releases(ailp->xa_lock);
+void			xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
+					struct xfs_log_item **log_items,
+					int nr_items, xfs_lsn_t lsn)
+					__releases(ailp->xa_lock);
 void			xfs_trans_ail_delete(struct xfs_ail *ailp,
 					struct xfs_log_item *lip)
 					__releases(ailp->xa_lock);