author    Dave Chinner <dchinner@redhat.com>  2011-07-17 23:40:16 -0400
committer Alex Elder <aelder@sgi.com>  2011-07-20 19:37:20 -0400
commit    1d8c95a363bf8cd4d4182dd19c01693b635311c2 (patch)
tree      839d26eb6a1f6814c687e2c6855feb377b74c0f9 /fs/xfs/xfs_trans.c
parent    ad1a2c878ca70829874b4fcc83223cccb4e26dab (diff)
xfs: use a cursor for bulk AIL insertion
Delayed logging can insert tens of thousands of log items into the AIL at the same LSN. When the committing of log commit records occurs, we can get insertions at an LSN that is not at the end of the AIL. If there are thousands of items in the AIL on the tail LSN, each insertion has to walk the AIL to find the correct place to insert the new item. This can consume large amounts of CPU time and block other operations while the traversals are in progress.

To avoid this repeated walk, use an AIL cursor to record where we should be inserting the new items, without having to repeat the walk for each one. The cursor infrastructure already provides this functionality for push walks, so this is a simple extension of existing code. While it will not avoid the initial walk, it will avoid repeating that walk tens of thousands of times during a single checkpoint commit.

This version includes logic improvements from Christoph Hellwig.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
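The effect of the cursor is easiest to see on a toy model. The sketch below is plain userspace C with made-up names (item, ail, cursor); it is not the kernel code, just the shape of the optimisation: a walk-per-insert into a sorted list versus a remembered insertion point, so that a run of equal-LSN inserts pays for one walk instead of one walk per item.

/*
 * Toy model of the AIL insertion problem. Hypothetical names, plain
 * userspace C: none of this is the kernel implementation.
 */
#include <stdio.h>
#include <stdlib.h>

struct item {
	long		lsn;
	struct item	*next;
};

struct ail {
	struct item	*head;	/* list kept sorted by ascending lsn */
};

/* Naive insert: walks from the head on every call, O(n) per item. */
static void ail_insert_walk(struct ail *ail, struct item *new)
{
	struct item **pos = &ail->head;

	while (*pos && (*pos)->lsn <= new->lsn)
		pos = &(*pos)->next;
	new->next = *pos;
	*pos = new;
}

/* The cursor remembers where the previous insert landed. */
struct cursor {
	struct item	**pos;
};

/* One walk finds the insertion point for this lsn... */
static void cursor_init(struct ail *ail, struct cursor *cur, long lsn)
{
	cur->pos = &ail->head;
	while (*cur->pos && (*cur->pos)->lsn <= lsn)
		cur->pos = &(*cur->pos)->next;
}

/* ...then every equal-lsn insert is O(1): no walk, just a relink. */
static void ail_insert_cursor(struct cursor *cur, struct item *new)
{
	new->next = *cur->pos;
	*cur->pos = new;
	cur->pos = &new->next;	/* the next insert lands right after us */
}

int main(void)
{
	struct ail ail = { NULL };
	struct item lo = { 50, NULL }, hi = { 200, NULL };
	struct cursor cur;
	int i;

	/* Seed the "AIL" with items below and above the commit lsn. */
	ail_insert_walk(&ail, &lo);
	ail_insert_walk(&ail, &hi);

	/* Insert 10000 items at lsn 100 with a single walk up front. */
	cursor_init(&ail, &cur, 100);
	for (i = 0; i < 10000; i++) {
		struct item *it = malloc(sizeof(*it));

		it->lsn = 100;
		ail_insert_cursor(&cur, it);
	}

	printf("list runs %ld ... 100 ... %ld, still sorted\n",
	       ail.head->lsn, hi.lsn);
	return 0;
}

In the patch itself the same idea is batched: the cursor is threaded through xfs_log_item_batch_insert() into xfs_trans_ail_update_bulk(), so each batch of up to 32 items starts where the previous batch ended.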
Diffstat (limited to 'fs/xfs/xfs_trans.c')
-rw-r--r--  fs/xfs/xfs_trans.c | 27
1 file changed, 23 insertions(+), 4 deletions(-)
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index c83f63b33aae..efc147f0e9b6 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1426,6 +1426,7 @@ xfs_trans_committed(
 static inline void
 xfs_log_item_batch_insert(
 	struct xfs_ail		*ailp,
+	struct xfs_ail_cursor	*cur,
 	struct xfs_log_item	**log_items,
 	int			nr_items,
 	xfs_lsn_t		commit_lsn)
@@ -1434,7 +1435,7 @@ xfs_log_item_batch_insert(
 
 	spin_lock(&ailp->xa_lock);
 	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
-	xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
+	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
 
 	for (i = 0; i < nr_items; i++)
 		IOP_UNPIN(log_items[i], 0);
@@ -1452,6 +1453,13 @@ xfs_log_item_batch_insert(
  * as an iclog write error even though we haven't started any IO yet. Hence in
  * this case all we need to do is IOP_COMMITTED processing, followed by an
  * IOP_UNPIN(aborted) call.
+ *
+ * The AIL cursor is used to optimise the insert process. If commit_lsn is not
+ * at the end of the AIL, the insert cursor avoids the need to walk
+ * the AIL to find the insertion point on every xfs_log_item_batch_insert()
+ * call. This saves a lot of needless list walking and is a net win, even
+ * though it slightly increases that amount of AIL lock traffic to set it up
+ * and tear it down.
  */
 void
 xfs_trans_committed_bulk(
@@ -1463,8 +1471,13 @@ xfs_trans_committed_bulk(
 #define LOG_ITEM_BATCH_SIZE	32
 	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
 	struct xfs_log_vec	*lv;
+	struct xfs_ail_cursor	cur;
 	int			i = 0;
 
+	spin_lock(&ailp->xa_lock);
+	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
+	spin_unlock(&ailp->xa_lock);
+
 	/* unpin all the log items */
 	for (lv = log_vector; lv; lv = lv->lv_next ) {
 		struct xfs_log_item	*lip = lv->lv_item;
@@ -1493,7 +1506,9 @@ xfs_trans_committed_bulk(
 			/*
 			 * Not a bulk update option due to unusual item_lsn.
 			 * Push into AIL immediately, rechecking the lsn once
-			 * we have the ail lock. Then unpin the item.
+			 * we have the ail lock. Then unpin the item. This does
+			 * not affect the AIL cursor the bulk insert path is
+			 * using.
 			 */
 			spin_lock(&ailp->xa_lock);
 			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
@@ -1507,7 +1522,7 @@ xfs_trans_committed_bulk(
 		/* Item is a candidate for bulk AIL insert. */
 		log_items[i++] = lv->lv_item;
 		if (i >= LOG_ITEM_BATCH_SIZE) {
-			xfs_log_item_batch_insert(ailp, log_items,
+			xfs_log_item_batch_insert(ailp, &cur, log_items,
 					LOG_ITEM_BATCH_SIZE, commit_lsn);
 			i = 0;
 		}
@@ -1515,7 +1530,11 @@ xfs_trans_committed_bulk(
 
 	/* make sure we insert the remainder! */
 	if (i)
-		xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
+		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
+
+	spin_lock(&ailp->xa_lock);
+	xfs_trans_ail_cursor_done(ailp, &cur);
+	spin_unlock(&ailp->xa_lock);
 }
 
 /*
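Stepping back from the diff: with N items committing at one LSN and batches of LOG_ITEM_BATCH_SIZE (32), the old code walked the AIL tail once per batch, roughly N/32 walks, while the cursor path pays for one walk plus two extra lock round-trips to set the cursor up and tear it down. The skeleton below reduces the new control flow in xfs_trans_committed_bulk() to that pattern. It is an illustrative sketch only: the types are toys, the bodies are stubs, and a pthread mutex stands in for the AIL spinlock (xa_lock).

/*
 * Skeleton of the cursor lifecycle the patch establishes.
 * Illustrative only: toy types, stubbed bodies, pthread mutex
 * standing in for ailp->xa_lock.
 */
#include <pthread.h>
#include <stdio.h>

struct toy_ail {
	pthread_mutex_t	lock;		/* stand-in for xa_lock */
	int		unused;
};

struct toy_cursor {
	int		unused;		/* remembered insertion point elided */
};

/* Walk once to the last item at or below lsn; park the cursor there. */
static void toy_cursor_last(struct toy_ail *a, struct toy_cursor *c, long lsn)
{
	(void)a; (void)c; (void)lsn;
}

/* Insert a batch at the cursor and advance it; no walk needed. */
static void toy_batch_insert(struct toy_ail *a, struct toy_cursor *c)
{
	(void)a; (void)c;
}

/* Invalidate the cursor so later AIL users never see a stale one. */
static void toy_cursor_done(struct toy_ail *a, struct toy_cursor *c)
{
	(void)a; (void)c;
}

static void toy_committed_bulk(struct toy_ail *ail, long commit_lsn,
			       int batches)
{
	struct toy_cursor cur;
	int i;

	/* Set-up: the only full AIL walk in the whole checkpoint commit. */
	pthread_mutex_lock(&ail->lock);
	toy_cursor_last(ail, &cur, commit_lsn);
	pthread_mutex_unlock(&ail->lock);

	/* Each batch re-takes the lock internally and advances the cursor. */
	for (i = 0; i < batches; i++)
		toy_batch_insert(ail, &cur);

	/* Tear-down mirrors set-up: done under the lock. */
	pthread_mutex_lock(&ail->lock);
	toy_cursor_done(ail, &cur);
	pthread_mutex_unlock(&ail->lock);
}

int main(void)
{
	struct toy_ail ail = { PTHREAD_MUTEX_INITIALIZER, 0 };

	toy_committed_bulk(&ail, 100, 4);
	puts("cursor set up once, four batches inserted, cursor torn down");
	return 0;
}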