Diffstat (limited to 'fs/xfs')
-rw-r--r--   fs/xfs/xfs_trans.c      |  27
-rw-r--r--   fs/xfs/xfs_trans_ail.c  | 109
-rw-r--r--   fs/xfs/xfs_trans_priv.h |  10
3 files changed, 118 insertions(+), 28 deletions(-)
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index c83f63b33aae..efc147f0e9b6 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1426,6 +1426,7 @@ xfs_trans_committed(
 static inline void
 xfs_log_item_batch_insert(
         struct xfs_ail          *ailp,
+        struct xfs_ail_cursor   *cur,
         struct xfs_log_item     **log_items,
         int                     nr_items,
         xfs_lsn_t               commit_lsn)
@@ -1434,7 +1435,7 @@ xfs_log_item_batch_insert(
 
         spin_lock(&ailp->xa_lock);
         /* xfs_trans_ail_update_bulk drops ailp->xa_lock */
-        xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
+        xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
 
         for (i = 0; i < nr_items; i++)
                 IOP_UNPIN(log_items[i], 0);
@@ -1452,6 +1453,13 @@ xfs_log_item_batch_insert(
  * as an iclog write error even though we haven't started any IO yet. Hence in
  * this case all we need to do is IOP_COMMITTED processing, followed by an
  * IOP_UNPIN(aborted) call.
+ *
+ * The AIL cursor is used to optimise the insert process. If commit_lsn is not
+ * at the end of the AIL, the insert cursor avoids the need to walk the AIL to
+ * find the insertion point on every xfs_log_item_batch_insert() call. This
+ * saves a lot of needless list walking and is a net win, even though it
+ * slightly increases the amount of AIL lock traffic to set it up and tear it
+ * down.
  */
 void
 xfs_trans_committed_bulk(
@@ -1463,8 +1471,13 @@ xfs_trans_committed_bulk(
 #define LOG_ITEM_BATCH_SIZE     32
         struct xfs_log_item     *log_items[LOG_ITEM_BATCH_SIZE];
         struct xfs_log_vec      *lv;
+        struct xfs_ail_cursor   cur;
         int                     i = 0;
 
+        spin_lock(&ailp->xa_lock);
+        xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
+        spin_unlock(&ailp->xa_lock);
+
         /* unpin all the log items */
         for (lv = log_vector; lv; lv = lv->lv_next ) {
                 struct xfs_log_item     *lip = lv->lv_item;
@@ -1493,7 +1506,9 @@ xfs_trans_committed_bulk(
                         /*
                          * Not a bulk update option due to unusual item_lsn.
                          * Push into AIL immediately, rechecking the lsn once
-                         * we have the ail lock. Then unpin the item.
+                         * we have the ail lock. Then unpin the item. This does
+                         * not affect the AIL cursor the bulk insert path is
+                         * using.
                          */
                         spin_lock(&ailp->xa_lock);
                         if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
@@ -1507,7 +1522,7 @@ xfs_trans_committed_bulk(
                 /* Item is a candidate for bulk AIL insert. */
                 log_items[i++] = lv->lv_item;
                 if (i >= LOG_ITEM_BATCH_SIZE) {
-                        xfs_log_item_batch_insert(ailp, log_items,
+                        xfs_log_item_batch_insert(ailp, &cur, log_items,
                                         LOG_ITEM_BATCH_SIZE, commit_lsn);
                         i = 0;
                 }
@@ -1515,7 +1530,11 @@ xfs_trans_committed_bulk(
 
         /* make sure we insert the remainder! */
         if (i)
-                xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
+                xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
+
+        spin_lock(&ailp->xa_lock);
+        xfs_trans_ail_cursor_done(ailp, &cur);
+        spin_unlock(&ailp->xa_lock);
 }
 
 /*
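
The comment added above xfs_trans_committed_bulk() explains the payoff: every log item in a checkpoint is inserted at the same commit_lsn, so remembering the last insertion point turns a backwards list walk per batch into an O(1) append. What follows is a minimal userspace model of that idea, not the kernel code; the types and helpers (struct ail, ail_insert(), ail_last_before() and so on) are invented for illustration and only assume an LSN-ordered doubly linked list like the AIL.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures (hypothetical names). */
struct item {
        long            lsn;            /* models xfs_log_item.li_lsn */
        struct item     *prev, *next;   /* models the li_ail list linkage */
};

struct ail {
        struct item     head;           /* circular list head, like xa_ail */
};

struct cursor {
        struct item     *last;          /* last insertion point, like xfs_ail_cursor.item */
};

static void ail_init(struct ail *ail)
{
        ail->head.prev = ail->head.next = &ail->head;
}

/* Walk backwards from the tail: last item with lsn <= target, or NULL. */
static struct item *ail_last_before(struct ail *ail, long lsn)
{
        struct item *ip;

        for (ip = ail->head.prev; ip != &ail->head; ip = ip->prev)
                if (ip->lsn <= lsn)
                        return ip;
        return NULL;
}

static void insert_after(struct item *pos, struct item *nip)
{
        nip->prev = pos;
        nip->next = pos->next;
        pos->next->prev = nip;
        pos->next = nip;
}

/*
 * Insert @nip at @lsn. With a cursor, repeated inserts at the same LSN reuse
 * the remembered position instead of re-walking the list, which is the win
 * the patch is after.
 */
static void ail_insert(struct ail *ail, struct cursor *cur,
                       struct item *nip, long lsn)
{
        struct item *pos = cur ? cur->last : NULL;

        if (!pos)
                pos = ail_last_before(ail, lsn);        /* fall back to a search */
        if (!pos)
                pos = &ail->head;                       /* empty list or smallest lsn */

        nip->lsn = lsn;
        insert_after(pos, nip);
        if (cur)
                cur->last = nip;        /* next insert goes after this item */
}

int main(void)
{
        struct ail ail;
        struct cursor cur = { NULL };
        struct item existing[2], batch[4], *ip;
        int i;

        ail_init(&ail);
        ail_insert(&ail, NULL, &existing[0], 10);       /* pre-existing AIL items */
        ail_insert(&ail, NULL, &existing[1], 30);

        /* One "commit": every item lands at the same commit LSN (20). */
        for (i = 0; i < 4; i++)
                ail_insert(&ail, &cur, &batch[i], 20);

        for (ip = ail.head.next; ip != &ail.head; ip = ip->next)
                printf("%ld ", ip->lsn);        /* prints: 10 20 20 20 20 30 */
        printf("\n");
        return 0;
}

The cursor pays for itself whenever more than one item lands at the same LSN, which is exactly the bulk-insert case the patch targets, and insert order within that LSN is preserved because the cursor always trails the last spliced item.
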
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 5fc2380092c8..9a69dc06ea86 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -272,9 +272,9 @@ xfs_trans_ail_cursor_clear(
 }
 
 /*
- * Return the item in the AIL with the current lsn.
- * Return the current tree generation number for use
- * in calls to xfs_trans_next_ail().
+ * Initialise the cursor to the first item in the AIL with the given @lsn.
+ * This searches the list from lowest LSN to highest. Pass a @lsn of zero
+ * to initialise the cursor to the first item in the AIL.
  */
 xfs_log_item_t *
 xfs_trans_ail_cursor_first(
@@ -300,31 +300,97 @@ out:
 }
 
 /*
- * splice the log item list into the AIL at the given LSN.
+ * Initialise the cursor to the last item in the AIL with the given @lsn.
+ * This searches the list from highest LSN to lowest. If there is no item with
+ * the value of @lsn, then it sets the cursor to the last item with an LSN lower
+ * than @lsn.
+ */
+static struct xfs_log_item *
+__xfs_trans_ail_cursor_last(
+        struct xfs_ail          *ailp,
+        xfs_lsn_t               lsn)
+{
+        xfs_log_item_t          *lip;
+
+        list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
+                if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
+                        return lip;
+        }
+        return NULL;
+}
+
+/*
+ * Initialise the cursor to the last item in the AIL with the given @lsn.
+ * This searches the list from highest LSN to lowest.
+ */
+struct xfs_log_item *
+xfs_trans_ail_cursor_last(
+        struct xfs_ail          *ailp,
+        struct xfs_ail_cursor   *cur,
+        xfs_lsn_t               lsn)
+{
+        xfs_trans_ail_cursor_init(ailp, cur);
+        cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
+        return cur->item;
+}
+
+/*
+ * splice the log item list into the AIL at the given LSN. We splice to the
+ * tail of the given LSN to maintain insert order for push traversals. The
+ * cursor is optional, allowing repeated updates to the same LSN to avoid
+ * repeated traversals.
  */
 static void
 xfs_ail_splice(
         struct xfs_ail          *ailp,
-        struct list_head        *list,
-        xfs_lsn_t               lsn)
+        struct xfs_ail_cursor   *cur,
+        struct list_head        *list,
+        xfs_lsn_t               lsn)
 {
-        xfs_log_item_t          *next_lip;
+        struct xfs_log_item     *lip = cur ? cur->item : NULL;
+        struct xfs_log_item     *next_lip;
 
-        /* If the list is empty, just insert the item. */
-        if (list_empty(&ailp->xa_ail)) {
-                list_splice(list, &ailp->xa_ail);
-                return;
+        /*
+         * Get a new cursor if we don't have a placeholder or the existing one
+         * has been invalidated.
+         */
+        if (!lip || (__psint_t)lip & 1) {
+                lip = __xfs_trans_ail_cursor_last(ailp, lsn);
+
+                if (!lip) {
+                        /* The list is empty, so just splice and return. */
+                        if (cur)
+                                cur->item = NULL;
+                        list_splice(list, &ailp->xa_ail);
+                        return;
+                }
         }
 
-        list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
-                if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
-                        break;
+        /*
+         * Our cursor points to the item we want to insert _after_, so we have
+         * to update the cursor to point to the end of the list we are splicing
+         * in so that it points to the correct location for the next splice.
+         * i.e. before the splice
+         *
+         *  lsn -> lsn -> lsn + x -> lsn + x ...
+         *          ^
+         *          | cursor points here
+         *
+         * After the splice we have:
+         *
+         *  lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ...
+         *          ^                            ^
+         *          | cursor points here         | needs to move here
+         *
+         * So we set the cursor to the last item in the list to be spliced
+         * before we execute the splice, resulting in the cursor pointing to
+         * the correct item after the splice occurs.
+         */
+        if (cur) {
+                next_lip = list_entry(list->prev, struct xfs_log_item, li_ail);
+                cur->item = next_lip;
         }
-
-        ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
-               XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
-
-        list_splice_init(list, &next_lip->li_ail);
+        list_splice(list, &lip->li_ail);
 }
 
 /*
@@ -645,6 +711,7 @@ xfs_trans_unlocked_item(
 void
 xfs_trans_ail_update_bulk(
         struct xfs_ail          *ailp,
+        struct xfs_ail_cursor   *cur,
         struct xfs_log_item     **log_items,
         int                     nr_items,
         xfs_lsn_t               lsn) __releases(ailp->xa_lock)
@@ -674,7 +741,7 @@ xfs_trans_ail_update_bulk(
                 list_add(&lip->li_ail, &tmp);
         }
 
-        xfs_ail_splice(ailp, &tmp, lsn);
+        xfs_ail_splice(ailp, cur, &tmp, lsn);
 
         if (!mlip_changed) {
                 spin_unlock(&ailp->xa_lock);
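
The (__psint_t)lip & 1 test in the new xfs_ail_splice() treats a cursor whose stored pointer has its low bit set as stale: elsewhere in this file (xfs_trans_ail_cursor_clear(), not part of this diff) a cursor pointing at an item being removed appears to be invalidated by tagging that low bit, which is always clear for a properly aligned object. Below is a small, self-contained sketch of that tagging convention under those assumptions; cursor_invalidate() and cursor_is_valid() are hypothetical names, not the kernel's API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Any object aligned to at least 2 bytes leaves bit 0 of its address free. */
struct log_item {
        long lsn;
};

struct cursor {
        struct log_item *item;
};

/* Flag the cursor as stale by setting the low bit of the stored pointer. */
static void cursor_invalidate(struct cursor *cur)
{
        cur->item = (struct log_item *)((uintptr_t)cur->item | 1);
}

/* A cursor is unusable if it is empty or its target has been removed. */
static int cursor_is_valid(const struct cursor *cur)
{
        return cur->item && !((uintptr_t)cur->item & 1);
}

int main(void)
{
        struct log_item item = { .lsn = 100 };
        struct cursor cur = { .item = &item };

        assert(cursor_is_valid(&cur));

        /* Simulate the item being deleted out from under the cursor. */
        cursor_invalidate(&cur);
        assert(!cursor_is_valid(&cur));

        printf("stale cursor detected, caller must re-search the list\n");
        return 0;
}

When the check fires, xfs_ail_splice() simply falls back to __xfs_trans_ail_cursor_last() and re-searches the list, so a stale cursor costs one extra walk rather than a dangling-pointer dereference.
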
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 6b164e9e9a1f..c0cb40890329 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -82,6 +82,7 @@ struct xfs_ail {
 extern struct workqueue_struct  *xfs_ail_wq;    /* AIL workqueue */
 
 void    xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
+                                struct xfs_ail_cursor *cur,
                                 struct xfs_log_item **log_items, int nr_items,
                                 xfs_lsn_t lsn) __releases(ailp->xa_lock);
 static inline void
@@ -90,7 +91,7 @@ xfs_trans_ail_update(
         struct xfs_log_item     *lip,
         xfs_lsn_t               lsn) __releases(ailp->xa_lock)
 {
-        xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
+        xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
 }
 
 void    xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
@@ -111,10 +112,13 @@ xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp);
 void                    xfs_trans_unlocked_item(struct xfs_ail *,
                                         xfs_log_item_t *);
 
-struct xfs_log_item     *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
+struct xfs_log_item *   xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
                                         struct xfs_ail_cursor *cur,
                                         xfs_lsn_t lsn);
-struct xfs_log_item     *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
+struct xfs_log_item *   xfs_trans_ail_cursor_last(struct xfs_ail *ailp,
+                                        struct xfs_ail_cursor *cur,
+                                        xfs_lsn_t lsn);
+struct xfs_log_item *   xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
                                         struct xfs_ail_cursor *cur);
 void                    xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
                                         struct xfs_ail_cursor *cur);