Diffstat (limited to 'fs/xfs/xfs_trans_ail.c')
-rw-r--r--  fs/xfs/xfs_trans_ail.c  88
1 file changed, 0 insertions(+), 88 deletions(-)
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 218f96861c80..8481a5a6d6c2 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,7 +28,6 @@
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 
-STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *);
 STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
 STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
 STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
@@ -450,58 +449,6 @@ xfs_trans_unlocked_item(
 	xfs_log_move_tail(ailp->xa_mount, 1);
 }	/* xfs_trans_unlocked_item */
 
-
-/*
- * Update the position of the item in the AIL with the new
- * lsn. If it is not yet in the AIL, add it. Otherwise, move
- * it to its new position by removing it and re-adding it.
- *
- * Wakeup anyone with an lsn less than the item's lsn. If the item
- * we move in the AIL is the minimum one, update the tail lsn in the
- * log manager.
- *
- * This function must be called with the AIL lock held. The lock
- * is dropped before returning.
- */
-void
-xfs_trans_ail_update(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip,
-	xfs_lsn_t	lsn) __releases(ailp->xa_lock)
-{
-	xfs_log_item_t	*mlip;	/* ptr to minimum lip */
-	xfs_lsn_t	tail_lsn;
-
-	mlip = xfs_ail_min(ailp);
-
-	if (lip->li_flags & XFS_LI_IN_AIL) {
-		xfs_ail_delete(ailp, lip);
-	} else {
-		lip->li_flags |= XFS_LI_IN_AIL;
-	}
-
-	lip->li_lsn = lsn;
-	xfs_ail_insert(ailp, lip);
-
-	if (mlip == lip) {
-		mlip = xfs_ail_min(ailp);
-		/*
-		 * It is not safe to access mlip after the AIL lock is
-		 * dropped, so we must get a copy of li_lsn before we do
-		 * so. This is especially important on 32-bit platforms
-		 * where accessing and updating 64-bit values like li_lsn
-		 * is not atomic.
-		 */
-		tail_lsn = mlip->li_lsn;
-		spin_unlock(&ailp->xa_lock);
-		xfs_log_move_tail(ailp->xa_mount, tail_lsn);
-	} else {
-		spin_unlock(&ailp->xa_lock);
-	}
-
-
-}	/* xfs_trans_update_ail */
-
 /*
  * xfs_trans_ail_update - bulk AIL insertion operation.
  *
@@ -764,41 +711,6 @@ xfs_trans_ail_destroy(
 }
 
 /*
- * Insert the given log item into the AIL.
- * We almost always insert at the end of the list, so on inserts
- * we search from the end of the list to find where the
- * new item belongs.
- */
-STATIC void
-xfs_ail_insert(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip)
-{
-	xfs_log_item_t	*next_lip;
-
-	/*
-	 * If the list is empty, just insert the item.
-	 */
-	if (list_empty(&ailp->xa_ail)) {
-		list_add(&lip->li_ail, &ailp->xa_ail);
-		return;
-	}
-
-	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
-		if (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)
-			break;
-	}
-
-	ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
-	       XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0);
-
-	list_add(&lip->li_ail, &next_lip->li_ail);
-
-	xfs_ail_check(ailp, lip);
-	return;
-}
-
-/*
  * splice the log item list into the AIL at the given LSN.
  */
 STATIC void
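
For reference, the ordering invariant behind the removed xfs_ail_insert() -- walk the
AIL backwards from the tail, because a newly logged item almost always carries the
highest LSN, and insert after the first item whose LSN compares <= the new item's --
can be sketched as a self-contained userspace C program. This is a minimal
illustration, not kernel code: struct node, insert_after() and ail_insert() below are
hypothetical stand-ins for the kernel's list_head, list_add() and the removed
xfs_ail_insert(), and the plain <= on a long replaces XFS_LSN_CMP() on xfs_lsn_t.

	#include <stdio.h>

	/* Stand-in for the kernel's circular doubly linked list_head. */
	struct node {
		struct node	*prev, *next;
		long		lsn;	/* stand-in for the 64-bit li_lsn */
	};

	/* Insert 'new' immediately after 'pos' (list_add() semantics). */
	static void insert_after(struct node *pos, struct node *new)
	{
		new->prev = pos;
		new->next = pos->next;
		pos->next->prev = new;
		pos->next = new;
	}

	/*
	 * Mirror of the removed xfs_ail_insert(): search from the tail for
	 * the first item with lsn <= the new item's lsn and insert after it.
	 * The empty-list case the kernel special-cased falls out naturally
	 * here: the search ends at the sentinel head and the new item
	 * becomes the new minimum.
	 */
	static void ail_insert(struct node *head, struct node *new)
	{
		struct node *pos;

		for (pos = head->prev; pos != head; pos = pos->prev)
			if (pos->lsn <= new->lsn)
				break;
		insert_after(pos, new);
	}

	int main(void)
	{
		struct node head = { &head, &head, 0 };
		struct node a = { 0, 0, 100 }, b = { 0, 0, 300 }, c = { 0, 0, 200 };
		struct node *pos;

		ail_insert(&head, &a);
		ail_insert(&head, &b);
		ail_insert(&head, &c);	/* lands between a and b */

		for (pos = head.next; pos != &head; pos = pos->next)
			printf("%ld\n", pos->lsn);	/* prints 100, 200, 300 */
		return 0;
	}

The same tail-first search survives in the remaining xfs_ail_splice(), which pays
the cost once per pre-sorted batch of items rather than once per item -- the point
of moving to the bulk xfs_trans_ail_update() path that this patch leaves behind.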