author		Dave Chinner <david@fromorbit.com>	2010-02-01 18:13:42 -0500
committer	Dave Chinner <david@fromorbit.com>	2010-02-01 18:13:42 -0500
commit		d808f617ad00a413585b806de340feda5ad9a2da
tree		ed03d4d019a9d8b566ffd454e112e9fbce70bad8	/fs/xfs/linux-2.6/xfs_buf.c
parent		c854363e80b49dd04a4de18ebc379eb8c8806674
xfs: Don't issue buffer IO direct from AIL push V2
All buffers logged into the AIL are marked as delayed write.
When the AIL needs to push a buffer out, it issues an async write of
that buffer directly. This means that IO patterns depend on the order
of buffers in the AIL.
Instead of flushing the buffer, promote it within the delayed write
list so that it will be flushed by the xfsbufd on its next run. Return
state to the xfsaild indicating that the buffer was promoted, so the
xfsaild knows it needs to kick the xfsbufd into flushing the promoted
buffers.
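[Illustration, not part of the original commit message.] As a rough feel for the flow just described, here is a minimal stand-alone C sketch of a push handler that promotes a delayed-write buffer instead of writing it, and reports that back so the caller knows to wake the flusher thread. All names here (ail_push_buf, delwri_promote, ITEM_PROMOTED, wake_flusher) are hypothetical stand-ins for illustration, not the kernel's actual interfaces.

/*
 * Hypothetical model of the new push behaviour: no IO is issued from
 * the push path; the buffer is promoted on its delayed-write queue and
 * the caller is told to run the flusher. These names only model the
 * flow described in the commit message, they are not XFS interfaces.
 */
#include <stdbool.h>
#include <stdio.h>

enum push_result { ITEM_PINNED, ITEM_LOCKED, ITEM_PROMOTED };

struct buf {
	bool	delwri;		/* on the delayed-write queue? */
	bool	promoted;	/* moved up for the next flusher run */
};

/* Model of "promote": mark the buffer so the next flusher run writes it. */
static void delwri_promote(struct buf *bp)
{
	bp->promoted = true;
}

/* Model of the AIL push handler: promotion only, no direct write. */
static enum push_result ail_push_buf(struct buf *bp)
{
	if (!bp->delwri)
		return ITEM_LOCKED;
	delwri_promote(bp);
	return ITEM_PROMOTED;
}

int main(void)
{
	struct buf bp = { .delwri = true };
	bool wake_flusher = false;

	/* The AIL loop collects the result and wakes the flusher once. */
	if (ail_push_buf(&bp) == ITEM_PROMOTED)
		wake_flusher = true;

	printf("promoted=%d wake_flusher=%d\n", bp.promoted, wake_flusher);
	return 0;
}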
Using the xfsbufd for issuing the IO allows us to dispatch all
buffer IO from the one queue. This means that we can make much more
enlightened decisions on what order to flush buffers to disk as
we don't have multiple places issuing IO. Optimisations to xfsbufd
will be in a future patch.
Version 2
- kill XFS_ITEM_FLUSHING as it is now unused.
Signed-off-by: Dave Chinner <david@fromorbit.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	29
1 file changed, 29 insertions, 0 deletions
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 44e20e578ba0..b306265caa33 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1778,6 +1778,35 @@ xfs_buf_delwri_dequeue(
 	trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
 }
 
+/*
+ * If a delwri buffer needs to be pushed before it has aged out, then promote
+ * it to the head of the delwri queue so that it will be flushed on the next
+ * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
+ * than the age currently needed to flush the buffer. Hence the next time the
+ * xfsbufd sees it is guaranteed to be considered old enough to flush.
+ */
+void
+xfs_buf_delwri_promote(
+	struct xfs_buf	*bp)
+{
+	struct xfs_buftarg *btp = bp->b_target;
+	long		age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;
+
+	ASSERT(bp->b_flags & XBF_DELWRI);
+	ASSERT(bp->b_flags & _XBF_DELWRI_Q);
+
+	/*
+	 * Check the buffer age before locking the delayed write queue as we
+	 * don't need to promote buffers that are already past the flush age.
+	 */
+	if (bp->b_queuetime < jiffies - age)
+		return;
+	bp->b_queuetime = jiffies - age;
+	spin_lock(&btp->bt_delwrite_lock);
+	list_move(&bp->b_list, &btp->bt_delwrite_queue);
+	spin_unlock(&btp->bt_delwrite_lock);
+}
+
 STATIC void
 xfs_buf_runall_queues(
 	struct workqueue_struct	*queue)
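[Illustration, not part of the original patch.] To get a feel for the queuetime backdating done by xfs_buf_delwri_promote() above, here is a small userspace sketch. It assumes a 1000 Hz tick rate and the default xfs_buf_age_centisecs of 1500 (15 seconds); HZ, msecs_to_jiffies(), jiffies and the flush check are simplified stand-ins for the kernel versions, not the real implementations.

/*
 * Userspace sketch of the queuetime backdating trick. Only the
 * arithmetic matches the promoted-buffer logic shown in the diff;
 * HZ, jiffies and the flush check below are simplified stand-ins.
 */
#include <stdio.h>

#define HZ 1000					/* assumed tick rate */
#define msecs_to_jiffies(ms)	((ms) * HZ / 1000)

int main(void)
{
	long xfs_buf_age_centisecs = 1500;	/* default 15 second flush age */
	long age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;

	unsigned long jiffies = 1000000;	/* "current" tick count */
	unsigned long b_queuetime = jiffies - 100; /* queued only 100 ticks ago */

	/* Promote: backdate the queue time so the buffer looks aged out. */
	if (!(b_queuetime < jiffies - age))
		b_queuetime = jiffies - age;

	/*
	 * A flusher that skips buffers whose queuetime + age is still in
	 * the future now sees this one as old enough to write out.
	 */
	printf("age=%ld ticks, queuetime=%lu, flush now? %s\n",
	       age, b_queuetime, b_queuetime + age <= jiffies ? "yes" : "no");
	return 0;
}

With these assumed defaults the buffer is backdated by one tick more than the configured flush age, so the xfsbufd's ordinary aging check, rather than any special-case path, is what picks the promoted buffer up on its next run.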