author     Wu Fengguang <fengguang.wu@intel.com>   2010-07-21 22:32:30 -0400
committer  Wu Fengguang <fengguang.wu@intel.com>   2011-06-07 20:25:21 -0400
commit     ba9aa8399fda48510d80c2fed1afb8fedbe1bb41
tree       477c6fc2ca327316139d8ba935e410bf528ab84e   /fs/fs-writeback.c
parent     e6fb6da2e10682d477f2fdb749451d9fe5d168e8
writeback: the kupdate expire timestamp should be a moving target
Dynamically compute the dirty expire timestamp at queue_io() time.

writeback_control.older_than_this used to be determined at entrance to
the kupdate writeback work. This _static_ timestamp may go stale if the
kupdate work runs on and on. The flusher may then get stuck with some
old busy inodes, never considering newly expired inodes thereafter.

This has two possible problems:

- It is unfair for a large dirty inode to delay (for a long time) the
  writeback of small dirty inodes.

- As time goes by, the large and busy dirty inode may contain only
  _freshly_ dirtied pages. Ignoring newly expired dirty inodes risks
  delaying the expired dirty pages to the end of the LRU lists,
  triggering the evil pageout(). Nevertheless this patch merely
  addresses part of the problem.

v2: keep policy changes inside wb_writeback() and keep the
wbc.older_than_this visibility as suggested by Dave.

CC: Dave Chinner <david@fromorbit.com>
Acked-by: Jan Kara <jack@suse.cz>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Itaru Kitayama <kitayama@cl.bb4u.ne.jp>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
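To make the effect concrete, here is a minimal userspace sketch (not kernel code; the names and intervals are made up for illustration) of the difference between an expire cutoff captured once before the loop and one recomputed on every pass, which is what the hunks below do with oldest_jif and wbc.older_than_this inside wb_writeback():

/*
 * Minimal userspace sketch (hypothetical names, not the kernel code) of
 * the idea in this patch: if the expire cutoff is computed once before
 * the writeback loop, items that expire while the loop keeps running
 * are never noticed; recomputing the cutoff on every pass makes it a
 * moving target.
 */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define EXPIRE_SECONDS 2	/* stands in for dirty_expire_interval */

/* Count how many "inodes" are older than the cutoff on this pass. */
static int count_expired(const time_t *dirtied_when, int n, time_t cutoff)
{
	int expired = 0;

	for (int i = 0; i < n; i++)
		if (dirtied_when[i] <= cutoff)
			expired++;
	return expired;
}

int main(void)
{
	/* One long-expired item and one freshly dirtied item. */
	time_t dirtied_when[2] = { time(NULL) - 60, time(NULL) };

	for (int pass = 1; pass <= 4; pass++) {
		/* Moving target: recompute the cutoff each pass, so the
		 * freshly dirtied item is picked up once it expires. */
		time_t cutoff = time(NULL) - EXPIRE_SECONDS;

		printf("pass %d: %d expired item(s)\n",
		       pass, count_expired(dirtied_when, 2, cutoff));
		sleep(1);	/* pretend one writeback pass takes a while */
	}
	return 0;
}

With a static cutoff taken before the loop, the second item would never be counted; with the per-pass cutoff it shows up once it has aged past EXPIRE_SECONDS, mirroring how newly expired inodes are now picked up by later queue_io() rounds.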
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--   fs/fs-writeback.c   11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 271cf2150ba0..0adee7853b80 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -672,11 +672,6 @@ static long wb_writeback(struct bdi_writeback *wb,
 	long write_chunk = MAX_WRITEBACK_PAGES;
 	struct inode *inode;
 
-	if (wbc.for_kupdate) {
-		wbc.older_than_this = &oldest_jif;
-		oldest_jif = jiffies -
-			msecs_to_jiffies(dirty_expire_interval * 10);
-	}
 	if (!wbc.range_cyclic) {
 		wbc.range_start = 0;
 		wbc.range_end = LLONG_MAX;
@@ -723,6 +718,12 @@ static long wb_writeback(struct bdi_writeback *wb,
 		if (work->for_background && !over_bground_thresh())
 			break;
 
+		if (work->for_kupdate) {
+			oldest_jif = jiffies -
+				msecs_to_jiffies(dirty_expire_interval * 10);
+			wbc.older_than_this = &oldest_jif;
+		}
+
 		wbc.more_io = 0;
 		wbc.nr_to_write = write_chunk;
 		wbc.pages_skipped = 0;