author     Wu Fengguang <fengguang.wu@intel.com>    2009-09-23 08:33:40 -0400
committer  Jens Axboe <jens.axboe@oracle.com>       2009-09-25 12:08:24 -0400
commit     d3ddec7635b6fb37cb49e3553bdeea59642be653 (patch)
tree       662a1ea9c0eac6aa9bb07d4f67b6aad84709600c
parent     3a2e9a5a2afc1a2d2c548b8987f133235cebe933 (diff)
writeback: stop background writeback when below background threshold
Treat bdi_start_writeback(0) as a special request to do background write,
and stop such work when we are below the background dirty threshold.

Also simplify the (nr_pages <= 0) checks. Since we already pass in
nr_pages=LONG_MAX for WB_SYNC_ALL and background writes, we don't need
to worry about it being decreased to zero.

Reported-by: Richard Kennedy <richard@rsk.demon.co.uk>
CC: Jan Kara <jack@suse.cz>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
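Put differently, the nr_pages argument now doubles as a mode selector. A
minimal illustration of the two calling conventions after this patch (a
sketch for clarity, not code taken from the patch itself):

	/* Bounded writeback: write out up to 1024 pages, then stop. */
	bdi_start_writeback(bdi, 1024);

	/*
	 * Background writeback: nr_pages == 0 is rewritten internally to
	 * nr_pages = LONG_MAX with args.for_background = 1, so wb_writeback()
	 * keeps going only while over_bground_thresh() stays true.
	 */
	bdi_start_writeback(bdi, 0);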
-rw-r--r--  fs/fs-writeback.c    28
-rw-r--r--  mm/page-writeback.c   6
2 files changed, 20 insertions(+), 14 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c59d6737036c..476be9b10881 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -41,8 +41,9 @@ struct wb_writeback_args {
 	long nr_pages;
 	struct super_block *sb;
 	enum writeback_sync_modes sync_mode;
-	int for_kupdate;
-	int range_cyclic;
+	int for_kupdate:1;
+	int range_cyclic:1;
+	int for_background:1;
 };
 
 /*
@@ -257,6 +258,15 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
 		.range_cyclic	= 1,
 	};
 
+	/*
+	 * We treat @nr_pages=0 as the special case to do background writeback,
+	 * ie. to sync pages until the background dirty threshold is reached.
+	 */
+	if (!nr_pages) {
+		args.nr_pages = LONG_MAX;
+		args.for_background = 1;
+	}
+
 	bdi_alloc_queue_work(bdi, &args);
 }
 
@@ -720,20 +730,16 @@ static long wb_writeback(struct bdi_writeback *wb,
 
 	for (;;) {
 		/*
-		 * Don't flush anything for non-integrity writeback where
-		 * no nr_pages was given
+		 * Stop writeback when nr_pages has been consumed
 		 */
-		if (!args->for_kupdate && args->nr_pages <= 0 &&
-		    args->sync_mode == WB_SYNC_NONE)
+		if (args->nr_pages <= 0)
 			break;
 
 		/*
-		 * If no specific pages were given and this is just a
-		 * periodic background writeout and we are below the
-		 * background dirty threshold, don't do anything
+		 * For background writeout, stop when we are below the
+		 * background dirty threshold
 		 */
-		if (args->for_kupdate && args->nr_pages <= 0 &&
-		    !over_bground_thresh())
+		if (args->for_background && !over_bground_thresh())
 			break;
 
 		wbc.more_io = 0;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index cbd4cba468bd..3c78fc316202 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -593,10 +593,10 @@ static void balance_dirty_pages(struct address_space *mapping,
 	 * background_thresh, to keep the amount of dirty memory low.
 	 */
 	if ((laptop_mode && pages_written) ||
-	    (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY)
+	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
 			       + global_page_state(NR_UNSTABLE_NFS))
 			       > background_thresh)))
-		bdi_start_writeback(bdi, nr_writeback);
+		bdi_start_writeback(bdi, 0);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
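For reference, the loop in wb_writeback() keys off over_bground_thresh()
from fs/fs-writeback.c. A rough sketch of that helper as it looked in this
kernel generation; the exact threshold comparison and the get_dirty_limits()
call shape here are assumptions, not quoted source:

	static inline bool over_bground_thresh(void)
	{
		unsigned long background_thresh, dirty_thresh;

		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

		/* still more dirty + unstable-NFS pages than the background limit? */
		return (global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
	}

With for_background set and nr_pages = LONG_MAX, wb_writeback() therefore
writes until this returns false rather than until a page quota runs out.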