author     Peter Zijlstra <peterz@infradead.org>                  2007-11-14 19:59:15 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-11-14 21:45:38 -0500
commit     5fce25a9df4865bdd5e3dc4853b269dc1677a02a (patch)
tree       207fe2bf726bac89e402eb738b9548cfc7cae2f5 /mm
parent     546040dc4872f807d40b69bed86605636082564c (diff)
mm: speed up writeback ramp-up on clean systems
We allow the bdi limits to be violated while there is a lot of room on the
system. Once we reach half of the total limit we start enforcing the bdi
limits, and the bdi ramp-up should happen. Doing it this way avoids many
small writeouts on an otherwise idle system and should also speed up the
ramp-up.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
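As an aside for readers following the logic from the commit message alone, below is a minimal user-space sketch of the throttling decision this patch introduces. The threshold values and the should_throttle() helper are hypothetical, made up purely for illustration; in the kernel the thresholds come from get_dirty_limits() and the counters from global_page_state() and bdi_stat() inside balance_dirty_pages().

#include <stdio.h>

/*
 * Illustrative values only (not taken from a real system): the kernel
 * derives these from vm_dirty_ratio / dirty_background_ratio and the
 * per-bdi accounting.
 */
static long background_thresh = 10000;	/* pages */
static long dirty_thresh      = 40000;	/* pages */
static long bdi_thresh        =  2000;	/* pages; still ramping up */

/* Hypothetical helper distilled from the patched balance_dirty_pages(). */
static int should_throttle(long nr_reclaimable, long nr_writeback,
			   long bdi_nr_reclaimable, long bdi_nr_writeback)
{
	/* Per-bdi limit not exceeded: no throttling needed. */
	if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
		return 0;

	/*
	 * The check added by this patch: while global dirty + writeback is
	 * below the midpoint of the background and dirty thresholds, let
	 * background writeback catch up instead of throttling, even though
	 * the (still small) per-bdi limit has been exceeded.
	 */
	if (nr_reclaimable + nr_writeback <
			(background_thresh + dirty_thresh) / 2)
		return 0;

	return 1;
}

int main(void)
{
	/* bdi limit exceeded, but globally well below the midpoint (25000). */
	printf("%d\n", should_throttle(3000, 500, 1800, 400));
	/* Globally past the midpoint: enforce the bdi limit. */
	printf("%d\n", should_throttle(20000, 6000, 1800, 400));
	return 0;
}

In the first call the per-bdi limit is exceeded but the system is globally clean, so the midpoint check skips throttling and lets the bdi limit keep ramping up; in the second call the global counters have passed the midpoint and the bdi limit is enforced.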
Diffstat (limited to 'mm')
-rw-r--r--  mm/page-writeback.c | 19
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 838a5e31394c..81a91e6f1f99 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -355,8 +355,8 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-	long bdi_nr_reclaimable;
-	long bdi_nr_writeback;
+	long nr_reclaimable, bdi_nr_reclaimable;
+	long nr_writeback, bdi_nr_writeback;
 	long background_thresh;
 	long dirty_thresh;
 	long bdi_thresh;
@@ -376,11 +376,26 @@ static void balance_dirty_pages(struct address_space *mapping)
 
 		get_dirty_limits(&background_thresh, &dirty_thresh,
 				&bdi_thresh, bdi);
+
+		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+					global_page_state(NR_UNSTABLE_NFS);
+		nr_writeback = global_page_state(NR_WRITEBACK);
+
 		bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
 		bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
+
 		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
 			break;
 
+		/*
+		 * Throttle it only when the background writeback cannot
+		 * catch-up. This avoids (excessively) small writeouts
+		 * when the bdi limits are ramping up.
+		 */
+		if (nr_reclaimable + nr_writeback <
+				(background_thresh + dirty_thresh) / 2)
+			break;
+
 		if (!bdi->dirty_exceeded)
 			bdi->dirty_exceeded = 1;
 