author		Wu Fengguang <fengguang.wu@intel.com>	2009-09-23 09:56:00 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-09-25 12:08:24 -0400
commit		3a2e9a5a2afc1a2d2c548b8987f133235cebe933
tree		cb05d2873b2701ded758a7087de5af2932a97736 /mm
parent		a5989bdc981ec85e0734ac22519cc0b780813d7b
writeback: balance_dirty_pages() shall write more than dirtied pages
Some filesystems may choose to write much more than ratelimit_pages
before calling balance_dirty_pages_ratelimited_nr(). So it is safer to
determine the number of pages to write based on the real number of
dirtied pages.
Otherwise it is possible that
	loop {
		btrfs_file_write():     dirty 1024 pages
		balance_dirty_pages():  write up to 48 pages (= ratelimit_pages * 1.5)
	}
in which the writeback rate cannot keep up with the dirty rate, and the
dirty pages go all the way beyond dirty_thresh.
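
As an illustration (not part of the patch), here is a minimal userspace
sketch of that imbalance, using ratelimit_pages = 32 and a made-up
dirty_thresh:

	#include <stdio.h>

	int main(void)
	{
		const long ratelimit_pages = 32;
		/* old fixed chunk: ratelimit_pages * 1.5 = 48 pages */
		const long write_chunk = ratelimit_pages + ratelimit_pages / 2;
		const long dirtied_per_write = 1024;	/* one btrfs_file_write() */
		const long dirty_thresh = 100000;	/* made-up threshold */
		long dirty = 0, iters = 0;

		/* each iteration dirties 1024 pages but writes back at most
		 * 48, so the dirty count grows by ~976 per iteration */
		while (dirty < dirty_thresh) {
			dirty += dirtied_per_write - write_chunk;
			iters++;
		}
		printf("dirty_thresh crossed after %ld iterations\n", iters);
		return 0;
	}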
The increased write_chunk may make the dirtier's progress more bumpy.
So filesystems shall take care not to dirty too much at a time
(e.g. > 4MB) without checking the ratelimit.
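
For illustration, a sketch of that recommended pattern, where
fs_dirty_pages() is a hypothetical filesystem helper and 1024 pages
corresponds to the suggested 4MB bound with 4KB pages:

	#define FS_DIRTY_BATCH	1024	/* 4MB of 4KB pages */

	static void fs_write_pages(struct address_space *mapping,
				   unsigned long nr_pages)
	{
		while (nr_pages) {
			unsigned long batch = min(nr_pages,
						  (unsigned long)FS_DIRTY_BATCH);

			fs_dirty_pages(mapping, batch);	/* hypothetical helper */
			balance_dirty_pages_ratelimited_nr(mapping, batch);
			nr_pages -= batch;
		}
	}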
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'mm')
 mm/page-writeback.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 5f378dd58802..cbd4cba468bd 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -44,12 +44,15 @@ static long ratelimit_pages = 32;
 /*
  * When balance_dirty_pages decides that the caller needs to perform some
  * non-background writeback, this is how many pages it will attempt to write.
- * It should be somewhat larger than RATELIMIT_PAGES to ensure that reasonably
+ * It should be somewhat larger than dirtied pages to ensure that reasonably
  * large amounts of I/O are submitted.
  */
-static inline long sync_writeback_pages(void)
+static inline long sync_writeback_pages(unsigned long dirtied)
 {
-	return ratelimit_pages + ratelimit_pages / 2;
+	if (dirtied < ratelimit_pages)
+		dirtied = ratelimit_pages;
+
+	return dirtied + dirtied / 2;
 }
 
 /* The following parameters are exported via /proc/sys/vm */
@@ -477,7 +480,8 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
  * If we're over `background_thresh' then pdflush is woken to perform some
  * writeout.
  */
-static void balance_dirty_pages(struct address_space *mapping)
+static void balance_dirty_pages(struct address_space *mapping,
+				unsigned long write_chunk)
 {
 	long nr_reclaimable, bdi_nr_reclaimable;
 	long nr_writeback, bdi_nr_writeback;
@@ -485,7 +489,6 @@ static void balance_dirty_pages(struct address_space *mapping)
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
 	unsigned long pages_written = 0;
-	unsigned long write_chunk = sync_writeback_pages();
 	unsigned long pause = 1;
 
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
@@ -640,9 +643,10 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 	p = &__get_cpu_var(bdp_ratelimits);
 	*p += nr_pages_dirtied;
 	if (unlikely(*p >= ratelimit)) {
+		ratelimit = sync_writeback_pages(*p);
 		*p = 0;
 		preempt_enable();
-		balance_dirty_pages(mapping);
+		balance_dirty_pages(mapping, ratelimit);
 		return;
 	}
 	preempt_enable();
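
For a quick check of the new heuristic outside the kernel (illustrative
only, assuming ratelimit_pages = 32):

	#include <stdio.h>

	static long ratelimit_pages = 32;

	/* userspace mirror of the patched sync_writeback_pages() */
	static long sync_writeback_pages(unsigned long dirtied)
	{
		if (dirtied < ratelimit_pages)
			dirtied = ratelimit_pages;
		return dirtied + dirtied / 2;
	}

	int main(void)
	{
		/* small dirtier: the floor keeps the old 48-page chunk */
		printf("dirtied    8 -> write_chunk %ld\n",
		       sync_writeback_pages(8));
		/* 1024-page write: the chunk now exceeds what was dirtied */
		printf("dirtied 1024 -> write_chunk %ld\n",
		       sync_writeback_pages(1024));
		return 0;
	}

This prints 48 and 1536: a caller that dirtied 1024 pages is now asked
to write back 1536, so writeback can keep up with the dirty rate.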