author		Wu Fengguang <fengguang.wu@intel.com>		2010-10-26 17:21:45 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-26 19:52:08 -0400
commit		4cbec4c8b9fda9ec784086fe7f74cd32a8adda95 (patch)
tree		669c3df27982345b52d0bfca8026e3f275e64a03 /mm/page-writeback.c
parent		0e093d99763eb4cea09f8ca4f1d01f34e121d10b (diff)
writeback: remove the internal 5% low bound on dirty_ratio
The dirty_ratio was silently limited in global_dirty_limits() to >= 5%.
This is not the behavior users expect, and it is inconsistent with
calc_period_shift(), which uses the plain vm_dirty_ratio value.
Let's remove the internal bound.
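
To make the change concrete, here is a minimal userspace sketch (not
kernel code; the memory size and ratio are illustrative values, not
taken from a real system) contrasting the old clamped computation with
the new one:

/*
 * Userspace model of the vm_dirty_ratio path in global_dirty_limits(),
 * before and after this patch. available_memory and the ratio are
 * made-up figures for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long available_memory = 1000000;  /* pages (hypothetical) */
	int vm_dirty_ratio = 2;                    /* sysctl vm.dirty_ratio */

	/* Old behavior: ratios below 5 were silently raised to 5. */
	int clamped = vm_dirty_ratio < 5 ? 5 : vm_dirty_ratio;
	unsigned long old_dirty = clamped * available_memory / 100;

	/* New behavior: the plain vm_dirty_ratio value is used. */
	unsigned long new_dirty = vm_dirty_ratio * available_memory / 100;

	printf("old threshold: %lu pages\n", old_dirty);  /* 50000 */
	printf("new threshold: %lu pages\n", new_dirty);  /* 20000 */
	return 0;
}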
At the same time, fix balance_dirty_pages() to work with the
dirty_thresh=0 case. This allows applications to proceed when
dirty+writeback pages are all cleaned.
And ">" fits with the name "exceeded" better than ">=" does. Neil thinks
it is an aesthetic improvement as well as a functional one :)
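
A minimal sketch of why the comparison changes matter at the
dirty_thresh=0 boundary (a userspace model, not the kernel function
itself):

/*
 * Models the two comparisons fixed in balance_dirty_pages() for the
 * case vm.dirty_ratio = 0, once every dirty page has been cleaned.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long nr_reclaimable = 0, nr_writeback = 0; /* all cleaned */
	unsigned long background_thresh = 0, dirty_thresh = 0;

	/* Old "<" never held at 0 vs 0; the new "<=" lets the task break out. */
	bool can_break = nr_reclaimable + nr_writeback <=
			 (background_thresh + dirty_thresh) / 2;

	/* Old ">=" made 0 >= 0 count as "exceeded"; the new ">" does not. */
	bool dirty_exceeded = nr_reclaimable + nr_writeback > dirty_thresh;

	printf("can_break = %d\n", can_break);            /* 1: proceed */
	printf("dirty_exceeded = %d\n", dirty_exceeded);  /* 0: not throttled */
	return 0;
}

With the old operators a task could keep looping in
balance_dirty_pages() even after dirty+writeback had dropped to zero;
with the new ones it proceeds, which is exactly the dirty_thresh=0
behavior described above.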
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Proposed-by: Con Kolivas <kernel@kolivas.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Neil Brown <neilb@suse.de>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Michael Rubin <mrubin@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	16
1 file changed, 5 insertions(+), 11 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4dd91f7fd39f..b840afa89761 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -415,14 +415,8 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 
 	if (vm_dirty_bytes)
 		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
-	else {
-		int dirty_ratio;
-
-		dirty_ratio = vm_dirty_ratio;
-		if (dirty_ratio < 5)
-			dirty_ratio = 5;
-		dirty = (dirty_ratio * available_memory) / 100;
-	}
+	else
+		dirty = (vm_dirty_ratio * available_memory) / 100;
 
 	if (dirty_background_bytes)
 		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
@@ -510,7 +504,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * catch-up. This avoids (excessively) small writeouts
 		 * when the bdi limits are ramping up.
 		 */
-		if (nr_reclaimable + nr_writeback <
+		if (nr_reclaimable + nr_writeback <=
 				(background_thresh + dirty_thresh) / 2)
 			break;
 
@@ -542,8 +536,8 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * the last resort safeguard.
 		 */
 		dirty_exceeded =
-			(bdi_nr_reclaimable + bdi_nr_writeback >= bdi_thresh)
-			|| (nr_reclaimable + nr_writeback >= dirty_thresh);
+			(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
+			|| (nr_reclaimable + nr_writeback > dirty_thresh);
 
 		if (!dirty_exceeded)
 			break;
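
In practical terms, after this patch a setting such as
sysctl -w vm.dirty_ratio=2 (or writing 2 to /proc/sys/vm/dirty_ratio)
takes effect literally instead of being silently raised to 5%, and the
adjusted comparisons keep a dirty_ratio of 0 from stalling writers once
their dirty+writeback pages are all cleaned.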