author	Johannes Weiner <hannes@cmpxchg.org>	2014-01-29 17:05:41 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-29 19:22:39 -0500
commit	a1c3bfb2f67ef766de03f1f56bdfff9c8595ab14 (patch)
tree	e06405192d674561bf2718ab03879c32103ae34e /mm/vmscan.c
parent	a804552b9a15c931cfc2a92a2e0aed1add8b580a (diff)
mm/page-writeback.c: do not count anon pages as dirtyable memory
The VM is currently heavily tuned to avoid swapping.  Whether that is good or bad is a separate discussion, but as long as the VM won't swap to make room for dirty cache, we cannot consider anonymous pages when calculating the amount of dirtyable memory, the baseline to which dirty_background_ratio and dirty_ratio are applied.

A simple workload that occupies a significant size (40+%, depending on memory layout, storage speeds, etc.) of memory with anon/tmpfs pages and uses the remainder for a streaming writer demonstrates this problem.  In that case, the actual cache pages are a small fraction of what is considered dirtyable overall, which results in a relatively large portion of the cache pages being dirtied.  As kswapd starts rotating these, random tasks enter direct reclaim and stall on IO.

Only consider free pages and file pages dirtyable.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-by: Tejun Heo <tj@kernel.org>
Tested-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
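To make the effect of the change concrete, here is a minimal user-space sketch of the dirtyable-memory baseline described above. The struct, field names, and sample numbers are hypothetical illustrations, not the kernel's data structures or its actual implementation in mm/page-writeback.c; the point is only that the baseline sums free and file-backed pages and leaves anonymous pages out, even when swap is available.

/*
 * Hypothetical sketch: with this change, dirtyable memory is derived
 * from free and file-backed pages only; anonymous pages are no longer
 * counted, even when swap space is available.
 */
#include <stdio.h>

struct vmstat_sample {
	unsigned long nr_free;          /* free pages */
	unsigned long nr_active_file;   /* active file-backed pages */
	unsigned long nr_inactive_file; /* inactive file-backed pages */
	unsigned long nr_anon;          /* anonymous pages (ignored below) */
};

/* Baseline to which dirty_ratio / dirty_background_ratio are applied. */
static unsigned long dirtyable_memory(const struct vmstat_sample *vm)
{
	return vm->nr_free + vm->nr_active_file + vm->nr_inactive_file;
}

int main(void)
{
	/* Roughly 40% of memory held by anon/tmpfs, as in the workload above. */
	struct vmstat_sample vm = {
		.nr_free          = 10000,
		.nr_active_file   = 30000,
		.nr_inactive_file = 20000,
		.nr_anon          = 40000,
	};
	unsigned long dirty_ratio = 20;	/* percent, like vm.dirty_ratio */

	printf("dirtyable pages: %lu\n", dirtyable_memory(&vm));
	printf("dirty limit:     %lu pages\n",
	       dirtyable_memory(&vm) * dirty_ratio / 100);
	return 0;
}

With anonymous pages excluded, the dirty limits are computed against memory the VM is actually willing to reclaim for cache, rather than against anon/tmpfs pages it will not swap out.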
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	23
1 file changed, 1 insertion(+), 22 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 90c4075d8d75..a9c74b409681 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -147,7 +147,7 @@ static bool global_reclaim(struct scan_control *sc)
 }
 #endif
 
-unsigned long zone_reclaimable_pages(struct zone *zone)
+static unsigned long zone_reclaimable_pages(struct zone *zone)
 {
 	int nr;
 
@@ -3315,27 +3315,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
 	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-/*
- * The reclaimable count would be mostly accurate.
- * The less reclaimable pages may be
- * - mlocked pages, which will be moved to unevictable list when encountered
- * - mapped pages, which may require several travels to be reclaimed
- * - dirty pages, which is not "instantly" reclaimable
- */
-unsigned long global_reclaimable_pages(void)
-{
-	int nr;
-
-	nr = global_page_state(NR_ACTIVE_FILE) +
-	     global_page_state(NR_INACTIVE_FILE);
-
-	if (get_nr_swap_pages() > 0)
-		nr += global_page_state(NR_ACTIVE_ANON) +
-		      global_page_state(NR_INACTIVE_ANON);
-
-	return nr;
-}
-
 #ifdef CONFIG_HIBERNATION
 /*
  * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of