author     Wu Fengguang <fengguang.wu@intel.com>  2009-09-21 20:01:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-22 10:17:30 -0400
commit     adea02a1bea71a508da32c04d715485a1fe62029
tree       c78742bbab36bf3b8d20f84b4dc6dc6585bb7cb4  /mm/page-writeback.c
parent     55c37a840d9ec0ebed5c944355156d490b1ad5d1
mm: count only reclaimable lru pages
global_lru_pages() / zone_lru_pages() can be used in two ways:
- to estimate max reclaimable pages in determine_dirtyable_memory()
- to calculate the slab scan ratio

When swap is full or not present, the anon lru lists are not reclaimable
and also won't be scanned.  So the anon pages shall not be counted in
both usage scenarios.

Also rename to _reclaimable_pages: now they are counting the possibly
reclaimable lru pages.

It can greatly (and correctly) increase the slab scan rate under high
memory pressure (when most file pages have been reclaimed and swap is
full/absent), thus reduce false OOM kills.

Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: David Howells <dhowells@redhat.com>
Cc: "Li, Ming Chun" <macli@brc.ubc.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
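The renamed helpers themselves are defined outside this diff (the diffstat below is limited to mm/page-writeback.c). Based on the changelog above, their behaviour can be sketched as follows: file LRU pages are always counted, while anon LRU pages are counted only while usable swap remains. The counter and variable names (NR_*_FILE/ANON, nr_swap_pages) are the 2.6.31-era ones and this should be read as an illustration, not as the exact patch text:

/*
 * Illustrative sketch of the renamed counters (the real definitions
 * live in mm/vmscan.c, outside this diff).  Anon LRU pages are only
 * counted while swap space is available, matching the changelog.
 */
unsigned long global_reclaimable_pages(void)
{
	int nr;

	nr = global_page_state(NR_ACTIVE_FILE) +
	     global_page_state(NR_INACTIVE_FILE);

	if (nr_swap_pages > 0)
		nr += global_page_state(NR_ACTIVE_ANON) +
		      global_page_state(NR_INACTIVE_ANON);

	return nr;
}

unsigned long zone_reclaimable_pages(struct zone *zone)
{
	int nr;

	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
	     zone_page_state(zone, NR_INACTIVE_FILE);

	if (nr_swap_pages > 0)
		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
		      zone_page_state(zone, NR_INACTIVE_ANON);

	return nr;
}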
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--   mm/page-writeback.c   5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d1ba46441053..5f378dd58802 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -380,7 +380,8 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 		struct zone *z =
 			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-		x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
+		x += zone_page_state(z, NR_FREE_PAGES) +
+		     zone_reclaimable_pages(z);
 	}
 	/*
 	 * Make sure that the number of highmem pages is never larger
@@ -404,7 +405,7 @@ unsigned long determine_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_lru_pages();
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
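For context, the value returned by determine_dirtyable_memory() feeds the global dirty thresholds computed elsewhere in this same file. A minimal sketch of that use, assuming the 2.6.31-era percentage-based knobs (vm_dirty_ratio, dirty_background_ratio) and ignoring the byte-based variants:

/*
 * Simplified, hypothetical view of how the dirtyable-memory estimate
 * is consumed when deriving the writeback thresholds; not part of
 * this diff.
 */
void sketch_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	unsigned long available_memory = determine_dirtyable_memory();

	*pdirty      = (vm_dirty_ratio * available_memory) / 100;
	*pbackground = (dirty_background_ratio * available_memory) / 100;
}

Because the estimate now excludes unreclaimable anon pages when swap is full or absent, the derived dirty limits shrink accordingly instead of being inflated by memory that can never be freed for page cache.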