author     Wu Fengguang <fengguang.wu@intel.com>          2009-09-21 20:01:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-22 10:17:30 -0400
commit     adea02a1bea71a508da32c04d715485a1fe62029 (patch)
tree       c78742bbab36bf3b8d20f84b4dc6dc6585bb7cb4
parent     55c37a840d9ec0ebed5c944355156d490b1ad5d1 (diff)
mm: count only reclaimable lru pages
global_lru_pages() / zone_lru_pages() can be used in two ways:

 - to estimate max reclaimable pages in determine_dirtyable_memory()
 - to calculate the slab scan ratio

When swap is full or not present, the anon lru lists are not reclaimable
and also won't be scanned.  So the anon pages shall not be counted in
either usage scenario.  Also rename to _reclaimable_pages: now they are
counting the possibly reclaimable lru pages.

It can greatly (and correctly) increase the slab scan rate under high
memory pressure (when most file pages have been reclaimed and swap is
full/absent), thus reducing false OOM kills.

Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: David Howells <dhowells@redhat.com>
Cc: "Li, Ming Chun" <macli@brc.ubc.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/vmstat.h  |  11
-rw-r--r--  mm/page-writeback.c     |   5
-rw-r--r--  mm/vmscan.c             |  50
3 files changed, 44 insertions(+), 22 deletions(-)
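For readers skimming the diff below: the behavioural core of the patch is that anon LRU pages only count as reclaimable when swap space is available. The following standalone sketch (plain userspace C; the struct and field names are made-up stand-ins for the kernel's per-zone counters, not the kernel API) illustrates that counting rule. It is an illustration of the idea only, not the kernel implementation.

/*
 * Sketch of the counting rule introduced by this patch: anon LRU pages
 * are only considered reclaimable when swap space is available.
 * Struct and field names are simplified stand-ins for the real zone stats.
 */
#include <stdio.h>

struct zone_stats {
	unsigned long active_file;
	unsigned long inactive_file;
	unsigned long active_anon;
	unsigned long inactive_anon;
};

static unsigned long reclaimable_pages(const struct zone_stats *z,
				       long nr_swap_pages)
{
	/* File-backed pages can always be reclaimed (written back/dropped). */
	unsigned long nr = z->active_file + z->inactive_file;

	/* Anon pages can only be reclaimed by swapping them out. */
	if (nr_swap_pages > 0)
		nr += z->active_anon + z->inactive_anon;

	return nr;
}

int main(void)
{
	struct zone_stats z = { 1000, 2000, 3000, 4000 };

	printf("with swap:    %lu\n", reclaimable_pages(&z, 8192)); /* 10000 */
	printf("without swap: %lu\n", reclaimable_pages(&z, 0));    /*  3000 */
	return 0;
}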
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index d7f577f49d16..2d0f222388a8 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -166,15 +166,8 @@ static inline unsigned long zone_page_state(struct zone *zone,
 	return x;
 }
 
-extern unsigned long global_lru_pages(void);
-
-static inline unsigned long zone_lru_pages(struct zone *zone)
-{
-	return (zone_page_state(zone, NR_ACTIVE_ANON)
-		+ zone_page_state(zone, NR_ACTIVE_FILE)
-		+ zone_page_state(zone, NR_INACTIVE_ANON)
-		+ zone_page_state(zone, NR_INACTIVE_FILE));
-}
+extern unsigned long global_reclaimable_pages(void);
+extern unsigned long zone_reclaimable_pages(struct zone *zone);
 
 #ifdef CONFIG_NUMA
 /*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d1ba46441053..5f378dd58802 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -380,7 +380,8 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 		struct zone *z =
 			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-		x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
+		x += zone_page_state(z, NR_FREE_PAGES) +
+		     zone_reclaimable_pages(z);
 	}
 	/*
 	 * Make sure that the number of highmem pages is never larger
@@ -404,7 +405,7 @@ unsigned long determine_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_lru_pages();
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f90b76086ffa..208071c48bf2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1734,7 +1734,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
 
-			lru_pages += zone_lru_pages(zone);
+			lru_pages += zone_reclaimable_pages(zone);
 		}
 	}
 
@@ -1951,7 +1951,7 @@ loop_again:
 		for (i = 0; i <= end_zone; i++) {
 			struct zone *zone = pgdat->node_zones + i;
 
-			lru_pages += zone_lru_pages(zone);
+			lru_pages += zone_reclaimable_pages(zone);
 		}
 
 		/*
@@ -1995,7 +1995,7 @@ loop_again:
 			if (zone_is_all_unreclaimable(zone))
 				continue;
 			if (nr_slab == 0 && zone->pages_scanned >=
-					(zone_lru_pages(zone) * 6))
+					(zone_reclaimable_pages(zone) * 6))
 					zone_set_flag(zone,
 						      ZONE_ALL_UNRECLAIMABLE);
 			/*
@@ -2162,12 +2162,39 @@ void wakeup_kswapd(struct zone *zone, int order)
 	wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-unsigned long global_lru_pages(void)
+/*
+ * The reclaimable count would be mostly accurate.
+ * The less reclaimable pages may be
+ * - mlocked pages, which will be moved to unevictable list when encountered
+ * - mapped pages, which may require several travels to be reclaimed
+ * - dirty pages, which is not "instantly" reclaimable
+ */
+unsigned long global_reclaimable_pages(void)
 {
-	return global_page_state(NR_ACTIVE_ANON)
-		+ global_page_state(NR_ACTIVE_FILE)
-		+ global_page_state(NR_INACTIVE_ANON)
-		+ global_page_state(NR_INACTIVE_FILE);
+	int nr;
+
+	nr = global_page_state(NR_ACTIVE_FILE) +
+	     global_page_state(NR_INACTIVE_FILE);
+
+	if (nr_swap_pages > 0)
+		nr += global_page_state(NR_ACTIVE_ANON) +
+		      global_page_state(NR_INACTIVE_ANON);
+
+	return nr;
+}
+
+unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+	int nr;
+
+	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+	     zone_page_state(zone, NR_INACTIVE_FILE);
+
+	if (nr_swap_pages > 0)
+		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+		      zone_page_state(zone, NR_INACTIVE_ANON);
+
+	return nr;
 }
 
 #ifdef CONFIG_HIBERNATION
@@ -2239,7 +2266,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 	current->reclaim_state = &reclaim_state;
 
-	lru_pages = global_lru_pages();
+	lru_pages = global_reclaimable_pages();
 	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
@@ -2281,7 +2308,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 		reclaim_state.reclaimed_slab = 0;
 		shrink_slab(sc.nr_scanned, sc.gfp_mask,
-			    global_lru_pages());
+			    global_reclaimable_pages());
 		sc.nr_reclaimed += reclaim_state.reclaimed_slab;
 		if (sc.nr_reclaimed >= nr_pages)
 			goto out;
@@ -2298,7 +2325,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	if (!sc.nr_reclaimed) {
 		do {
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
+			shrink_slab(nr_pages, sc.gfp_mask,
+				    global_reclaimable_pages());
 			sc.nr_reclaimed += reclaim_state.reclaimed_slab;
 		} while (sc.nr_reclaimed < nr_pages &&
 				reclaim_state.reclaimed_slab > 0);