aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorRik van Riel <riel@redhat.com>2009-09-21 20:01:38 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-09-22 10:17:29 -0400
commit35cd78156c499ef83f60605e4643d5a98fef14fd (patch)
tree67cdc9019d4b110b9d57634bd347c8dad6bf8346 /mm
parenta731286de62294b63d8ceb3c5914ac52cc17e690 (diff)
vmscan: throttle direct reclaim when too many pages are isolated already
When way too many processes go into direct reclaim, it is possible for all of the pages to be taken off the LRU. One result of this is that the next process in the page reclaim code thinks there are no reclaimable pages left and triggers an out of memory kill.

One solution to this problem is to never let so many processes into the page reclaim path that the entire LRU is emptied. Limiting the system to only having half of each inactive list isolated for reclaim should be safe.

Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/vmscan.c33
1 file changed, 33 insertions, 0 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 75c29974e878..f90b76086ffa 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1034,6 +1034,31 @@ int isolate_lru_page(struct page *page)
1034} 1034}
1035 1035
1036/* 1036/*
1037 * Are there way too many processes in the direct reclaim path already?
1038 */
1039static int too_many_isolated(struct zone *zone, int file,
1040 struct scan_control *sc)
1041{
1042 unsigned long inactive, isolated;
1043
1044 if (current_is_kswapd())
1045 return 0;
1046
1047 if (!scanning_global_lru(sc))
1048 return 0;
1049
1050 if (file) {
1051 inactive = zone_page_state(zone, NR_INACTIVE_FILE);
1052 isolated = zone_page_state(zone, NR_ISOLATED_FILE);
1053 } else {
1054 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
1055 isolated = zone_page_state(zone, NR_ISOLATED_ANON);
1056 }
1057
1058 return isolated > inactive;
1059}
1060
1061/*
1037 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number 1062 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1038 * of reclaimed pages 1063 * of reclaimed pages
1039 */ 1064 */
@@ -1048,6 +1073,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1048 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1073 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1049 int lumpy_reclaim = 0; 1074 int lumpy_reclaim = 0;
1050 1075
1076 while (unlikely(too_many_isolated(zone, file, sc))) {
1077 congestion_wait(WRITE, HZ/10);
1078
1079 /* We are about to die and free our memory. Return now. */
1080 if (fatal_signal_pending(current))
1081 return SWAP_CLUSTER_MAX;
1082 }
1083
1051 /* 1084 /*
1052 * If we need a large contiguous chunk of memory, or have 1085 * If we need a large contiguous chunk of memory, or have
1053 * trouble getting a small set of contiguous pages, we 1086 * trouble getting a small set of contiguous pages, we