author    Rik van Riel <riel@redhat.com>                    2008-10-18 23:26:36 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-10-20 11:50:26 -0400
commit    33c120ed2843090e2bd316de1588b8bf8b96cbde
tree      7a6969fd7aae85fdaa8e63a90494950d8e4a0792
parent    c5fdae469a6a26cd882d7fe0aa3fbfffb6b72fc5
more aggressively use lumpy reclaim
During an AIM7 run on a 16GB system, fork started failing around 32000
threads, despite the system having plenty of free swap and 15GB of
pageable memory. This was on x86-64, so 8k stacks.

If a higher order allocation fails, we can either:
- keep evicting pages off the end of the LRUs and hope that we
  eventually create a contiguous region; this is somewhat unlikely
  if the system is under enough stress by new allocations
- after trying normal eviction for a bit, use lumpy reclaim

This patch switches the system to lumpy reclaim if the VM is having
trouble freeing enough pages, using the same threshold for detection
as used by pageout congestion wait.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
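For readers skimming the diff below, the decision this patch introduces in
shrink_inactive_list() boils down to the following standalone sketch. The
kernel types are reduced to stubs, the helper name isolation_mode() is purely
illustrative, and PAGE_ALLOC_COSTLY_ORDER = 3 / DEF_PRIORITY = 12 are assumed
to match the kernel of this era; this is not a drop-in excerpt.

    /* Minimal sketch of the post-patch isolation-mode decision. */
    #include <stdio.h>

    #define PAGE_ALLOC_COSTLY_ORDER 3   /* assumed kernel value */
    #define DEF_PRIORITY            12  /* assumed kernel value */

    enum { ISOLATE_INACTIVE, ISOLATE_BOTH };

    struct scan_control { int order; }; /* reduced to the field used here */

    static int isolation_mode(const struct scan_control *sc, int priority)
    {
            /* Large allocation: take pages off both LRU lists right away. */
            if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
                    return ISOLATE_BOTH;
            /*
             * Smaller high-order allocation, but reclaim has already had to
             * drop its scanning priority past the congestion-wait threshold:
             * fall back to lumpy reclaim as well.
             */
            if (sc->order && priority < DEF_PRIORITY - 2)
                    return ISOLATE_BOTH;
            return ISOLATE_INACTIVE;
    }

    int main(void)
    {
            struct scan_control sc = { .order = 2 };

            printf("%d\n", isolation_mode(&sc, DEF_PRIORITY));     /* 0: inactive only */
            printf("%d\n", isolation_mode(&sc, DEF_PRIORITY - 3)); /* 1: lumpy (both) */
            return 0;
    }

The second branch is the new behaviour: priority starts at DEF_PRIORITY and
drops as reclaim tries harder, so once it has fallen by more than two steps
(the same point at which pageout starts waiting on congestion), even small
high-order allocations begin isolating from both the active and inactive lists.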
 mm/vmscan.c | 20 ++++++++++++++------
 1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9588973849d0..a8347b677e74 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -909,7 +909,8 @@ int isolate_lru_page(struct page *page)
  * of reclaimed pages
  */
 static unsigned long shrink_inactive_list(unsigned long max_scan,
-			struct zone *zone, struct scan_control *sc, int file)
+			struct zone *zone, struct scan_control *sc,
+			int priority, int file)
 {
 	LIST_HEAD(page_list);
 	struct pagevec pvec;
@@ -927,8 +928,19 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		unsigned long nr_freed;
 		unsigned long nr_active;
 		unsigned int count[NR_LRU_LISTS] = { 0, };
-		int mode = (sc->order > PAGE_ALLOC_COSTLY_ORDER) ?
-					ISOLATE_BOTH : ISOLATE_INACTIVE;
+		int mode = ISOLATE_INACTIVE;
+
+		/*
+		 * If we need a large contiguous chunk of memory, or have
+		 * trouble getting a small set of contiguous pages, we
+		 * will reclaim both active and inactive pages.
+		 *
+		 * We use the same threshold as pageout congestion_wait below.
+		 */
+		if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+			mode = ISOLATE_BOTH;
+		else if (sc->order && priority < DEF_PRIORITY - 2)
+			mode = ISOLATE_BOTH;
 
 		nr_taken = sc->isolate_pages(sc->swap_cluster_max,
 			     &page_list, &nr_scan, sc->order, mode,
@@ -1172,7 +1184,7 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 		shrink_active_list(nr_to_scan, zone, sc, priority, file);
 		return 0;
 	}
-	return shrink_inactive_list(nr_to_scan, zone, sc, file);
+	return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
 }
 
 /*