author		KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2009-06-16 18:31:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:47:31 -0400
commit		78dc583d3ab43115579cb5f3f7bd12e3548dd5a5 (patch)
tree		ef8886bd9fce4bd8e4faa30bafcacd90aee54e25 /mm/vmscan.c
parent		d2bf6be8ab63aa84e6149aac934649aadf3828b1 (diff)
vmscan: low order lumpy reclaim also should use PAGEOUT_IO_SYNC
Commit 33c120ed2843090e2bd316de1588b8bf8b96cbde ("more aggressively use
lumpy reclaim") made lumpy reclaim more aggressive by isolating both
active and inactive pages for asynchronous lumpy reclaim on costly
high-order pages, and on cheap high-order pages when memory pressure is
high.  However, if the system is under heavy pressure and there are
dirty pages, asynchronous IO may not be sufficient to reclaim a
suitable page in time.  This patch makes the caller enter synchronous
lumpy reclaim for costly high-order pages, and for cheap high-order
pages when under memory pressure.

Minchan.kim@gmail.com said:

  Andy added synchronous lumpy reclaim in
  c661b078fd62abe06fd11fab4ac5e4eeafe26b6d.  At that time, lumpy
  reclaim was not aggressive; his intention was only to cover
  high-order users (above PAGE_ALLOC_COSTLY_ORDER).

  Later, Rik added aggressive lumpy reclaim in
  33c120ed2843090e2bd316de1588b8bf8b96cbde.  His intention was to do
  lumpy reclaim whenever high-order users have trouble getting a small
  set of contiguous pages, so we also have to add synchronous pageout
  for small sets of contiguous pages.

Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Minchan Kim <Minchan.kim@gmail.com>
Reviewed-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
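The gist of the patch is the small threshold predicate it hoists out of
the scan loop.  As a quick illustration, here is a minimal, compilable
sketch of that logic; the constant values mirror the kernel's at the
time (PAGE_ALLOC_COSTLY_ORDER = 3, DEF_PRIORITY = 12), but the helper
name want_lumpy_reclaim and the test harness are hypothetical, not
kernel code:

	#include <stdio.h>

	/* Constants mirroring the kernel's values of this era; the rest
	 * of this harness is an illustration, not kernel source. */
	#define PAGE_ALLOC_COSTLY_ORDER	3
	#define DEF_PRIORITY		12

	/* Lumpy reclaim is used for costly orders unconditionally, and
	 * for cheaper high-order requests once reclaim priority has
	 * escalated past DEF_PRIORITY - 2.  (In vmscan.c, priority
	 * counts down from DEF_PRIORITY toward 0 as reclaim passes
	 * fail, so a lower value means higher pressure.) */
	static int want_lumpy_reclaim(int order, int priority)
	{
		if (order > PAGE_ALLOC_COSTLY_ORDER)
			return 1;
		if (order && priority < DEF_PRIORITY - 2)
			return 1;
		return 0;
	}

	int main(void)
	{
		printf("order=2 prio=12 -> %d\n", want_lumpy_reclaim(2, 12)); /* 0: no pressure yet */
		printf("order=2 prio=9  -> %d\n", want_lumpy_reclaim(2, 9));  /* 1: pressure escalated */
		printf("order=4 prio=12 -> %d\n", want_lumpy_reclaim(4, 12)); /* 1: costly order, always */
		return 0;
	}

With the patch, this single predicate decides both the isolation mode
(ISOLATE_BOTH vs. ISOLATE_INACTIVE) and whether the caller waits for
congestion and retries with synchronous writeback; previously the
congestion path checked only the costly-order half of the condition.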
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	29
1 file changed, 15 insertions, 14 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 95c08a8cc2ba..a6b7d14812e6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1061,6 +1061,19 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 	unsigned long nr_scanned = 0;
 	unsigned long nr_reclaimed = 0;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+	int lumpy_reclaim = 0;
+
+	/*
+	 * If we need a large contiguous chunk of memory, or have
+	 * trouble getting a small set of contiguous pages, we
+	 * will reclaim both active and inactive pages.
+	 *
+	 * We use the same threshold as pageout congestion_wait below.
+	 */
+	if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+		lumpy_reclaim = 1;
+	else if (sc->order && priority < DEF_PRIORITY - 2)
+		lumpy_reclaim = 1;
 
 	pagevec_init(&pvec, 1);
 
@@ -1073,19 +1086,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		unsigned long nr_freed;
 		unsigned long nr_active;
 		unsigned int count[NR_LRU_LISTS] = { 0, };
-		int mode = ISOLATE_INACTIVE;
-
-		/*
-		 * If we need a large contiguous chunk of memory, or have
-		 * trouble getting a small set of contiguous pages, we
-		 * will reclaim both active and inactive pages.
-		 *
-		 * We use the same threshold as pageout congestion_wait below.
-		 */
-		if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
-			mode = ISOLATE_BOTH;
-		else if (sc->order && priority < DEF_PRIORITY - 2)
-			mode = ISOLATE_BOTH;
+		int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
 
 		nr_taken = sc->isolate_pages(sc->swap_cluster_max,
 			     &page_list, &nr_scan, sc->order, mode,
@@ -1122,7 +1123,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		 * but that should be acceptable to the caller
 		 */
 		if (nr_freed < nr_taken && !current_is_kswapd() &&
-		    sc->order > PAGE_ALLOC_COSTLY_ORDER) {
+		    lumpy_reclaim) {
 			congestion_wait(WRITE, HZ/10);
 
 			/*