author	Mel Gorman <mel@csn.ul.ie>	2010-08-09 20:19:33 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-09 23:45:00 -0400
commit	1489fa14cb757b496c8fa2b63097dbcee6690695 (patch)
tree	288905eab717db3cf1f6b7419e1989e89411ce04
parent	abe4c3b50c3f25cb1baf56036024860f12f96015 (diff)
vmscan: update isolated page counters outside of main path in shrink_inactive_list()
When shrink_inactive_list() isolates pages, it updates a number of
counters using temporary variables to gather them.  These consume stack
and it's in the main path that calls ->writepage().  This patch moves
the accounting updates outside of the main path to reduce stack usage.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michael Rubin <mrubin@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
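The pattern is general: per-iteration counters are gathered in a separate
helper that the compiler is told not to inline, so the counter array lives
only in the helper's frame and the caller keeps a small frame for the deep
call chain (->writepage() here) it makes afterwards. Below is a minimal
userspace sketch of that idea, not kernel code; the names (update_counts,
process_list, struct item) and NR_COUNTERS are invented for illustration.

/*
 * Sketch only: counter bookkeeping is confined to a noinline helper so
 * the caller's stack frame does not carry the counter array.
 */
#include <stdio.h>

#define NR_COUNTERS 4

/* stand-in for the kernel's noinline_for_stack annotation */
#define noinline_for_stack __attribute__((noinline))

struct item {
	int type;		/* which counter this item belongs to */
	struct item *next;
};

/* All counter temporaries live in this frame only. */
static noinline_for_stack void update_counts(struct item *list,
					     unsigned long *nr_even,
					     unsigned long *nr_odd)
{
	unsigned int count[NR_COUNTERS] = { 0, };
	struct item *it;

	for (it = list; it; it = it->next)
		count[it->type % NR_COUNTERS]++;

	*nr_even = count[0] + count[2];
	*nr_odd  = count[1] + count[3];
}

/* The "main path": its frame needs two scalars, not the whole array. */
static void process_list(struct item *list)
{
	unsigned long nr_even, nr_odd;

	update_counts(list, &nr_even, &nr_odd);
	/* ...deep call chain would follow here... */
	printf("even=%lu odd=%lu\n", nr_even, nr_odd);
}

int main(void)
{
	struct item c = { 3, NULL };
	struct item b = { 2, &c };
	struct item a = { 0, &b };

	process_list(&a);
	return 0;
}

Whether the caller's frame actually shrinks also depends on optimization
settings; the annotation only prevents the helper from being merged back
into the caller.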
-rw-r--r--	mm/vmscan.c	63
1 file changed, 38 insertions(+), 25 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 512f4630ba8c..1c3d960de9d2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1076,7 +1076,8 @@ static unsigned long clear_active_flags(struct list_head *page_list,
 			ClearPageActive(page);
 			nr_active++;
 		}
-		count[lru]++;
+		if (count)
+			count[lru]++;
 	}
 
 	return nr_active;
@@ -1156,12 +1157,13 @@ static int too_many_isolated(struct zone *zone, int file,
  * TODO: Try merging with migrations version of putback_lru_pages
  */
 static noinline_for_stack void
-putback_lru_pages(struct zone *zone, struct zone_reclaim_stat *reclaim_stat,
+putback_lru_pages(struct zone *zone, struct scan_control *sc,
 				unsigned long nr_anon, unsigned long nr_file,
 				struct list_head *page_list)
 {
 	struct page *page;
 	struct pagevec pvec;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	pagevec_init(&pvec, 1);
 
@@ -1200,6 +1202,37 @@ putback_lru_pages(struct zone *zone, struct zone_reclaim_stat *reclaim_stat,
 	pagevec_release(&pvec);
 }
 
+static noinline_for_stack void update_isolated_counts(struct zone *zone,
+					struct scan_control *sc,
+					unsigned long *nr_anon,
+					unsigned long *nr_file,
+					struct list_head *isolated_list)
+{
+	unsigned long nr_active;
+	unsigned int count[NR_LRU_LISTS] = { 0, };
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+
+	nr_active = clear_active_flags(isolated_list, count);
+	__count_vm_events(PGDEACTIVATE, nr_active);
+
+	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
+			      -count[LRU_ACTIVE_FILE]);
+	__mod_zone_page_state(zone, NR_INACTIVE_FILE,
+			      -count[LRU_INACTIVE_FILE]);
+	__mod_zone_page_state(zone, NR_ACTIVE_ANON,
+			      -count[LRU_ACTIVE_ANON]);
+	__mod_zone_page_state(zone, NR_INACTIVE_ANON,
+			      -count[LRU_INACTIVE_ANON]);
+
+	*nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
+	*nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
+
+	reclaim_stat->recent_scanned[0] += *nr_anon;
+	reclaim_stat->recent_scanned[1] += *nr_file;
+}
+
 /*
  * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
  * of reclaimed pages
@@ -1211,10 +1244,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 	LIST_HEAD(page_list);
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
-	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 	unsigned long nr_taken;
 	unsigned long nr_active;
-	unsigned int count[NR_LRU_LISTS] = { 0, };
 	unsigned long nr_anon;
 	unsigned long nr_file;
 
@@ -1261,25 +1292,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 		return 0;
 	}
 
-	nr_active = clear_active_flags(&page_list, count);
-	__count_vm_events(PGDEACTIVATE, nr_active);
-
-	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
-					-count[LRU_ACTIVE_FILE]);
-	__mod_zone_page_state(zone, NR_INACTIVE_FILE,
-					-count[LRU_INACTIVE_FILE]);
-	__mod_zone_page_state(zone, NR_ACTIVE_ANON,
-					-count[LRU_ACTIVE_ANON]);
-	__mod_zone_page_state(zone, NR_INACTIVE_ANON,
-					-count[LRU_INACTIVE_ANON]);
-
-	nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
-	nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
-
-	reclaim_stat->recent_scanned[0] += nr_anon;
-	reclaim_stat->recent_scanned[1] += nr_file;
+	update_isolated_counts(zone, sc, &nr_anon, &nr_file, &page_list);
 
 	spin_unlock_irq(&zone->lru_lock);
 
@@ -1299,7 +1312,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 		 * The attempt at page out may have made some
 		 * of the pages active, mark them inactive again.
 		 */
-		nr_active = clear_active_flags(&page_list, count);
+		nr_active = clear_active_flags(&page_list, NULL);
 		count_vm_events(PGDEACTIVATE, nr_active);
 
 		nr_reclaimed += shrink_page_list(&page_list, sc, PAGEOUT_IO_SYNC);
@@ -1310,7 +1323,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
 	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
 
-	putback_lru_pages(zone, reclaim_stat, nr_anon, nr_file, &page_list);
+	putback_lru_pages(zone, sc, nr_anon, nr_file, &page_list);
 	return nr_reclaimed;
 }
 