author      Mel Gorman <mel@csn.ul.ie>                      2010-08-09 20:19:30 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>  2010-08-09 23:45:00 -0400
commit      666356297ec4e9e6594c6008803f2b1403ff7950
tree        aaa1e60f81588d0d90c2279c9812a32e5a085a27
parent      d4debc66d1fc1b98a68081c4c8156f171841dca8
vmscan: set up pagevec as late as possible in shrink_inactive_list()
shrink_inactive_list() sets up a pagevec to release unfreeable pages.  Doing
so uses a significant amount of stack.  This patch splits
shrink_inactive_list() to move that stack usage out of the main path, so that
callers of writepage() do not carry an unused pagevec on the stack.
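
As a minimal, hypothetical sketch of the same pattern outside the kernel
(plain C; GCC/Clang "noinline" standing in for noinline_for_stack, all names
illustrative): the large on-stack object is confined to a helper that cannot
be inlined, so it never occupies stack space in the frame that makes the deep
call.

#include <stdio.h>

/*
 * "noinline" keeps buf[] out of the caller's frame; in the kernel the
 * equivalent annotation is noinline_for_stack.  If this were inlined,
 * the compiler could merge buf[] back into reclaim_path()'s frame and
 * the stack saving would be lost.
 */
__attribute__((noinline))
static void report_putback(const char *tag, int nr)
{
	char buf[512];		/* heavy stack user, confined to this frame */

	snprintf(buf, sizeof(buf), "%s: put back %d pages", tag, nr);
	puts(buf);
}

static int reclaim_path(int nr_taken)
{
	/*
	 * This frame stays small even while it calls much deeper (the
	 * role writepage() plays in shrink_inactive_list()), because
	 * buf[] only exists while report_putback() runs.
	 */
	if (nr_taken == 0)
		return 0;

	report_putback("demo", nr_taken);
	return nr_taken;
}

int main(void)
{
	return reclaim_path(3) == 3 ? 0 : 1;
}
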
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michael Rubin <mrubin@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--   mm/vmscan.c   99
1 file changed, 56 insertions(+), 43 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7f25f336551a..12b692164bcc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1133,19 +1133,65 @@ static int too_many_isolated(struct zone *zone, int file,
 }
 
 /*
+ * TODO: Try merging with migrations version of putback_lru_pages
+ */
+static noinline_for_stack void
+putback_lru_pages(struct zone *zone, struct zone_reclaim_stat *reclaim_stat,
+				unsigned long nr_anon, unsigned long nr_file,
+				struct list_head *page_list)
+{
+	struct page *page;
+	struct pagevec pvec;
+
+	pagevec_init(&pvec, 1);
+
+	/*
+	 * Put back any unfreeable pages.
+	 */
+	spin_lock(&zone->lru_lock);
+	while (!list_empty(page_list)) {
+		int lru;
+		page = lru_to_page(page_list);
+		VM_BUG_ON(PageLRU(page));
+		list_del(&page->lru);
+		if (unlikely(!page_evictable(page, NULL))) {
+			spin_unlock_irq(&zone->lru_lock);
+			putback_lru_page(page);
+			spin_lock_irq(&zone->lru_lock);
+			continue;
+		}
+		SetPageLRU(page);
+		lru = page_lru(page);
+		add_page_to_lru_list(zone, page, lru);
+		if (is_active_lru(lru)) {
+			int file = is_file_lru(lru);
+			reclaim_stat->recent_rotated[file]++;
+		}
+		if (!pagevec_add(&pvec, page)) {
+			spin_unlock_irq(&zone->lru_lock);
+			__pagevec_release(&pvec);
+			spin_lock_irq(&zone->lru_lock);
+		}
+	}
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+
+	spin_unlock_irq(&zone->lru_lock);
+	pagevec_release(&pvec);
+}
+
+/*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
  */
-static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
-			struct zone *zone, struct scan_control *sc,
-			int priority, int file)
+static noinline_for_stack unsigned long
+shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
+		struct scan_control *sc, int priority, int file)
 {
 	LIST_HEAD(page_list);
-	struct pagevec pvec;
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-	struct page *page;
 	unsigned long nr_taken;
 	unsigned long nr_active;
 	unsigned int count[NR_LRU_LISTS] = { 0, };
@@ -1161,8 +1207,6 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 	}
 
 
-	pagevec_init(&pvec, 1);
-
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
 
@@ -1192,8 +1236,10 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 		 */
 	}
 
-	if (nr_taken == 0)
-		goto done;
+	if (nr_taken == 0) {
+		spin_unlock_irq(&zone->lru_lock);
+		return 0;
+	}
 
 	nr_active = clear_active_flags(&page_list, count);
 	__count_vm_events(PGDEACTIVATE, nr_active);
@@ -1244,40 +1290,7 @@ static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
 	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
 
-	spin_lock(&zone->lru_lock);
-	/*
-	 * Put back any unfreeable pages.
-	 */
-	while (!list_empty(&page_list)) {
-		int lru;
-		page = lru_to_page(&page_list);
-		VM_BUG_ON(PageLRU(page));
-		list_del(&page->lru);
-		if (unlikely(!page_evictable(page, NULL))) {
-			spin_unlock_irq(&zone->lru_lock);
-			putback_lru_page(page);
-			spin_lock_irq(&zone->lru_lock);
-			continue;
-		}
-		SetPageLRU(page);
-		lru = page_lru(page);
-		add_page_to_lru_list(zone, page, lru);
-		if (is_active_lru(lru)) {
-			int file = is_file_lru(lru);
-			reclaim_stat->recent_rotated[file]++;
-		}
-		if (!pagevec_add(&pvec, page)) {
-			spin_unlock_irq(&zone->lru_lock);
-			__pagevec_release(&pvec);
-			spin_lock_irq(&zone->lru_lock);
-		}
-	}
-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
-
-done:
-	spin_unlock_irq(&zone->lru_lock);
-	pagevec_release(&pvec);
+	putback_lru_pages(zone, reclaim_stat, nr_anon, nr_file, &page_list);
 	return nr_reclaimed;
 }
 