Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c | 51
1 files changed, 43 insertions, 8 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 154b37a33731..ec5ddccbf82e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1234,6 +1234,47 @@ static noinline_for_stack void update_isolated_counts(struct zone *zone,
 }
 
 /*
+ * Returns true if the caller should wait to clean dirty/writeback pages.
+ *
+ * If we are direct reclaiming for contiguous pages and we do not reclaim
+ * everything in the list, try again and wait for writeback IO to complete.
+ * This will stall high-order allocations noticeably. Only do that when we
+ * really need to free the pages under high memory pressure.
+ */
+static inline bool should_reclaim_stall(unsigned long nr_taken,
+                                        unsigned long nr_freed,
+                                        int priority,
+                                        struct scan_control *sc)
+{
+        int lumpy_stall_priority;
+
+        /* kswapd should not stall on sync IO */
+        if (current_is_kswapd())
+                return false;
+
+        /* Only stall on lumpy reclaim */
+        if (!sc->lumpy_reclaim_mode)
+                return false;
+
+        /* If we have reclaimed everything on the isolated list, no stall */
+        if (nr_freed == nr_taken)
+                return false;
+
+        /*
+         * For high-order allocations, there are two stall thresholds.
+         * High-cost allocations stall immediately whereas lower-order
+         * allocations such as stacks require the scanning priority
+         * to be much higher before stalling.
+         */
+        if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+                lumpy_stall_priority = DEF_PRIORITY;
+        else
+                lumpy_stall_priority = DEF_PRIORITY / 3;
+
+        return priority <= lumpy_stall_priority;
+}
+
+/*
  * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
  * of reclaimed pages
  */
@@ -1298,14 +1339,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
 
         nr_reclaimed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
 
-        /*
-         * If we are direct reclaiming for contiguous pages and we do
-         * not reclaim everything in the list, try again and wait
-         * for IO to complete. This will stall high-order allocations
-         * but that should be acceptable to the caller
-         */
-        if (nr_reclaimed < nr_taken && !current_is_kswapd() &&
-            sc->lumpy_reclaim_mode) {
+        /* Check if we should synchronously wait for writeback */
+        if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
                 congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                 /*
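
The stall heuristic introduced above is easiest to see with concrete numbers. The following is a minimal user-space sketch, not part of the patch: it assumes DEF_PRIORITY is 12 and PAGE_ALLOC_COSTLY_ORDER is 3 (the values commonly used by kernels of this era), replaces struct scan_control with a two-field stand-in, and drops the kswapd check since it models a direct reclaimer.

/*
 * Standalone sketch of the should_reclaim_stall() decision, compiled
 * outside the kernel. DEF_PRIORITY and PAGE_ALLOC_COSTLY_ORDER are
 * assumed values; scan_control_sketch is a reduced stand-in holding
 * only the two fields the heuristic reads.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEF_PRIORITY             12   /* assumed kernel value */
#define PAGE_ALLOC_COSTLY_ORDER   3   /* assumed kernel value */

struct scan_control_sketch {
        int order;                /* allocation order being reclaimed for */
        bool lumpy_reclaim_mode;  /* direct lumpy reclaim in progress? */
};

/* Mirrors the heuristic for a non-kswapd (direct reclaim) caller. */
static bool stall_sketch(unsigned long nr_taken, unsigned long nr_freed,
                         int priority, const struct scan_control_sketch *sc)
{
        int lumpy_stall_priority;

        if (!sc->lumpy_reclaim_mode)
                return false;   /* only lumpy reclaim ever stalls */
        if (nr_freed == nr_taken)
                return false;   /* everything was reclaimed, no need */

        /*
         * Costly orders stall at any priority; cheap orders stall only
         * once the scan priority has dropped well below DEF_PRIORITY.
         */
        lumpy_stall_priority = (sc->order > PAGE_ALLOC_COSTLY_ORDER) ?
                               DEF_PRIORITY : DEF_PRIORITY / 3;

        return priority <= lumpy_stall_priority;
}

int main(void)
{
        struct scan_control_sketch stack = { .order = 2, .lumpy_reclaim_mode = true };
        struct scan_control_sketch huge  = { .order = 9, .lumpy_reclaim_mode = true };

        /* order-2 (e.g. kernel stacks): no stall at priority 12, stall at 4 */
        printf("order 2, prio 12: %d\n", stall_sketch(32, 20, 12, &stack));
        printf("order 2, prio 4:  %d\n", stall_sketch(32, 20, 4, &stack));
        /* order-9 (costly): stalls as soon as some pages are left unreclaimed */
        printf("order 9, prio 12: %d\n", stall_sketch(32, 20, 12, &huge));
        return 0;
}

With those assumed values, a costly order-9 request stalls at any scan priority once pages are left unreclaimed, while an order-2 request only stalls after the priority has fallen to DEF_PRIORITY / 3 or below, i.e. under sustained memory pressure, which is the behavioural change the patch description aims for.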