diff options
 mm/page_alloc.c | 22 +++++++++++++++++-----
 mm/vmscan.c     |  7 ++++---
 2 files changed, 22 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6965be064a31..0a502e99ee22 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1461,7 +1461,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
 	struct task_struct *p = current;
 	int do_retry;
 	int alloc_flags;
-	int did_some_progress;
+	unsigned long did_some_progress;
+	unsigned long pages_reclaimed = 0;
 
 	might_sleep_if(wait);
 
@@ -1611,15 +1612,26 @@ nofail_alloc:
 	 * Don't let big-order allocations loop unless the caller explicitly
 	 * requests that.  Wait for some write requests to complete then retry.
 	 *
-	 * In this implementation, either order <= PAGE_ALLOC_COSTLY_ORDER or
-	 * __GFP_REPEAT mean __GFP_NOFAIL, but that may not be true in other
-	 * implementations.
+	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
+	 * means __GFP_NOFAIL, but that may not be true in other
+	 * implementations.
+	 *
+	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
+	 * specified, then we retry until we no longer reclaim any pages
+	 * (above), or we've reclaimed an order of pages at least as
+	 * large as the allocation's order. In both cases, if the
+	 * allocation still fails, we stop retrying.
 	 */
+	pages_reclaimed += did_some_progress;
 	do_retry = 0;
 	if (!(gfp_mask & __GFP_NORETRY)) {
-		if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
-			(gfp_mask & __GFP_REPEAT))
+		if (order <= PAGE_ALLOC_COSTLY_ORDER) {
 			do_retry = 1;
+		} else {
+			if (gfp_mask & __GFP_REPEAT &&
+				pages_reclaimed < (1 << order))
+					do_retry = 1;
+		}
 		if (gfp_mask & __GFP_NOFAIL)
 			do_retry = 1;
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eceac9f9032f..12e8627c9747 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1299,6 +1299,9 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
  * hope that some of these pages can be written.  But if the allocating task
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
+ *
+ * returns:	0, if no pages reclaimed
+ * 		else, the number of pages reclaimed
  */
 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 					struct scan_control *sc)
@@ -1347,7 +1350,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	}
 	total_scanned += sc->nr_scanned;
 	if (nr_reclaimed >= sc->swap_cluster_max) {
-		ret = 1;
+		ret = nr_reclaimed;
 		goto out;
 	}
 
@@ -1370,7 +1373,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	}
 	/* top priority shrink_caches still had more to do? don't OOM, then */
 	if (!sc->all_unreclaimable && scan_global_lru(sc))
-		ret = 1;
+		ret = nr_reclaimed;
 out:
 	/*
 	 * Now that we've scanned all the zones at this priority level, note