aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorMel Gorman <mgorman@suse.de>2012-10-08 19:29:11 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-10-09 03:22:20 -0400
commit83fde0f22872aa8c1d46f775cc7bdfa864499e65 (patch)
treee052edc57463924675da2c665b9fa2f2020d62c3 /mm
parent4ffb6335da87b51c17e7ff6495170785f21558dd (diff)
mm: vmscan: scale number of pages reclaimed by reclaim/compaction based on failures
If allocation fails after compaction then compaction may be deferred for a number of allocation attempts. If there are subsequent failures, compact_defer_shift is increased to defer for longer periods. This patch uses that information to scale the number of pages reclaimed with compact_defer_shift until allocations succeed again. The rationale is that reclaiming the normal number of pages still allowed compaction to fail and its success depends on the number of pages. If it's failing, reclaim more pages until it succeeds again. Note that this is not implying that VM reclaim is not reclaiming enough pages or that its logic is broken. try_to_free_pages() always asks for SWAP_CLUSTER_MAX pages to be reclaimed regardless of order and that is what it does. Direct reclaim stops normally with this check. if (sc->nr_reclaimed >= sc->nr_to_reclaim) goto out; should_continue_reclaim delays when that check is made until a minimum number of pages for reclaim/compaction are reclaimed. It is possible that this patch could instead set nr_to_reclaim in try_to_free_pages() and drive it from there but that behaves differently and not necessarily for the better. If driven from do_try_to_free_pages(), it is also possible that priorities will rise. When they reach DEF_PRIORITY-2, it will also start stalling and setting pages for immediate reclaim which is more disruptive than is desirable in this case. That is a more wide-reaching change that could cause another regression related to THP requests causing interactive jitter. [akpm@linux-foundation.org: fix build] Signed-off-by: Mel Gorman <mgorman@suse.de> Acked-by: Rik van Riel <riel@redhat.com> Reviewed-by: Minchan Kim <minchan@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/vmscan.c25
1 files changed, 25 insertions, 0 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 99b434b674c0..f6fdae7a8d4c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1729,6 +1729,28 @@ static bool in_reclaim_compaction(struct scan_control *sc)
1729 return false; 1729 return false;
1730} 1730}
1731 1731
1732#ifdef CONFIG_COMPACTION
1733/*
1734 * If compaction is deferred for sc->order then scale the number of pages
1735 * reclaimed based on the number of consecutive allocation failures
1736 */
1737static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
1738 struct lruvec *lruvec, struct scan_control *sc)
1739{
1740 struct zone *zone = lruvec_zone(lruvec);
1741
/* compact_defer_shift grows with each consecutive failed compaction at
 * this order, so the reclaim target doubles per failure until an
 * allocation at sc->order succeeds again. */
1742 if (zone->compact_order_failed <= sc->order)
1743 pages_for_compaction <<= zone->compact_defer_shift;
1744 return pages_for_compaction;
1745}
1746#else
/*
 * !CONFIG_COMPACTION: compaction is never deferred, so there is nothing
 * to scale by — return the caller's value unchanged.
 */
1747static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
1748 struct lruvec *lruvec, struct scan_control *sc)
1749{
1750 return pages_for_compaction;
1751}
1752#endif
1753
1732/* 1754/*
1733 * Reclaim/compaction is used for high-order allocation requests. It reclaims 1755 * Reclaim/compaction is used for high-order allocation requests. It reclaims
1734 * order-0 pages before compacting the zone. should_continue_reclaim() returns 1756 * order-0 pages before compacting the zone. should_continue_reclaim() returns
@@ -1776,6 +1798,9 @@ static inline bool should_continue_reclaim(struct lruvec *lruvec,
1776 * inactive lists are large enough, continue reclaiming 1798 * inactive lists are large enough, continue reclaiming
1777 */ 1799 */
1778 pages_for_compaction = (2UL << sc->order); 1800 pages_for_compaction = (2UL << sc->order);
1801
1802 pages_for_compaction = scale_for_compaction(pages_for_compaction,
1803 lruvec, sc);
1779 inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE); 1804 inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
1780 if (nr_swap_pages > 0) 1805 if (nr_swap_pages > 0)
1781 inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON); 1806 inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);