Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 27
 1 file changed, 2 insertions(+), 25 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2624edcfb420..48550c66f1f2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1760,28 +1760,6 @@ static bool in_reclaim_compaction(struct scan_control *sc)
 	return false;
 }
 
-#ifdef CONFIG_COMPACTION
-/*
- * If compaction is deferred for sc->order then scale the number of pages
- * reclaimed based on the number of consecutive allocation failures
- */
-static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
-			struct lruvec *lruvec, struct scan_control *sc)
-{
-	struct zone *zone = lruvec_zone(lruvec);
-
-	if (zone->compact_order_failed <= sc->order)
-		pages_for_compaction <<= zone->compact_defer_shift;
-	return pages_for_compaction;
-}
-#else
-static unsigned long scale_for_compaction(unsigned long pages_for_compaction,
-			struct lruvec *lruvec, struct scan_control *sc)
-{
-	return pages_for_compaction;
-}
-#endif
-
 /*
  * Reclaim/compaction is used for high-order allocation requests. It reclaims
  * order-0 pages before compacting the zone. should_continue_reclaim() returns
@@ -1829,9 +1807,6 @@ static inline bool should_continue_reclaim(struct lruvec *lruvec,
 	 * inactive lists are large enough, continue reclaiming
 	 */
 	pages_for_compaction = (2UL << sc->order);
-
-	pages_for_compaction = scale_for_compaction(pages_for_compaction,
-				lruvec, sc);
 	inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
 	if (nr_swap_pages > 0)
 		inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
@@ -3017,6 +2992,8 @@ static int kswapd(void *p)
 						&balanced_classzone_idx);
 		}
 	}
+
+	current->reclaim_state = NULL;
 	return 0;
 }
 
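Note (not part of the patch above): the removed scale_for_compaction() helper boosted the reclaim target by shifting it left by zone->compact_defer_shift whenever compaction had already failed at sc->order or above. A minimal, self-contained sketch of that arithmetic follows; the concrete values chosen for order and compact_defer_shift are illustrative assumptions, not taken from the patch.

#include <stdio.h>

/* Worked example of the scaling this patch removes: with sc->order == 2 the
 * base target is 2UL << 2 == 8 pages; if compaction had been deferred for
 * this order (compact_defer_shift == 4 here, an assumed value), the removed
 * helper scaled that target to 8 << 4 == 128 pages. */
int main(void)
{
	unsigned int order = 2;			/* stands in for sc->order */
	unsigned int compact_defer_shift = 4;	/* stands in for zone->compact_defer_shift */
	unsigned long pages_for_compaction = 2UL << order;

	pages_for_compaction <<= compact_defer_shift;
	printf("scaled reclaim target: %lu pages\n", pages_for_compaction);
	return 0;
}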