about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorKonstantin Khlebnikov <khlebnikov@openvz.org>2012-05-29 18:07:02 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-05-29 19:22:27 -0400
commit90bdcfafdc660b359018262f0f8630d100e28760 (patch)
treef88bc1668a2f5dc9558a5498359ff295d90d792c /mm
parent90126375d89ab8e0bde30ff22139b6097d56ed8a (diff)
mm/vmscan: push lruvec pointer into should_continue_reclaim()
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org> Cc: Mel Gorman <mel@csn.ul.ie> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Acked-by: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/vmscan.c8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b139ad7f396e..1d251b5b0a06 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1708,14 +1708,13 @@ static bool in_reclaim_compaction(struct scan_control *sc)
1708 * calls try_to_compact_zone() that it will have enough free pages to succeed. 1708 * calls try_to_compact_zone() that it will have enough free pages to succeed.
1709 * It will give up earlier than that if there is difficulty reclaiming pages. 1709 * It will give up earlier than that if there is difficulty reclaiming pages.
1710 */ 1710 */
1711static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz, 1711static inline bool should_continue_reclaim(struct lruvec *lruvec,
1712 unsigned long nr_reclaimed, 1712 unsigned long nr_reclaimed,
1713 unsigned long nr_scanned, 1713 unsigned long nr_scanned,
1714 struct scan_control *sc) 1714 struct scan_control *sc)
1715{ 1715{
1716 unsigned long pages_for_compaction; 1716 unsigned long pages_for_compaction;
1717 unsigned long inactive_lru_pages; 1717 unsigned long inactive_lru_pages;
1718 struct lruvec *lruvec;
1719 1718
1720 /* If not in reclaim/compaction mode, stop */ 1719 /* If not in reclaim/compaction mode, stop */
1721 if (!in_reclaim_compaction(sc)) 1720 if (!in_reclaim_compaction(sc))
@@ -1748,7 +1747,6 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
1748 * If we have not reclaimed enough pages for compaction and the 1747 * If we have not reclaimed enough pages for compaction and the
1749 * inactive lists are large enough, continue reclaiming 1748 * inactive lists are large enough, continue reclaiming
1750 */ 1749 */
1751 lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
1752 pages_for_compaction = (2UL << sc->order); 1750 pages_for_compaction = (2UL << sc->order);
1753 inactive_lru_pages = get_lruvec_size(lruvec, LRU_INACTIVE_FILE); 1751 inactive_lru_pages = get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
1754 if (nr_swap_pages > 0) 1752 if (nr_swap_pages > 0)
@@ -1759,7 +1757,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
1759 return true; 1757 return true;
1760 1758
1761 /* If compaction would go ahead or the allocation would succeed, stop */ 1759 /* If compaction would go ahead or the allocation would succeed, stop */
1762 switch (compaction_suitable(mz->zone, sc->order)) { 1760 switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) {
1763 case COMPACT_PARTIAL: 1761 case COMPACT_PARTIAL:
1764 case COMPACT_CONTINUE: 1762 case COMPACT_CONTINUE:
1765 return false; 1763 return false;
@@ -1826,7 +1824,7 @@ restart:
1826 sc, LRU_ACTIVE_ANON); 1824 sc, LRU_ACTIVE_ANON);
1827 1825
1828 /* reclaim/compaction might need reclaim to continue */ 1826 /* reclaim/compaction might need reclaim to continue */
1829 if (should_continue_reclaim(mz, nr_reclaimed, 1827 if (should_continue_reclaim(lruvec, nr_reclaimed,
1830 sc->nr_scanned - nr_scanned, sc)) 1828 sc->nr_scanned - nr_scanned, sc))
1831 goto restart; 1829 goto restart;
1832 1830