path: root/mm/vmscan.c
author		Konstantin Khlebnikov <khlebnikov@openvz.org>	2012-05-29 18:07:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-29 19:22:26 -0400
commit		074291fea8bcedeabf295360e2ddd9bbb5830b4a (patch)
tree		01531a2ca7e9b648a4d8cd9bc772aa484ed2190d /mm/vmscan.c
parent		27ac81d85e5cfcc755dd5fa3f04dc883ab5d821b (diff)
mm/vmscan: replace zone_nr_lru_pages() with get_lruvec_size()
If memory cgroup is enabled we always use lruvecs which are embedded into
struct mem_cgroup_per_zone, so we can reach lru_size counters via
container_of().

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
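The point of the change is that, with memcg enabled, every lruvec is embedded
in a struct mem_cgroup_per_zone, so a bare lruvec pointer is enough to reach
the per-LRU page counts. A minimal sketch of how such a helper could look,
assuming mem_cgroup_per_zone embeds the lruvec and keeps the counts in an
lru_size[] array (field layout here is illustrative, not the exact in-tree
definition):

	/*
	 * Hedged sketch: assumes the memcg per-zone structure embeds the
	 * lruvec and tracks per-list page counts in lru_size[].
	 */
	struct mem_cgroup_per_zone {
		struct lruvec	lruvec;
		unsigned long	lru_size[NR_LRU_LISTS];
		/* ... */
	};

	static unsigned long
	mem_cgroup_get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
	{
		struct mem_cgroup_per_zone *mz;

		/* container_of() walks back from the embedded lruvec */
		mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
		return mz->lru_size[lru];
	}

With that in place, callers in vmscan.c no longer need the memcg/nid/zone-index
triple; they pass the lruvec they already have, as the diff below shows.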
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 76d786eb84a8..5318faa6a251 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -155,19 +155,14 @@ static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz)
 	return &mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup)->reclaim_stat;
 }
 
-static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
-				       enum lru_list lru)
+static unsigned long get_lruvec_size(struct lruvec *lruvec, enum lru_list lru)
 {
 	if (!mem_cgroup_disabled())
-		return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup,
-						    zone_to_nid(mz->zone),
-						    zone_idx(mz->zone),
-						    BIT(lru));
+		return mem_cgroup_get_lruvec_size(lruvec, lru);
 
-	return zone_page_state(mz->zone, NR_LRU_BASE + lru);
+	return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
 }
 
-
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -1603,6 +1598,9 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
 	enum lru_list lru;
 	int noswap = 0;
 	bool force_scan = false;
+	struct lruvec *lruvec;
+
+	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
 
 	/*
 	 * If the zone or memcg is small, nr[l] can be 0.  This
@@ -1628,10 +1626,10 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
 		goto out;
 	}
 
-	anon  = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) +
-		zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
-	file  = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) +
-		zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+	anon  = get_lruvec_size(lruvec, LRU_ACTIVE_ANON) +
+		get_lruvec_size(lruvec, LRU_INACTIVE_ANON);
+	file  = get_lruvec_size(lruvec, LRU_ACTIVE_FILE) +
+		get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
 
 	if (global_reclaim(sc)) {
 		free  = zone_page_state(mz->zone, NR_FREE_PAGES);
@@ -1694,7 +1692,7 @@ out:
 		int file = is_file_lru(lru);
 		unsigned long scan;
 
-		scan = zone_nr_lru_pages(mz, lru);
+		scan = get_lruvec_size(lruvec, lru);
 		if (sc->priority || noswap || !vmscan_swappiness(sc)) {
 			scan >>= sc->priority;
 			if (!scan && force_scan)
@@ -1730,6 +1728,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 {
 	unsigned long pages_for_compaction;
 	unsigned long inactive_lru_pages;
+	struct lruvec *lruvec;
 
 	/* If not in reclaim/compaction mode, stop */
 	if (!in_reclaim_compaction(sc))
@@ -1762,10 +1761,12 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 	 * If we have not reclaimed enough pages for compaction and the
 	 * inactive lists are large enough, continue reclaiming
 	 */
+	lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
 	pages_for_compaction = (2UL << sc->order);
-	inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE);
+	inactive_lru_pages = get_lruvec_size(lruvec, LRU_INACTIVE_FILE);
 	if (nr_swap_pages > 0)
-		inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON);
+		inactive_lru_pages += get_lruvec_size(lruvec,
+						      LRU_INACTIVE_ANON);
 	if (sc->nr_reclaimed < pages_for_compaction &&
 	    inactive_lru_pages > pages_for_compaction)
 		return true;