author	Vladimir Davydov <vdavydov@parallels.com>	2014-12-10 18:44:16 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-10 20:41:07 -0500
commit	4ef461e8f4dd13a2e64c6c8f00c420d62294e2d4 (patch)
tree	18137acb6cceb84855c370abb5555c830966bd55 /mm
parent	97ad2be1daf8e6f2d297aa349101b340e1327917 (diff)
memcg: remove mem_cgroup_reclaimable check from soft reclaim
mem_cgroup_reclaimable() checks whether a cgroup has reclaimable pages on *any* NUMA node. However, the only place where it's called is mem_cgroup_soft_reclaim(), which tries to reclaim memory from a *specific* zone. So the way it is used is incorrect - it will return true even if the cgroup doesn't have pages on the zone we're scanning.

I think we can get rid of this check completely, because mem_cgroup_shrink_node_zone(), which is called by mem_cgroup_soft_reclaim() if mem_cgroup_reclaimable() returns true, is equivalent to shrink_lruvec(), which exits almost immediately if the lruvec passed to it is empty. So there's no need to optimize anything here. Besides, we don't have such a check in the general scan path (shrink_zone) either.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
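The changelog's key claim - that the removed pre-check buys nothing because shrink_lruvec() does essentially no work when the lruvec is empty - can be illustrated with a minimal userspace sketch. Everything below (lruvec_sketch, shrink_lruvec_sketch, the priority shift) is hypothetical and only mirrors the shape of the kernel code, not its actual implementation:

#include <stdio.h>

enum lru_list { LRU_INACTIVE_ANON, LRU_ACTIVE_ANON,
		LRU_INACTIVE_FILE, LRU_ACTIVE_FILE, NR_LRU_LISTS };

/* Stand-in for a zone's per-memcg LRU state: just the page counts. */
struct lruvec_sketch {
	unsigned long lru_size[NR_LRU_LISTS];
};

/*
 * Mimics the shape of shrink_lruvec(): scan targets are derived from
 * the lruvec's own page counts, so an empty lruvec yields all-zero
 * targets and the reclaim loop body never executes. A separate "is
 * this cgroup reclaimable?" pre-check therefore saves nothing.
 */
static unsigned long shrink_lruvec_sketch(struct lruvec_sketch *lruvec,
					  int priority)
{
	unsigned long nr[NR_LRU_LISTS];
	unsigned long nr_reclaimed = 0;
	int lru;

	for (lru = 0; lru < NR_LRU_LISTS; lru++)
		nr[lru] = lruvec->lru_size[lru] >> priority;

	for (lru = 0; lru < NR_LRU_LISTS; lru++) {
		while (nr[lru] > 0) {
			/* isolate a batch of pages, try to reclaim it ... */
			nr[lru]--;
			nr_reclaimed++;
		}
	}
	return nr_reclaimed;
}

int main(void)
{
	struct lruvec_sketch empty = { { 0 } };

	/* Empty lruvec: returns immediately, no work done. */
	printf("reclaimed: %lu\n", shrink_lruvec_sketch(&empty, 2));
	return 0;
}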
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c | 43
1 file changed, 0 insertions(+), 43 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3734fd6d1132..32e3b191857d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1743,52 +1743,11 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
 	memcg->last_scanned_node = node;
 	return node;
 }
-
-/*
- * Check all nodes whether it contains reclaimable pages or not.
- * For quick scan, we make use of scan_nodes. This will allow us to skip
- * unused nodes. But scan_nodes is lazily updated and may not cotain
- * enough new information. We need to do double check.
- */
-static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
-{
-	int nid;
-
-	/*
-	 * quick check...making use of scan_node.
-	 * We can skip unused nodes.
-	 */
-	if (!nodes_empty(memcg->scan_nodes)) {
-		for (nid = first_node(memcg->scan_nodes);
-		     nid < MAX_NUMNODES;
-		     nid = next_node(nid, memcg->scan_nodes)) {
-
-			if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
-				return true;
-		}
-	}
-	/*
-	 * Check rest of nodes.
-	 */
-	for_each_node_state(nid, N_MEMORY) {
-		if (node_isset(nid, memcg->scan_nodes))
-			continue;
-		if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
-			return true;
-	}
-	return false;
-}
-
 #else
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
 {
 	return 0;
 }
-
-static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
-{
-	return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
-}
 #endif
 
 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
@@ -1832,8 +1791,6 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
 			}
 			continue;
 		}
-		if (!mem_cgroup_reclaimable(victim, false))
-			continue;
 		total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
 						     zone, &nr_scanned);
 		*total_scanned += nr_scanned;