aboutsummaryrefslogtreecommitdiffstats
path: root/mm/workingset.c
diff options
context:
space:
mode:
authorMel Gorman <mgorman@techsingularity.net>2016-07-28 18:46:02 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-07-28 19:07:41 -0400
commita9dd0a83104c01269ea36a9b4ec42b51edf85427 (patch)
tree2d3329f49b0c91376945e96838bab1307e33b57d /mm/workingset.c
parent86c79f6b5426ce118d32c73fa9e328f0a86ab590 (diff)
mm, vmscan: make shrink_node decisions more node-centric
Earlier patches focused on having direct reclaim and kswapd use data that
is node-centric for reclaiming but shrink_node() itself still uses too
much zone information.  This patch removes unnecessary zone-based
information with the most important decision being whether to continue
reclaim or not.  Some memcg APIs are adjusted as a result even though
memcg itself still uses some zone information.

[mgorman@techsingularity.net: optimization]
Link: http://lkml.kernel.org/r/1468588165-12461-2-git-send-email-mgorman@techsingularity.net
Link: http://lkml.kernel.org/r/1467970510-21195-14-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/workingset.c')
-rw-r--r--  mm/workingset.c  |  6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/workingset.c b/mm/workingset.c
index 7820a7e1ca98..df0dacaf54ee 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -218,7 +218,7 @@ void *workingset_eviction(struct address_space *mapping, struct page *page)
 	VM_BUG_ON_PAGE(page_count(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
-	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+	lruvec = mem_cgroup_lruvec(zone->zone_pgdat, zone, memcg);
 	eviction = atomic_long_inc_return(&lruvec->inactive_age);
 	return pack_shadow(memcgid, zone, eviction);
 }
@@ -267,7 +267,7 @@ bool workingset_refault(void *shadow)
 		rcu_read_unlock();
 		return false;
 	}
-	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+	lruvec = mem_cgroup_lruvec(zone->zone_pgdat, zone, memcg);
 	refault = atomic_long_read(&lruvec->inactive_age);
 	active_file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE);
 	rcu_read_unlock();
@@ -319,7 +319,7 @@ void workingset_activation(struct page *page)
 	memcg = page_memcg_rcu(page);
 	if (!mem_cgroup_disabled() && !memcg)
 		goto out;
-	lruvec = mem_cgroup_zone_lruvec(page_zone(page), memcg);
+	lruvec = mem_cgroup_lruvec(page_pgdat(page), page_zone(page), memcg);
 	atomic_long_inc(&lruvec->inactive_age);
 out:
 	rcu_read_unlock();