author	Jianyu Zhan <nasa4836@gmail.com>	2014-06-06 17:38:20 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-06 19:08:17 -0400
commit	e231875ba7a118de7970fae3ac08b244a2822074 (patch)
tree	3c4a182e837ce328b4f994f9021d7265a8c1b240
parent	aedf95ea0583676cd7bfa395681ad744791a433e (diff)
mm: memcontrol: clean up memcg zoneinfo lookup
Memcg zoneinfo lookup sites have either the page, the zone, or the node
id and zone index.  But sites that only have the zone have to look up the
node id and zone index themselves, whereas sites that already have those
two integers use a function for a simple pointer chase.

Provide mem_cgroup_zone_zoneinfo() that takes a zone pointer and let
sites that already have node id and zone index - all for-each-node,
for-each-zone iterators - use &memcg->nodeinfo[nid]->zoneinfo[zid]
directly.

Rename page_cgroup_zoneinfo() to mem_cgroup_page_zoneinfo() to match.

Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
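For reference, the two lookup helpers this patch leaves behind are
sketched below, with their bodies as introduced in the hunks that follow.
The mem_cgroup / nodeinfo / zoneinfo struct definitions are assumed from
the surrounding memcg code and not reproduced here:

static struct mem_cgroup_per_zone *
mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
{
	/* Derive the node id and zone index from the zone pointer... */
	int nid = zone_to_nid(zone);
	int zid = zone_idx(zone);

	/* ...then do the same pointer chase the iterators open-code. */
	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

Open-coded for-each-node/for-each-zone iterators, which already hold nid
and zid as loop variables, skip even these helpers and index
&memcg->nodeinfo[nid]->zoneinfo[zid] directly.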
-rw-r--r--	mm/memcontrol.c	89
1 file changed, 39 insertions(+), 50 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9bf8a84bcaae..41c1b393fef5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -676,9 +676,11 @@ static void disarm_static_keys(struct mem_cgroup *memcg)
 static void drain_all_stock_async(struct mem_cgroup *memcg);
 
 static struct mem_cgroup_per_zone *
-mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
+mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
 {
-	VM_BUG_ON((unsigned)nid >= nr_node_ids);
+	int nid = zone_to_nid(zone);
+	int zid = zone_idx(zone);
+
 	return &memcg->nodeinfo[nid]->zoneinfo[zid];
 }
 
@@ -688,12 +690,12 @@ struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
 }
 
 static struct mem_cgroup_per_zone *
-page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
+mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
 {
 	int nid = page_to_nid(page);
 	int zid = page_zonenum(page);
 
-	return mem_cgroup_zoneinfo(memcg, nid, zid);
+	return &memcg->nodeinfo[nid]->zoneinfo[zid];
 }
 
 static struct mem_cgroup_tree_per_zone *
@@ -772,16 +774,14 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 	unsigned long long excess;
 	struct mem_cgroup_per_zone *mz;
 	struct mem_cgroup_tree_per_zone *mctz;
-	int nid = page_to_nid(page);
-	int zid = page_zonenum(page);
-	mctz = soft_limit_tree_from_page(page);
 
+	mctz = soft_limit_tree_from_page(page);
 	/*
 	 * Necessary to update all ancestors when hierarchy is used.
 	 * because their event counter is not touched.
 	 */
 	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
-		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+		mz = mem_cgroup_page_zoneinfo(memcg, page);
 		excess = res_counter_soft_limit_excess(&memcg->res);
 		/*
 		 * We have to update the tree if mz is on RB-tree or
@@ -804,14 +804,14 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
 
 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
 {
-	int node, zone;
-	struct mem_cgroup_per_zone *mz;
 	struct mem_cgroup_tree_per_zone *mctz;
+	struct mem_cgroup_per_zone *mz;
+	int nid, zid;
 
-	for_each_node(node) {
-		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
-			mz = mem_cgroup_zoneinfo(memcg, node, zone);
-			mctz = soft_limit_tree_node_zone(node, zone);
+	for_each_node(nid) {
+		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
+			mctz = soft_limit_tree_node_zone(nid, zid);
 			mem_cgroup_remove_exceeded(memcg, mz, mctz);
 		}
 	}
@@ -946,8 +946,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 }
 
-unsigned long
-mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
+unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
 	struct mem_cgroup_per_zone *mz;
 
@@ -955,46 +954,38 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 	return mz->lru_size[lru];
 }
 
-static unsigned long
-mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
-			     unsigned int lru_mask)
-{
-	struct mem_cgroup_per_zone *mz;
-	enum lru_list lru;
-	unsigned long ret = 0;
-
-	mz = mem_cgroup_zoneinfo(memcg, nid, zid);
-
-	for_each_lru(lru) {
-		if (BIT(lru) & lru_mask)
-			ret += mz->lru_size[lru];
-	}
-	return ret;
-}
-
-static unsigned long
-mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
-			int nid, unsigned int lru_mask)
+static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+						  int nid,
+						  unsigned int lru_mask)
 {
-	u64 total = 0;
+	unsigned long nr = 0;
 	int zid;
 
-	for (zid = 0; zid < MAX_NR_ZONES; zid++)
-		total += mem_cgroup_zone_nr_lru_pages(memcg,
-				nid, zid, lru_mask);
+	VM_BUG_ON((unsigned)nid >= nr_node_ids);
+
+	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+		struct mem_cgroup_per_zone *mz;
+		enum lru_list lru;
 
-	return total;
+		for_each_lru(lru) {
+			if (!(BIT(lru) & lru_mask))
+				continue;
+			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
+			nr += mz->lru_size[lru];
+		}
+	}
+	return nr;
 }
 
 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
 					     unsigned int lru_mask)
 {
+	unsigned long nr = 0;
 	int nid;
-	u64 total = 0;
 
 	for_each_node_state(nid, N_MEMORY)
-		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
-	return total;
+		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
+	return nr;
 }
 
 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
@@ -1242,11 +1233,9 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
 		int uninitialized_var(seq);
 
 		if (reclaim) {
-			int nid = zone_to_nid(reclaim->zone);
-			int zid = zone_idx(reclaim->zone);
 			struct mem_cgroup_per_zone *mz;
 
-			mz = mem_cgroup_zoneinfo(root, nid, zid);
+			mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
 			iter = &mz->reclaim_iter[reclaim->priority];
 			if (prev && reclaim->generation != iter->generation) {
 				iter->last_visited = NULL;
@@ -1353,7 +1342,7 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
 		goto out;
 	}
 
-	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
+	mz = mem_cgroup_zone_zoneinfo(memcg, zone);
 	lruvec = &mz->lruvec;
 out:
 	/*
@@ -1412,7 +1401,7 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
 		pc->mem_cgroup = memcg = root_mem_cgroup;
 
-	mz = page_cgroup_zoneinfo(memcg, page);
+	mz = mem_cgroup_page_zoneinfo(memcg, page);
 	lruvec = &mz->lruvec;
 out:
 	/*
@@ -5305,7 +5294,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 
 	for_each_online_node(nid)
 		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
-			mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+			mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
 			rstat = &mz->lruvec.reclaim_stat;
 
 			recent_rotated[0] += rstat->recent_rotated[0];
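
For illustration only, the call-site convention this patch settles on
amounts to the following; memcg_sum_active_anon() is a hypothetical
example, not a function added by this patch:

/*
 * Hypothetical walker (not from this patch): when nid and zid are loop
 * variables, index the per-node/per-zone arrays directly; a zone- or
 * page-keyed caller would go through mem_cgroup_zone_zoneinfo() or
 * mem_cgroup_page_zoneinfo() instead.
 */
static unsigned long memcg_sum_active_anon(struct mem_cgroup *memcg)
{
	unsigned long nr = 0;
	int nid, zid;

	for_each_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++)
			nr += memcg->nodeinfo[nid]->zoneinfo[zid].lru_size[LRU_ACTIVE_ANON];

	return nr;
}

Keeping the helpers only for zone- and page-keyed lookups, while letting
nid/zid loops index the arrays directly, avoids a pointless pack-and-
unpack of the two integers on every iteration.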