-rw-r--r--   include/linux/mmzone.h |  2
-rw-r--r--   mm/memcontrol.c        | 46
-rw-r--r--   mm/mmzone.c            |  6
-rw-r--r--   mm/page_alloc.c        |  2
4 files changed, 38 insertions, 18 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 50aaca81f63d..a23923ba8263 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -752,7 +752,7 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
 				     unsigned long size,
 				     enum memmap_context context);
 
-extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
+extern void lruvec_init(struct lruvec *lruvec);
 
 static inline struct zone *lruvec_zone(struct lruvec *lruvec)
 {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 93a7e36ded89..dd39ba000b31 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1055,12 +1055,24 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
 				      struct mem_cgroup *memcg)
 {
 	struct mem_cgroup_per_zone *mz;
+	struct lruvec *lruvec;
 
-	if (mem_cgroup_disabled())
-		return &zone->lruvec;
+	if (mem_cgroup_disabled()) {
+		lruvec = &zone->lruvec;
+		goto out;
+	}
 
 	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
-	return &mz->lruvec;
+	lruvec = &mz->lruvec;
+out:
+	/*
+	 * Since a node can be onlined after the mem_cgroup was created,
+	 * we have to be prepared to initialize lruvec->zone here;
+	 * and if offlined then reonlined, we need to reinitialize it.
+	 */
+	if (unlikely(lruvec->zone != zone))
+		lruvec->zone = zone;
+	return lruvec;
 }
 
 /*
@@ -1087,9 +1099,12 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 	struct mem_cgroup_per_zone *mz;
 	struct mem_cgroup *memcg;
 	struct page_cgroup *pc;
+	struct lruvec *lruvec;
 
-	if (mem_cgroup_disabled())
-		return &zone->lruvec;
+	if (mem_cgroup_disabled()) {
+		lruvec = &zone->lruvec;
+		goto out;
+	}
 
 	pc = lookup_page_cgroup(page);
 	memcg = pc->mem_cgroup;
@@ -1107,7 +1122,16 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 		pc->mem_cgroup = memcg = root_mem_cgroup;
 
 	mz = page_cgroup_zoneinfo(memcg, page);
-	return &mz->lruvec;
+	lruvec = &mz->lruvec;
+out:
+	/*
+	 * Since a node can be onlined after the mem_cgroup was created,
+	 * we have to be prepared to initialize lruvec->zone here;
+	 * and if offlined then reonlined, we need to reinitialize it.
+	 */
+	if (unlikely(lruvec->zone != zone))
+		lruvec->zone = zone;
+	return lruvec;
 }
 
 /**
@@ -3697,17 +3721,17 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 static bool mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 				int node, int zid, enum lru_list lru)
 {
-	struct mem_cgroup_per_zone *mz;
+	struct lruvec *lruvec;
 	unsigned long flags, loop;
 	struct list_head *list;
 	struct page *busy;
 	struct zone *zone;
 
 	zone = &NODE_DATA(node)->node_zones[zid];
-	mz = mem_cgroup_zoneinfo(memcg, node, zid);
-	list = &mz->lruvec.lists[lru];
+	lruvec = mem_cgroup_zone_lruvec(zone, memcg);
+	list = &lruvec->lists[lru];
 
-	loop = mz->lru_size[lru];
+	loop = mem_cgroup_get_lru_size(lruvec, lru);
 	/* give some margin against EBUSY etc...*/
 	loop += 256;
 	busy = NULL;
@@ -4745,7 +4769,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
-		lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
+		lruvec_init(&mz->lruvec);
 		mz->usage_in_excess = 0;
 		mz->on_tree = false;
 		mz->memcg = memcg;
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 3cef80f6ac79..4596d81b89b1 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -87,7 +87,7 @@ int memmap_valid_within(unsigned long pfn,
 }
 #endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
 
-void lruvec_init(struct lruvec *lruvec, struct zone *zone)
+void lruvec_init(struct lruvec *lruvec)
 {
 	enum lru_list lru;
 
@@ -95,8 +95,4 @@ void lruvec_init(struct lruvec *lruvec, struct zone *zone)
 
 	for_each_lru(lru)
 		INIT_LIST_HEAD(&lruvec->lists[lru]);
-
-#ifdef CONFIG_MEMCG
-	lruvec->zone = zone;
-#endif
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5b74de6702e0..c91598b1b4c0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4505,7 +4505,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		zone->zone_pgdat = pgdat;
 
 		zone_pcp_init(zone);
-		lruvec_init(&zone->lruvec, zone);
+		lruvec_init(&zone->lruvec);
 		if (!size)
 			continue;
 
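
Note (not part of the patch): the comments added to mem_cgroup_zone_lruvec() and mem_cgroup_page_lruvec() describe the new scheme in which lruvec->zone is no longer filled in by lruvec_init(); instead the back-pointer is lazily repaired on every lookup, so a zone that is onlined (or offlined and reonlined) after the mem_cgroup was created still ends up pointing at the right zone. A minimal userspace sketch of that idiom follows; the trimmed-down structs and the lookup_lruvec() helper are invented for illustration and are not the real kernel definitions.

/*
 * Standalone sketch of the lazy back-pointer fixup used by this patch.
 * Not kernel code: struct layouts and lookup_lruvec() are illustrative.
 */
#include <stdio.h>

struct zone {
	int node_id;
};

struct lruvec {
	struct zone *zone;	/* may be unset or stale until next lookup */
};

/* mirrors the new lruvec_init(): no zone argument, back-pointer left unset */
static void lruvec_init(struct lruvec *lruvec)
{
	lruvec->zone = NULL;
}

/* mirrors the "out:" fixup in mem_cgroup_zone_lruvec()/_page_lruvec() */
static struct lruvec *lookup_lruvec(struct lruvec *lruvec, struct zone *zone)
{
	if (lruvec->zone != zone)	/* zone (re)onlined since init: repair */
		lruvec->zone = zone;
	return lruvec;
}

int main(void)
{
	struct zone zone0 = { .node_id = 0 };
	struct lruvec lruvec;

	lruvec_init(&lruvec);			/* zone not known yet */
	lookup_lruvec(&lruvec, &zone0);		/* back-pointer repaired here */
	printf("lruvec->zone is node %d\n", lruvec.zone->node_id);
	return 0;
}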