author     Johannes Weiner <jweiner@redhat.com>            2012-01-12 20:18:15 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-12 23:13:05 -0500
commit     925b7673cce39116ce61e7a06683a4a0dad1e72a (patch)
tree       66c134db836e531e196ee3dfc23c124ff74ac827 /mm/memcontrol.c
parent     6290df545814990ca2663baf6e894669132d5f73 (diff)
mm: make per-memcg LRU lists exclusive
Now that all code that operated on global per-zone LRU lists is
converted to operate on per-memory cgroup LRU lists instead, there is
no reason to keep the double-LRU scheme around any longer.

The pc->lru member is removed and page->lru is linked directly to the
per-memory cgroup LRU lists, which removes two pointers from a
descriptor that exists for every page frame in the system.

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Ying Han <yinghan@google.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
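To make the shape of the resulting interface concrete, the sketch below shows roughly what a putback path looks like under the exclusive scheme. It is not part of the patch: the helper name example_lru_putback is made up for illustration, while mem_cgroup_lru_add_list(), page_lru(), SetPageLRU() and lruvec->lists[] are the interfaces this series introduces or relies on. The memcg side only does the accounting and hands back the lruvec; the caller links the single page->lru node itself, since pc->lru no longer exists.

#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/memcontrol.h>

/* Hypothetical caller, for illustration only; not from the kernel tree. */
static void example_lru_putback(struct zone *zone, struct page *page)
{
	enum lru_list lru = page_lru(page);
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	SetPageLRU(page);
	/* Account the page against its memcg (or the global/root lruvec). */
	lruvec = mem_cgroup_lru_add_list(zone, page, lru);
	/* page->lru is now the only list node; there is no separate pc->lru. */
	list_add(&page->lru, &lruvec->lists[lru]);
	/* Real callers also update the zone LRU vmstat counters here. */
	spin_unlock_irq(&zone->lru_lock);
}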
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 311
1 file changed, 151 insertions(+), 160 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6e7f849a1a9e..972878b648c2 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -995,6 +995,27 @@ out:
 }
 EXPORT_SYMBOL(mem_cgroup_count_vm_event);
 
+/**
+ * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
+ * @zone: zone of the wanted lruvec
+ * @mem: memcg of the wanted lruvec
+ *
+ * Returns the lru list vector holding pages for the given @zone and
+ * @mem. This can be the global zone lruvec, if the memory controller
+ * is disabled.
+ */
+struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
+				      struct mem_cgroup *memcg)
+{
+	struct mem_cgroup_per_zone *mz;
+
+	if (mem_cgroup_disabled())
+		return &zone->lruvec;
+
+	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
+	return &mz->lruvec;
+}
+
 /*
  * Following LRU functions are allowed to be used without PCG_LOCK.
  * Operations are called by routine of global LRU independently from memcg.
@@ -1009,104 +1030,123 @@ EXPORT_SYMBOL(mem_cgroup_count_vm_event);
  * When moving account, the page is not on LRU. It's isolated.
  */
 
-void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
+/**
+ * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec
+ * @zone: zone of the page
+ * @page: the page
+ * @lru: current lru
+ *
+ * This function accounts for @page being added to @lru, and returns
+ * the lruvec for the given @zone and the memcg @page is charged to.
+ *
+ * The callsite is then responsible for physically linking the page to
+ * the returned lruvec->lists[@lru].
+ */
+struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
+				       enum lru_list lru)
 {
-	struct page_cgroup *pc;
 	struct mem_cgroup_per_zone *mz;
+	struct mem_cgroup *memcg;
+	struct page_cgroup *pc;
 
 	if (mem_cgroup_disabled())
-		return;
+		return &zone->lruvec;
+
 	pc = lookup_page_cgroup(page);
-	/* can happen while we handle swapcache. */
-	if (!TestClearPageCgroupAcctLRU(pc))
-		return;
-	VM_BUG_ON(!pc->mem_cgroup);
+	VM_BUG_ON(PageCgroupAcctLRU(pc));
 	/*
-	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
-	 * removed from global LRU.
+	 * putback:				charge:
+	 * SetPageLRU				SetPageCgroupUsed
+	 * smp_mb				smp_mb
+	 * PageCgroupUsed && add to memcg LRU	PageLRU && add to memcg LRU
+	 *
+	 * Ensure that one of the two sides adds the page to the memcg
+	 * LRU during a race.
 	 */
-	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	/* huge page split is done under lru_lock. so, we have no races. */
-	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
-	VM_BUG_ON(list_empty(&pc->lru));
-	list_del_init(&pc->lru);
-}
-
-void mem_cgroup_del_lru(struct page *page)
-{
-	mem_cgroup_del_lru_list(page, page_lru(page));
+	smp_mb();
+	/*
+	 * If the page is uncharged, it may be freed soon, but it
+	 * could also be swap cache (readahead, swapoff) that needs to
+	 * be reclaimable in the future. root_mem_cgroup will babysit
+	 * it for the time being.
+	 */
+	if (PageCgroupUsed(pc)) {
+		/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+		smp_rmb();
+		memcg = pc->mem_cgroup;
+		SetPageCgroupAcctLRU(pc);
+	} else
+		memcg = root_mem_cgroup;
+	mz = page_cgroup_zoneinfo(memcg, page);
+	/* compound_order() is stabilized through lru_lock */
+	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
+	return &mz->lruvec;
 }
 
-/*
- * Writeback is about to end against a page which has been marked for immediate
- * reclaim. If it still appears to be reclaimable, move it to the tail of the
- * inactive list.
+/**
+ * mem_cgroup_lru_del_list - account for removing an lru page
+ * @page: the page
+ * @lru: target lru
+ *
+ * This function accounts for @page being removed from @lru.
+ *
+ * The callsite is then responsible for physically unlinking
+ * @page->lru.
  */
-void mem_cgroup_rotate_reclaimable_page(struct page *page)
+void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
 {
 	struct mem_cgroup_per_zone *mz;
+	struct mem_cgroup *memcg;
 	struct page_cgroup *pc;
-	enum lru_list lru = page_lru(page);
 
 	if (mem_cgroup_disabled())
 		return;
 
 	pc = lookup_page_cgroup(page);
-	/* unused page is not rotated. */
-	if (!PageCgroupUsed(pc))
-		return;
-	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move_tail(&pc->lru, &mz->lruvec.lists[lru]);
+	/*
+	 * root_mem_cgroup babysits uncharged LRU pages, but
+	 * PageCgroupUsed is cleared when the page is about to get
+	 * freed. PageCgroupAcctLRU remembers whether the
+	 * LRU-accounting happened against pc->mem_cgroup or
+	 * root_mem_cgroup.
+	 */
+	if (TestClearPageCgroupAcctLRU(pc)) {
+		VM_BUG_ON(!pc->mem_cgroup);
+		memcg = pc->mem_cgroup;
+	} else
+		memcg = root_mem_cgroup;
+	mz = page_cgroup_zoneinfo(memcg, page);
+	/* huge page split is done under lru_lock. so, we have no races. */
+	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
 }
 
-void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
+void mem_cgroup_lru_del(struct page *page)
 {
-	struct mem_cgroup_per_zone *mz;
-	struct page_cgroup *pc;
-
-	if (mem_cgroup_disabled())
-		return;
-
-	pc = lookup_page_cgroup(page);
-	/* unused page is not rotated. */
-	if (!PageCgroupUsed(pc))
-		return;
-	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move(&pc->lru, &mz->lruvec.lists[lru]);
+	mem_cgroup_lru_del_list(page, page_lru(page));
 }
 
-void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
+/**
+ * mem_cgroup_lru_move_lists - account for moving a page between lrus
+ * @zone: zone of the page
+ * @page: the page
+ * @from: current lru
+ * @to: target lru
+ *
+ * This function accounts for @page being moved between the lrus @from
+ * and @to, and returns the lruvec for the given @zone and the memcg
+ * @page is charged to.
+ *
+ * The callsite is then responsible for physically relinking
+ * @page->lru to the returned lruvec->lists[@to].
+ */
+struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
+					 struct page *page,
+					 enum lru_list from,
+					 enum lru_list to)
 {
-	struct page_cgroup *pc;
-	struct mem_cgroup_per_zone *mz;
-
-	if (mem_cgroup_disabled())
-		return;
-	pc = lookup_page_cgroup(page);
-	VM_BUG_ON(PageCgroupAcctLRU(pc));
-	/*
-	 * putback:				charge:
-	 * SetPageLRU				SetPageCgroupUsed
-	 * smp_mb				smp_mb
-	 * PageCgroupUsed && add to memcg LRU	PageLRU && add to memcg LRU
-	 *
-	 * Ensure that one of the two sides adds the page to the memcg
-	 * LRU during a race.
-	 */
-	smp_mb();
-	if (!PageCgroupUsed(pc))
-		return;
-	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
-	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	/* huge page split is done under lru_lock. so, we have no races. */
-	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
-	SetPageCgroupAcctLRU(pc);
-	list_add(&pc->lru, &mz->lruvec.lists[lru]);
+	/* XXX: Optimize this, especially for @from == @to */
+	mem_cgroup_lru_del_list(page, from);
+	return mem_cgroup_lru_add_list(zone, page, to);
 }
 
 /*
@@ -1117,6 +1157,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
  */
 static void mem_cgroup_lru_del_before_commit(struct page *page)
 {
+	enum lru_list lru;
 	unsigned long flags;
 	struct zone *zone = page_zone(page);
 	struct page_cgroup *pc = lookup_page_cgroup(page);
@@ -1133,17 +1174,28 @@ static void mem_cgroup_lru_del_before_commit(struct page *page)
 		return;
 
 	spin_lock_irqsave(&zone->lru_lock, flags);
+	lru = page_lru(page);
 	/*
-	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
-	 * is guarded by lock_page() because the page is SwapCache.
+	 * The uncharged page could still be registered to the LRU of
+	 * the stale pc->mem_cgroup.
+	 *
+	 * As pc->mem_cgroup is about to get overwritten, the old LRU
+	 * accounting needs to be taken care of. Let root_mem_cgroup
+	 * babysit the page until the new memcg is responsible for it.
+	 *
+	 * The PCG_USED bit is guarded by lock_page() as the page is
+	 * swapcache/pagecache.
 	 */
-	if (!PageCgroupUsed(pc))
-		mem_cgroup_del_lru_list(page, page_lru(page));
+	if (PageLRU(page) && PageCgroupAcctLRU(pc) && !PageCgroupUsed(pc)) {
+		del_page_from_lru_list(zone, page, lru);
+		add_page_to_lru_list(zone, page, lru);
+	}
 	spin_unlock_irqrestore(&zone->lru_lock, flags);
 }
 
 static void mem_cgroup_lru_add_after_commit(struct page *page)
 {
+	enum lru_list lru;
 	unsigned long flags;
 	struct zone *zone = page_zone(page);
 	struct page_cgroup *pc = lookup_page_cgroup(page);
@@ -1161,22 +1213,22 @@ static void mem_cgroup_lru_add_after_commit(struct page *page)
 	if (likely(!PageLRU(page)))
 		return;
 	spin_lock_irqsave(&zone->lru_lock, flags);
-	/* link when the page is linked to LRU but page_cgroup isn't */
-	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
-		mem_cgroup_add_lru_list(page, page_lru(page));
+	lru = page_lru(page);
+	/*
+	 * If the page is not on the LRU, someone will soon put it
+	 * there. If it is, and also already accounted for on the
+	 * memcg-side, it must be on the right lruvec as setting
+	 * pc->mem_cgroup and PageCgroupUsed is properly ordered.
+	 * Otherwise, root_mem_cgroup has been babysitting the page
+	 * during the charge. Move it to the new memcg now.
+	 */
+	if (PageLRU(page) && !PageCgroupAcctLRU(pc)) {
+		del_page_from_lru_list(zone, page, lru);
+		add_page_to_lru_list(zone, page, lru);
+	}
 	spin_unlock_irqrestore(&zone->lru_lock, flags);
 }
 
-
-void mem_cgroup_move_lists(struct page *page,
-			   enum lru_list from, enum lru_list to)
-{
-	if (mem_cgroup_disabled())
-		return;
-	mem_cgroup_del_lru_list(page, from);
-	mem_cgroup_add_lru_list(page, to);
-}
-
 /*
  * Checks whether given mem is same or in the root_mem_cgroup's
  * hierarchy subtree
@@ -1282,68 +1334,6 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
 	return &mz->reclaim_stat;
 }
 
-unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
-					struct list_head *dst,
-					unsigned long *scanned, int order,
-					isolate_mode_t mode,
-					struct zone *z,
-					struct mem_cgroup *mem_cont,
-					int active, int file)
-{
-	unsigned long nr_taken = 0;
-	struct page *page;
-	unsigned long scan;
-	LIST_HEAD(pc_list);
-	struct list_head *src;
-	struct page_cgroup *pc, *tmp;
-	int nid = zone_to_nid(z);
-	int zid = zone_idx(z);
-	struct mem_cgroup_per_zone *mz;
-	int lru = LRU_FILE * file + active;
-	int ret;
-
-	BUG_ON(!mem_cont);
-	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
-	src = &mz->lruvec.lists[lru];
-
-	scan = 0;
-	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
-		if (scan >= nr_to_scan)
-			break;
-
-		if (unlikely(!PageCgroupUsed(pc)))
-			continue;
-
-		page = lookup_cgroup_page(pc);
-
-		if (unlikely(!PageLRU(page)))
-			continue;
-
-		scan++;
-		ret = __isolate_lru_page(page, mode, file);
-		switch (ret) {
-		case 0:
-			list_move(&page->lru, dst);
-			mem_cgroup_del_lru(page);
-			nr_taken += hpage_nr_pages(page);
-			break;
-		case -EBUSY:
-			/* we don't affect global LRU but rotate in our LRU */
-			mem_cgroup_rotate_lru_list(page, page_lru(page));
-			break;
-		default:
-			break;
-		}
-	}
-
-	*scanned = scan;
-
-	trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
-				      0, 0, 0, mode);
-
-	return nr_taken;
-}
-
 #define mem_cgroup_from_res_counter(counter, member)	\
 	container_of(counter, struct mem_cgroup, member)
 
@@ -3726,11 +3716,11 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 				int node, int zid, enum lru_list lru)
 {
-	struct zone *zone;
 	struct mem_cgroup_per_zone *mz;
-	struct page_cgroup *pc, *busy;
 	unsigned long flags, loop;
 	struct list_head *list;
+	struct page *busy;
+	struct zone *zone;
 	int ret = 0;
 
 	zone = &NODE_DATA(node)->node_zones[zid];
@@ -3742,6 +3732,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 		loop += 256;
 	busy = NULL;
 	while (loop--) {
+		struct page_cgroup *pc;
 		struct page *page;
 
 		ret = 0;
@@ -3750,16 +3741,16 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 			spin_unlock_irqrestore(&zone->lru_lock, flags);
 			break;
 		}
-		pc = list_entry(list->prev, struct page_cgroup, lru);
-		if (busy == pc) {
-			list_move(&pc->lru, list);
+		page = list_entry(list->prev, struct page, lru);
+		if (busy == page) {
+			list_move(&page->lru, list);
 			busy = NULL;
 			spin_unlock_irqrestore(&zone->lru_lock, flags);
 			continue;
 		}
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-		page = lookup_cgroup_page(pc);
+		pc = lookup_page_cgroup(page);
 
 		ret = mem_cgroup_move_parent(page, pc, memcg, GFP_KERNEL);
 		if (ret == -ENOMEM)
@@ -3767,7 +3758,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 
 		if (ret == -EBUSY || ret == -EINVAL) {
 			/* found lock contention or "pc" is obsolete. */
-			busy = pc;
+			busy = page;
 			cond_resched();
 		} else
 			busy = NULL;