Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	59
1 file changed, 50 insertions(+), 9 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b311f19bbe01..f50cb7b1efdb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -331,8 +331,12 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
 		return;
 	pc = lookup_page_cgroup(page);
 	/* can happen while we handle swapcache. */
-	if (list_empty(&pc->lru))
+	if (list_empty(&pc->lru) || !pc->mem_cgroup)
 		return;
+	/*
+	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
+	 * removed from global LRU.
+	 */
 	mz = page_cgroup_zoneinfo(pc);
 	mem = pc->mem_cgroup;
 	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
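
For reference, this is roughly how mem_cgroup_del_lru_list() reads once the
hunk is applied. The early guard, the declarations, and the trailing
list_del_init() fall outside the hunk and are reconstructed assumptions, not
text from this diff:

void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *mem;

	if (mem_cgroup_disabled())		/* reconstructed guard */
		return;
	pc = lookup_page_cgroup(page);
	/* can happen while we handle swapcache: the page may be fully
	 * uncharged yet still sit on an LRU, with no owner to account */
	if (list_empty(&pc->lru) || !pc->mem_cgroup)
		return;
	/* PCG_USED is deliberately not checked; it is cleared only when
	 * the page finally leaves the global LRU */
	mz = page_cgroup_zoneinfo(pc);	/* looked up via pc->mem_cgroup */
	mem = pc->mem_cgroup;
	MEM_CGROUP_ZSTAT(mz, lru) -= 1;	/* per-zone LRU accounting */
	list_del_init(&pc->lru);	/* reconstructed: drop the link */
}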
@@ -379,16 +383,44 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 	MEM_CGROUP_ZSTAT(mz, lru) += 1;
 	list_add(&pc->lru, &mz->lists[lru]);
 }
+
 /*
- * To add swapcache into LRU. Be careful to all this function.
- * zone->lru_lock shouldn't be held and irq must not be disabled.
+ * When handling SwapCache, pc->mem_cgroup may be changed while it's linked
+ * to the LRU because the page may be reused after it's fully uncharged
+ * (because of SwapCache behavior). To handle that, unlink page_cgroup from
+ * the LRU when charging it again. Only used to charge SwapCache; called
+ * under lock_page, and zone->lru_lock must never be held.
  */
-static void mem_cgroup_lru_fixup(struct page *page)
+static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
+{
+	unsigned long flags;
+	struct zone *zone = page_zone(page);
+	struct page_cgroup *pc = lookup_page_cgroup(page);
+
+	spin_lock_irqsave(&zone->lru_lock, flags);
+	/*
+	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
+	 * is guarded by lock_page() because the page is SwapCache.
+	 */
+	if (!PageCgroupUsed(pc))
+		mem_cgroup_del_lru_list(page, page_lru(page));
+	spin_unlock_irqrestore(&zone->lru_lock, flags);
+}
+
+static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
 {
-	if (!isolate_lru_page(page))
-		putback_lru_page(page);
+	unsigned long flags;
+	struct zone *zone = page_zone(page);
+	struct page_cgroup *pc = lookup_page_cgroup(page);
+
+	spin_lock_irqsave(&zone->lru_lock, flags);
+	/* link when the page is linked to LRU but page_cgroup isn't */
+	if (PageLRU(page) && list_empty(&pc->lru))
+		mem_cgroup_add_lru_list(page, page_lru(page));
+	spin_unlock_irqrestore(&zone->lru_lock, flags);
 }
 
+
 void mem_cgroup_move_lists(struct page *page,
 		enum lru_list from, enum lru_list to)
 {
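
These two helpers are meant to bracket a charge commit: the caller holds
lock_page() on the SwapCache page and must not hold zone->lru_lock, since
both helpers take it themselves. A minimal sketch of the intended calling
pattern (the wrapper below is hypothetical; the real call sites follow in
the charge paths of this patch):

/* page is locked SwapCache; zone->lru_lock is NOT held */
static int charge_locked_swapcache(struct page *page, struct mm_struct *mm,
				gfp_t mask, struct mem_cgroup *mem)
{
	int ret;

	/* drop the stale LRU link left over from the earlier uncharge */
	mem_cgroup_lru_del_before_commit_swapcache(page);
	ret = mem_cgroup_charge_common(page, mm, mask,
				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
	/* relink once pc->mem_cgroup points at the new owner */
	mem_cgroup_lru_add_after_commit_swapcache(page);
	return ret;
}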
@@ -1168,8 +1200,11 @@ int mem_cgroup_cache_charge_swapin(struct page *page,
 			mem = NULL; /* charge to current */
 		}
 	}
+	/* SwapCache may still be linked to the LRU at this point. */
+	mem_cgroup_lru_del_before_commit_swapcache(page);
 	ret = mem_cgroup_charge_common(page, mm, mask,
 				MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
+	mem_cgroup_lru_add_after_commit_swapcache(page);
 	/* drop extra refcnt from tryget */
 	if (mem)
 		css_put(&mem->css);
@@ -1185,8 +1220,6 @@ int mem_cgroup_cache_charge_swapin(struct page *page,
 	}
 	if (!locked)
 		unlock_page(page);
-	/* add this page(page_cgroup) to the LRU we want. */
-	mem_cgroup_lru_fixup(page);
 
 	return ret;
 }
@@ -1201,7 +1234,9 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 	if (!ptr)
 		return;
 	pc = lookup_page_cgroup(page);
+	mem_cgroup_lru_del_before_commit_swapcache(page);
 	__mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
+	mem_cgroup_lru_add_after_commit_swapcache(page);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -1220,7 +1255,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 
 	}
 	/* add this page(page_cgroup) to the LRU we want. */
-	mem_cgroup_lru_fixup(page);
+
 }
 
 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
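
The ordering in mem_cgroup_commit_charge_swapin() is what makes the helper
pair work: the unlink runs while PCG_USED is still clear, and the relink runs
only after the commit has installed the new owner. A compressed view of the
sequence (the notes on what __mem_cgroup_commit_charge() does are an
assumption based on its role in this file, not text from this diff):

/*
 * lock_page(page);                               // keeps PCG_USED stable
 * mem_cgroup_lru_del_before_commit_swapcache(page);
 *         // !PageCgroupUsed(pc): unlink stale pc->lru, fix old stats
 * __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
 *         // assumed: sets pc->mem_cgroup = ptr and marks pc used
 * mem_cgroup_lru_add_after_commit_swapcache(page);
 *         // PageLRU(page) && list_empty(&pc->lru): link into new LRU
 * unlock_page(page);
 */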
@@ -1288,6 +1323,12 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 
 	mem_cgroup_charge_statistics(mem, pc, false);
 	ClearPageCgroupUsed(pc);
+	/*
+	 * pc->mem_cgroup is not cleared here. It will be accessed when the
+	 * page is freed from the LRU. This is safe because an uncharged page
+	 * is expected not to be reused (it is freed soon). The exception is
+	 * SwapCache, which is handled by the special functions above.
+	 */
 
 	mz = page_cgroup_zoneinfo(pc);
 	unlock_page_cgroup(pc);
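
The comment's claim connects back to the first hunk: after uncharge,
mem_cgroup_del_lru_list() still dereferences pc->mem_cgroup to find the
per-zone counters when the page finally leaves the global LRU. A lifecycle
sketch (simplified; the free-path framing is an assumption, not text from
this diff):

/*
 * uncharge:
 *	ClearPageCgroupUsed(pc);        // page no longer accounted,
 *	                                // but pc->mem_cgroup stays set
 *
 * later, the page is freed and removed from the global LRU:
 *	mem_cgroup_del_lru_list(page, lru);
 *	    mz = page_cgroup_zoneinfo(pc);   // needs pc->mem_cgroup
 *	    MEM_CGROUP_ZSTAT(mz, lru) -= 1;  // stats stay balanced
 */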