aboutsummaryrefslogtreecommitdiffstats
path: root/mm/memcontrol.c
diff options
context:
space:
mode:
authorKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>2012-01-12 20:17:44 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-01-12 23:13:04 -0500
commitab936cbcd02072a34b60d268f94440fd5cf1970b (patch)
treed37e3e3c54cc4cc691a428b6ceb71b4b40e4f42b /mm/memcontrol.c
parent28d82dc1c4edbc352129f97f4ca22624d1fe61de (diff)
memcg: add mem_cgroup_replace_page_cache() to fix LRU issue
Commit ef6a3c6311 ("mm: add replace_page_cache_page() function") added a function replace_page_cache_page(). This function replaces a page in the radix-tree with a new page. When doing this, memory cgroup needs to fix up the accounting information. memcg needs to check the PCG_USED bit etc. In some (many?) cases, 'newpage' is on the LRU before calling replace_page_cache(). So, memcg's LRU accounting information should be fixed, too. This patch adds mem_cgroup_replace_page_cache() and removes the old hooks. In that function, old pages will be unaccounted without touching res_counter and the new page will be accounted to the memcg (of the old page). When overwriting pc->mem_cgroup of newpage, take zone->lru_lock and avoid races with LRU handling. Background: replace_page_cache_page() is called by FUSE code in its splice() handling. Here, 'newpage' is replacing oldpage but this newpage is not a newly allocated page and may be on the LRU. LRU mis-accounting will be critical for memory cgroup because rmdir() checks that the whole LRU is empty and there is no account leak. If a page is on a different LRU than it should be, rmdir() will fail. This bug was added in March 2011, but there is no bug report yet. I guess there are not many people who use memcg and FUSE at the same time with upstream kernels. The result of this bug is that an admin cannot destroy a memcg because of an account leak. So, no panic, no deadlock. And, even if an active cgroup exists, umount can succeed. So no problem at shutdown. Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Michal Hocko <mhocko@suse.cz> Cc: Miklos Szeredi <mszeredi@suse.cz> Cc: Hugh Dickins <hughd@google.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--mm/memcontrol.c44
1 files changed, 44 insertions, 0 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d87aa3510c5e..0b2d4036f1cd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3432,6 +3432,50 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
3432 cgroup_release_and_wakeup_rmdir(&memcg->css); 3432 cgroup_release_and_wakeup_rmdir(&memcg->css);
3433} 3433}
3434 3434
3435/*
3436 * At replace page cache, newpage is not under any memcg but it's on
3437 * LRU. So, this function doesn't touch res_counter but handles LRU
3438 * in correct way. Both pages are locked so we cannot race with uncharge.
3439 */
3440void mem_cgroup_replace_page_cache(struct page *oldpage,
3441 struct page *newpage)
3442{
3443 struct mem_cgroup *memcg;
3444 struct page_cgroup *pc;
3445 struct zone *zone;
3446 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
3447 unsigned long flags;
3448
	/* Nothing to fix up if memcg accounting is compiled out/disabled. */
3449 if (mem_cgroup_disabled())
3450 return;
3451
3452 pc = lookup_page_cgroup(oldpage);
3453 /* fix accounting on old pages */
	/*
	 * Drop oldpage's per-memcg statistics and mark it uncharged under
	 * the page_cgroup lock. The res_counter charge is deliberately
	 * left in place: it is transferred to newpage by the commit below,
	 * which charges newpage to the same memcg read here.
	 */
3454 lock_page_cgroup(pc);
3455 memcg = pc->mem_cgroup;
3456 mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
3457 ClearPageCgroupUsed(pc);
3458 unlock_page_cgroup(pc);
3459
	/* Swap-backed (shmem/tmpfs) pages are accounted as SHMEM, not CACHE. */
3460 if (PageSwapBacked(oldpage))
3461 type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3462
3463 zone = page_zone(newpage);
3464 pc = lookup_page_cgroup(newpage);
3465 /*
3466 * Even if newpage->mapping was NULL before starting replacement,
3467 * the newpage may be on LRU(or pagevec for LRU) already. We lock
3468 * LRU while we overwrite pc->mem_cgroup.
3469 */
	/*
	 * The commit rewrites pc->mem_cgroup, which selects the per-memcg
	 * LRU list the page belongs to. If newpage is already on an LRU,
	 * take it off first and re-add it afterwards so it ends up on the
	 * correct memcg's list; do all of this under zone->lru_lock.
	 */
3470 spin_lock_irqsave(&zone->lru_lock, flags);
3471 if (PageLRU(newpage))
3472 del_page_from_lru_list(zone, newpage, page_lru(newpage));
3473 __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
3474 if (PageLRU(newpage))
3475 add_page_to_lru_list(zone, newpage, page_lru(newpage));
3476 spin_unlock_irqrestore(&zone->lru_lock, flags);
3477}
3478
3435#ifdef CONFIG_DEBUG_VM 3479#ifdef CONFIG_DEBUG_VM
3436static struct page_cgroup *lookup_page_cgroup_used(struct page *page) 3480static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
3437{ 3481{