author		KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2012-01-12 20:17:44 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 23:13:04 -0500
commit		ab936cbcd02072a34b60d268f94440fd5cf1970b (patch)
tree		d37e3e3c54cc4cc691a428b6ceb71b4b40e4f42b /mm
parent		28d82dc1c4edbc352129f97f4ca22624d1fe61de (diff)
memcg: add mem_cgroup_replace_page_cache() to fix LRU issue
Commit ef6a3c6311 ("mm: add replace_page_cache_page() function") added a
function replace_page_cache_page().  This function replaces a page in the
radix-tree with a new page.  When doing this, the memory cgroup needs to
fix up its accounting information: memcg needs to check the PCG_USED bit, etc.
In some (many?) cases, 'newpage' is already on an LRU before
replace_page_cache_page() is called, so memcg's LRU accounting
information must be fixed up as well.
This patch adds mem_cgroup_replace_page_cache() and removes the old hooks.
In that function, the old page is unaccounted without touching
res_counter, and the new page is accounted to the memcg of the old page
(see the mm/memcontrol.c hunk below).  When overwriting pc->mem_cgroup of
newpage, zone->lru_lock is taken to avoid races with LRU handling.
Background:
replace_page_cache_page() is called by the FUSE code in its splice()
handling.  Here, 'newpage' replaces 'oldpage', but this newpage is not a
freshly allocated page and may already be on an LRU.  LRU mis-accounting
is critical for the memory cgroup, because rmdir() checks that the whole
LRU is empty and that there is no account leak.  If a page sits on a
different LRU than it should, rmdir() will fail.
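To make this concrete, here is a minimal, hypothetical sketch of the
splice-style calling pattern described above.  The function name and
surrounding details are illustrative, not the actual fs/fuse/dev.c code;
only replace_page_cache_page() is the real kernel API:

/*
 * Illustrative only: models a caller whose replacement page came from
 * a pipe buffer and is therefore not freshly allocated.
 */
static int move_pipe_page_into_cache(struct page *oldpage,
				     struct page *newpage)
{
	int err;

	/*
	 * PageLRU(newpage) may already be true here.  Before this patch,
	 * newpage's LRU accounting did not follow oldpage's memcg, so a
	 * later rmdir() on that memcg could see a non-empty LRU and fail.
	 */
	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err)
		return err;

	/* oldpage's page-cache reference has been dropped by now */
	return 0;
}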
This bug was introduced in March 2011, but no bug report has been filed
yet.  I guess there are not many people who use memcg and FUSE at the
same time with upstream kernels.
The result of this bug is that the admin cannot destroy a memcg because
of the account leak.  So, no panic, no deadlock.  And even if an active
cgroup exists, umount can succeed, so there is no problem at shutdown.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Miklos Szeredi <mszeredi@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/filemap.c	18
-rw-r--r--	mm/memcontrol.c	44
2 files changed, 46 insertions(+), 16 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index c4ee2e918bea..97f49ed35bd2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -393,24 +393,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
 	int error;
-	struct mem_cgroup *memcg = NULL;
 
 	VM_BUG_ON(!PageLocked(old));
 	VM_BUG_ON(!PageLocked(new));
 	VM_BUG_ON(new->mapping);
 
-	/*
-	 * This is not page migration, but prepare_migration and
-	 * end_migration does enough work for charge replacement.
-	 *
-	 * In the longer term we probably want a specialized function
-	 * for moving the charge from old to new in a more efficient
-	 * manner.
-	 */
-	error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
-	if (error)
-		return error;
-
 	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 	if (!error) {
 		struct address_space *mapping = old->mapping;
@@ -432,13 +419,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		if (PageSwapBacked(new))
 			__inc_zone_page_state(new, NR_SHMEM);
 		spin_unlock_irq(&mapping->tree_lock);
+		/* mem_cgroup codes must not be called under tree_lock */
+		mem_cgroup_replace_page_cache(old, new);
 		radix_tree_preload_end();
 		if (freepage)
 			freepage(old);
 		page_cache_release(old);
-		mem_cgroup_end_migration(memcg, old, new, true);
-	} else {
-		mem_cgroup_end_migration(memcg, old, new, false);
 	}
 
 	return error;
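The diffstat above is limited to 'mm', so the matching
include/linux/memcontrol.h change is not shown.  Callers outside
memcontrol.c need a declaration of the new hook, plus a no-op stub for
kernels built without memcg; roughly the following (a sketch, since the
exact header hunk is filtered out of this view):

/* include/linux/memcontrol.h -- reconstructed, hunk not shown above */
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					  struct page *newpage);
#else
static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
						 struct page *newpage)
{
}
#endif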
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d87aa3510c5e..0b2d4036f1cd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3432,6 +3432,50 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
 	cgroup_release_and_wakeup_rmdir(&memcg->css);
 }
 
+/*
+ * At replace page cache, newpage is not under any memcg but it's on
+ * LRU. So, this function doesn't touch res_counter but handles LRU
+ * in correct way. Both pages are locked so we cannot race with uncharge.
+ */
+void mem_cgroup_replace_page_cache(struct page *oldpage,
+				  struct page *newpage)
+{
+	struct mem_cgroup *memcg;
+	struct page_cgroup *pc;
+	struct zone *zone;
+	enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
+	unsigned long flags;
+
+	if (mem_cgroup_disabled())
+		return;
+
+	pc = lookup_page_cgroup(oldpage);
+	/* fix accounting on old pages */
+	lock_page_cgroup(pc);
+	memcg = pc->mem_cgroup;
+	mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), -1);
+	ClearPageCgroupUsed(pc);
+	unlock_page_cgroup(pc);
+
+	if (PageSwapBacked(oldpage))
+		type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
+
+	zone = page_zone(newpage);
+	pc = lookup_page_cgroup(newpage);
+	/*
+	 * Even if newpage->mapping was NULL before starting replacement,
+	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
+	 * LRU while we overwrite pc->mem_cgroup.
+	 */
+	spin_lock_irqsave(&zone->lru_lock, flags);
+	if (PageLRU(newpage))
+		del_page_from_lru_list(zone, newpage, page_lru(newpage));
+	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type);
+	if (PageLRU(newpage))
+		add_page_to_lru_list(zone, newpage, page_lru(newpage));
+	spin_unlock_irqrestore(&zone->lru_lock, flags);
+}
+
 #ifdef CONFIG_DEBUG_VM
 static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
 {