author		KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2012-01-12 20:17:44 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 23:13:04 -0500
commit		ab936cbcd02072a34b60d268f94440fd5cf1970b
tree		d37e3e3c54cc4cc691a428b6ceb71b4b40e4f42b /mm/filemap.c
parent		28d82dc1c4edbc352129f97f4ca22624d1fe61de
memcg: add mem_cgroup_replace_page_cache() to fix LRU issue
Commit ef6a3c6311 ("mm: add replace_page_cache_page() function") added a
function replace_page_cache_page(). This function replaces a page in the
radix-tree with a new page. When doing this, the memory cgroup needs to fix
up the accounting information: memcg needs to check the PCG_USED bit, etc.
In some (many?) cases, 'newpage' is already on an LRU before
replace_page_cache_page() is called. So memcg's LRU accounting information
must be fixed, too.
This patch adds mem_cgroup_replace_page_cache() and removes the old hooks.
In that function, the old page is unaccounted without touching res_counter,
and the new page is accounted to the memcg of the old page. When overwriting
pc->mem_cgroup of the new page, zone->lru_lock is taken to avoid races with
LRU handling.
Background:
replace_page_cache_page() is called by the FUSE code in its splice() handling.
Here, 'newpage' replaces 'oldpage', but this new page is not a newly allocated
page and may already be on an LRU. LRU mis-accounting is critical for the
memory cgroup because rmdir() checks that the whole LRU is empty and that
there is no account leak. If a page is on a different LRU than the one it
should be on, rmdir() will fail.
This bug was introduced in March 2011, but there have been no bug reports yet.
I guess there are not many people who use memcg and FUSE at the same time with
upstream kernels.
The result of this bug is that an admin cannot destroy a memcg because of the
account leak, so there is no panic and no deadlock. And even if an active
cgroup exists, umount can still succeed, so there is no problem at shutdown.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Miklos Szeredi <mszeredi@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	18
1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index c4ee2e918bea..97f49ed35bd2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -393,24 +393,11 @@ EXPORT_SYMBOL(filemap_write_and_wait_range);
 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 {
 	int error;
-	struct mem_cgroup *memcg = NULL;
 
 	VM_BUG_ON(!PageLocked(old));
 	VM_BUG_ON(!PageLocked(new));
 	VM_BUG_ON(new->mapping);
 
-	/*
-	 * This is not page migration, but prepare_migration and
-	 * end_migration does enough work for charge replacement.
-	 *
-	 * In the longer term we probably want a specialized function
-	 * for moving the charge from old to new in a more efficient
-	 * manner.
-	 */
-	error = mem_cgroup_prepare_migration(old, new, &memcg, gfp_mask);
-	if (error)
-		return error;
-
 	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 	if (!error) {
 		struct address_space *mapping = old->mapping;
@@ -432,13 +419,12 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
 		if (PageSwapBacked(new))
 			__inc_zone_page_state(new, NR_SHMEM);
 		spin_unlock_irq(&mapping->tree_lock);
+		/* mem_cgroup codes must not be called under tree_lock */
+		mem_cgroup_replace_page_cache(old, new);
 		radix_tree_preload_end();
 		if (freepage)
 			freepage(old);
 		page_cache_release(old);
-		mem_cgroup_end_migration(memcg, old, new, true);
-	} else {
-		mem_cgroup_end_migration(memcg, old, new, false);
 	}
 
 	return error;