diff options
author | KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> | 2008-07-25 04:47:17 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-25 13:53:37 -0400 |
commit | accf163e6ab729f1fc5fffaa0310e498270bf4e7 (patch) | |
tree | 798d5c97aa10bd26018fb1175a176c9d57560a39 /mm | |
parent | b76734e5e34e1889ab9fc5f3756570b1129f0f50 (diff) |
memcg: remove a redundant check
Because of the remove-refcnt patch, it is now a very rare case that
mem_cgroup_charge_common() is called against a page which is already accounted.
mem_cgroup_charge_common() is called when:
1. a page is added into file cache.
2. an anon page is _newly_ mapped.
A racy case is when a newly-swapped-in anonymous page is referenced by
multiple threads in do_swap_page() at the same time.
(The page is not locked when mem_cgroup_charge() is called from do_swap_page.)
Another case is shmem. It charges its page before calling add_to_page_cache().
Then, mem_cgroup_cache_charge() is called twice. This case is handled in
mem_cgroup_cache_charge(), but this check may be too hacky...
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memcontrol.c | 53 |
1 files changed, 25 insertions, 28 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 04ded27f622..5b3759bd549 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -536,28 +536,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, | |||
536 | if (mem_cgroup_subsys.disabled) | 536 | if (mem_cgroup_subsys.disabled) |
537 | return 0; | 537 | return 0; |
538 | 538 | ||
539 | /* | ||
540 | * Should page_cgroup's go to their own slab? | ||
541 | * One could optimize the performance of the charging routine | ||
542 | * by saving a bit in the page_flags and using it as a lock | ||
543 | * to see if the cgroup page already has a page_cgroup associated | ||
544 | * with it | ||
545 | */ | ||
546 | retry: | ||
547 | lock_page_cgroup(page); | ||
548 | pc = page_get_page_cgroup(page); | ||
549 | /* | ||
550 | * The page_cgroup exists and | ||
551 | * the page has already been accounted. | ||
552 | */ | ||
553 | if (unlikely(pc)) { | ||
554 | VM_BUG_ON(pc->page != page); | ||
555 | VM_BUG_ON(!pc->mem_cgroup); | ||
556 | unlock_page_cgroup(page); | ||
557 | goto done; | ||
558 | } | ||
559 | unlock_page_cgroup(page); | ||
560 | |||
561 | pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask); | 539 | pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask); |
562 | if (unlikely(pc == NULL)) | 540 | if (unlikely(pc == NULL)) |
563 | goto err; | 541 | goto err; |
@@ -618,15 +596,10 @@ retry: | |||
618 | lock_page_cgroup(page); | 596 | lock_page_cgroup(page); |
619 | if (unlikely(page_get_page_cgroup(page))) { | 597 | if (unlikely(page_get_page_cgroup(page))) { |
620 | unlock_page_cgroup(page); | 598 | unlock_page_cgroup(page); |
621 | /* | ||
622 | * Another charge has been added to this page already. | ||
623 | * We take lock_page_cgroup(page) again and read | ||
624 | * page->cgroup, increment refcnt.... just retry is OK. | ||
625 | */ | ||
626 | res_counter_uncharge(&mem->res, PAGE_SIZE); | 599 | res_counter_uncharge(&mem->res, PAGE_SIZE); |
627 | css_put(&mem->css); | 600 | css_put(&mem->css); |
628 | kmem_cache_free(page_cgroup_cache, pc); | 601 | kmem_cache_free(page_cgroup_cache, pc); |
629 | goto retry; | 602 | goto done; |
630 | } | 603 | } |
631 | page_assign_page_cgroup(page, pc); | 604 | page_assign_page_cgroup(page, pc); |
632 | 605 | ||
@@ -665,8 +638,32 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask) | |||
665 | int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, | 638 | int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, |
666 | gfp_t gfp_mask) | 639 | gfp_t gfp_mask) |
667 | { | 640 | { |
641 | /* | ||
642 | * Corner case handling. This is called from add_to_page_cache() | ||
643 | * in usual. But some FS (shmem) precharges this page before calling it | ||
644 | * and call add_to_page_cache() with GFP_NOWAIT. | ||
645 | * | ||
646 | * For GFP_NOWAIT case, the page may be pre-charged before calling | ||
647 | * add_to_page_cache(). (See shmem.c) check it here and avoid to call | ||
648 | * charge twice. (It works but has to pay a bit larger cost.) | ||
649 | */ | ||
650 | if (!(gfp_mask & __GFP_WAIT)) { | ||
651 | struct page_cgroup *pc; | ||
652 | |||
653 | lock_page_cgroup(page); | ||
654 | pc = page_get_page_cgroup(page); | ||
655 | if (pc) { | ||
656 | VM_BUG_ON(pc->page != page); | ||
657 | VM_BUG_ON(!pc->mem_cgroup); | ||
658 | unlock_page_cgroup(page); | ||
659 | return 0; | ||
660 | } | ||
661 | unlock_page_cgroup(page); | ||
662 | } | ||
663 | |||
668 | if (unlikely(!mm)) | 664 | if (unlikely(!mm)) |
669 | mm = &init_mm; | 665 | mm = &init_mm; |
666 | |||
670 | return mem_cgroup_charge_common(page, mm, gfp_mask, | 667 | return mem_cgroup_charge_common(page, mm, gfp_mask, |
671 | MEM_CGROUP_CHARGE_TYPE_CACHE, NULL); | 668 | MEM_CGROUP_CHARGE_TYPE_CACHE, NULL); |
672 | } | 669 | } |