author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>   2012-01-12 20:18:20 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>       2012-01-12 23:13:05 -0500
commit     e94c8a9cbce1aee4af9e1285802785481b7f93c5 (patch)
tree       14d496e62c2e8d59a8954ff10368d59d8dfaea03 /mm/memcontrol.c
parent     6b208e3f6e35aa76d254c395bdcd984b17c6b626 (diff)
memcg: make mem_cgroup_split_huge_fixup() more efficient
In split_huge_page(), mem_cgroup_split_huge_fixup() is called to handle
page_cgroup modifications.  It takes move_lock_page_cgroup(), modifies
page_cgroup, does the LRU accounting work, and is called once per tail
page, i.e. HPAGE_PMD_NR - 1 times.

But thinking again,
 - compound_lock() is held at move_account(), so it is not necessary to
   take move_lock_page_cgroup().
 - the LRU is locked, and all tail pages will go onto the same LRU the
   head is now on.
 - page_cgroup is contiguous across the huge page range.

This patch changes mem_cgroup_split_huge_fixup() so that it is called
once per hugepage, reducing the cost of splitting.
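The key observation is the last bullet: the page_cgroup descriptors for one
huge page are contiguous, so every tail descriptor can be fixed up in a single
pass (head_pc + i) under locks the caller already holds.  A minimal userspace
model of just that idea follows; the struct, the flag mask, and the constant
below are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>

#define HPAGE_PMD_NR 512                 /* stand-in for the kernel constant */
#define PCGF_NOCOPY_AT_SPLIT 0x1UL       /* stand-in: flags a tail must not inherit */

struct toy_page_cgroup {                 /* simplified stand-in for struct page_cgroup */
	void *mem_cgroup;
	unsigned long flags;
};

/* Descriptors for one huge page are contiguous, so one pass fixes every tail. */
static void split_huge_fixup(struct toy_page_cgroup *head_pc)
{
	int i;

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct toy_page_cgroup *pc = head_pc + i;  /* contiguity: head_pc + i */

		pc->mem_cgroup = head_pc->mem_cgroup;
		pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
	}
}

int main(void)
{
	static struct toy_page_cgroup pcs[HPAGE_PMD_NR];
	int memcg = 42;

	pcs[0].mem_cgroup = &memcg;
	pcs[0].flags = 0x3;
	split_huge_fixup(&pcs[0]);
	/* prints 0x2: mem_cgroup copied, the no-copy flag bit dropped */
	printf("tail %d flags: %#lx\n", HPAGE_PMD_NR - 1, pcs[HPAGE_PMD_NR - 1].flags);
	return 0;
}

This only models the contiguity and the flag mask; the locking that makes the
single pass safe (compound_lock, zone->lru_lock) is what the commit message
argues about above.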
[akpm@linux-foundation.org: fix typo, per Michal]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- | mm/memcontrol.c | 34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 972878b648c2..42174612cc0b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2553,39 +2553,39 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
         (1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
 /*
  * Because tail pages are not marked as "used", set it. We're under
- * zone->lru_lock, 'splitting on pmd' and compund_lock.
+ * zone->lru_lock, 'splitting on pmd' and compound_lock.
+ * charge/uncharge will be never happen and move_account() is done under
+ * compound_lock(), so we don't have to take care of races.
  */
-void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
+void mem_cgroup_split_huge_fixup(struct page *head)
 {
         struct page_cgroup *head_pc = lookup_page_cgroup(head);
-        struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
-        unsigned long flags;
+        struct page_cgroup *pc;
+        int i;
 
         if (mem_cgroup_disabled())
                 return;
-        /*
-         * We have no races with charge/uncharge but will have races with
-         * page state accounting.
-         */
-        move_lock_page_cgroup(head_pc, &flags);
+        for (i = 1; i < HPAGE_PMD_NR; i++) {
+                pc = head_pc + i;
+                pc->mem_cgroup = head_pc->mem_cgroup;
+                smp_wmb();/* see __commit_charge() */
+                /*
+                 * LRU flags cannot be copied because we need to add tail
+                 * page to LRU by generic call and our hooks will be called.
+                 */
+                pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
+        }
 
-        tail_pc->mem_cgroup = head_pc->mem_cgroup;
-        smp_wmb(); /* see __commit_charge() */
         if (PageCgroupAcctLRU(head_pc)) {
                 enum lru_list lru;
                 struct mem_cgroup_per_zone *mz;
-
                 /*
-                 * LRU flags cannot be copied because we need to add tail
-                 *.page to LRU by generic call and our hook will be called.
                  * We hold lru_lock, then, reduce counter directly.
                  */
                 lru = page_lru(head);
                 mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
-                MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+                MEM_CGROUP_ZSTAT(mz, lru) -= HPAGE_PMD_NR - 1;
         }
-        tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
-        move_unlock_page_cgroup(head_pc, &flags);
 }
 #endif
 
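The smp_wmb();/* see __commit_charge() */ in the new loop follows the usual
publication pattern: pc->mem_cgroup is stored before the flags that make the
descriptor appear usable, and a reader that observes those flags needs a
matching read barrier before dereferencing pc->mem_cgroup.  A hedged,
userspace-style analogue of that pairing is sketched below, using C11
release/acquire in place of the kernel's barrier macros; the names are
illustrative and not taken from memcontrol.c.

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins; not the kernel's page_cgroup fields. */
static void *published_memcg;
static atomic_int pc_used;      /* plays the role of the "used"/copied flags */
static int some_memcg = 42;

static void *writer(void *arg)
{
	(void)arg;
	published_memcg = &some_memcg;          /* pc->mem_cgroup = ... */
	/* release store: the C11 analogue of smp_wmb() before publishing flags */
	atomic_store_explicit(&pc_used, 1, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* acquire load: the analogue of the reader-side smp_rmb() */
	while (!atomic_load_explicit(&pc_used, memory_order_acquire))
		;
	printf("memcg value: %d\n", *(int *)published_memcg);  /* reliably 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Compile with -pthread; the release store stands in for the writer-side
smp_wmb() and the acquire load for the read barrier a consumer of the
descriptor would need.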