author	Johannes Weiner <hannes@cmpxchg.org>	2014-08-08 17:19:33 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-08 18:57:18 -0400
commit	6abb5a867ba0866cb21827b172cee6aa71244bd1 (patch)
tree	069d4eac9f0bfa9586c978362c9b7c7d0d40b288
parent	3cbb01871e22709fdd39478eca831de317df332f (diff)
mm: memcontrol: avoid charge statistics churn during page migration
Charge migration currently disables IRQs twice to update the charge
statistics for the old page and then again for the new page.

But migration is a seamless transition of a charge from one physical
page to another one of the same size, so this should be a non-event
from an accounting point of view.  Leave the statistics alone.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
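For illustration only, here is a minimal user-space sketch of the accounting idea (the struct and function names are hypothetical, not the kernel API): the statistics counter is updated once when a page is charged, and migration merely re-points the charge at the new page, so no counter has to be touched again during the move.

#include <stdio.h>

/* Hypothetical, simplified model of memcg charge accounting. */
struct memcg {
	long nr_charged_pages;	/* statistics counter */
};

struct page {
	struct memcg *memcg;	/* owning cgroup, NULL if uncharged */
};

/* Charging updates the statistics exactly once. */
static void charge(struct page *page, struct memcg *memcg, int nr_pages)
{
	page->memcg = memcg;
	memcg->nr_charged_pages += nr_pages;
}

/*
 * Migration transfers the charge to a new physical page of the same
 * size: the counter already reflects it, so nothing is re-accounted.
 */
static void migrate(struct page *oldpage, struct page *newpage)
{
	newpage->memcg = oldpage->memcg;
	oldpage->memcg = NULL;
}

int main(void)
{
	struct memcg cg = { 0 };
	struct page a = { 0 }, b = { 0 };

	charge(&a, &cg, 1);
	migrate(&a, &b);
	printf("charged pages: %ld\n", cg.nr_charged_pages);	/* still 1 */
	return 0;
}

In the same spirit, the patch below moves the statistics update out of commit_charge() so that the migration path can commit the charge to the new page without entering an IRQ-disabled section twice per page.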
-rw-r--r--	mm/memcontrol.c	35
1 file changed, 10 insertions, 25 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a6e2be0241af..ec4dcf1b9562 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2728,7 +2728,7 @@ static void unlock_page_lru(struct page *page, int isolated)
 }
 
 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
-			   unsigned int nr_pages, bool lrucare)
+			   bool lrucare)
 {
 	struct page_cgroup *pc = lookup_page_cgroup(page);
 	int isolated;
@@ -2765,16 +2765,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
 
 	if (lrucare)
 		unlock_page_lru(page, isolated);
-
-	local_irq_disable();
-	mem_cgroup_charge_statistics(memcg, page, nr_pages);
-	/*
-	 * "charge_statistics" updated event counter. Then, check it.
-	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
-	 * if they exceeds softlimit.
-	 */
-	memcg_check_events(memcg, page);
-	local_irq_enable();
 }
 
 static DEFINE_MUTEX(set_limit_mutex);
@@ -6460,12 +6450,17 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
 	if (!memcg)
 		return;
 
+	commit_charge(page, memcg, lrucare);
+
 	if (PageTransHuge(page)) {
 		nr_pages <<= compound_order(page);
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 	}
 
-	commit_charge(page, memcg, nr_pages, lrucare);
+	local_irq_disable();
+	mem_cgroup_charge_statistics(memcg, page, nr_pages);
+	memcg_check_events(memcg, page);
+	local_irq_enable();
 
 	if (do_swap_account && PageSwapCache(page)) {
 		swp_entry_t entry = { .val = page_private(page) };
@@ -6651,7 +6646,6 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
 			bool lrucare)
 {
-	unsigned int nr_pages = 1;
 	struct page_cgroup *pc;
 	int isolated;
 
@@ -6660,6 +6654,8 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
 	VM_BUG_ON_PAGE(!lrucare && PageLRU(oldpage), oldpage);
 	VM_BUG_ON_PAGE(!lrucare && PageLRU(newpage), newpage);
 	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
+	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
+		       newpage);
 
 	if (mem_cgroup_disabled())
 		return;
@@ -6677,12 +6673,6 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
 	VM_BUG_ON_PAGE(!(pc->flags & PCG_MEM), oldpage);
 	VM_BUG_ON_PAGE(do_swap_account && !(pc->flags & PCG_MEMSW), oldpage);
 
-	if (PageTransHuge(oldpage)) {
-		nr_pages <<= compound_order(oldpage);
-		VM_BUG_ON_PAGE(!PageTransHuge(oldpage), oldpage);
-		VM_BUG_ON_PAGE(!PageTransHuge(newpage), newpage);
-	}
-
 	if (lrucare)
 		lock_page_lru(oldpage, &isolated);
 
@@ -6691,12 +6681,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
 	if (lrucare)
 		unlock_page_lru(oldpage, isolated);
 
-	local_irq_disable();
-	mem_cgroup_charge_statistics(pc->mem_cgroup, oldpage, -nr_pages);
-	memcg_check_events(pc->mem_cgroup, oldpage);
-	local_irq_enable();
-
-	commit_charge(newpage, pc->mem_cgroup, nr_pages, lrucare);
+	commit_charge(newpage, pc->mem_cgroup, lrucare);
 }
 
 /*