aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--mm/memcontrol.c168
1 file changed, 92 insertions(+), 76 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f1f3f5b41155..f0fea095d16a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5540,48 +5540,102 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5540 cancel_charge(memcg, nr_pages); 5540 cancel_charge(memcg, nr_pages);
5541} 5541}
5542 5542
/*
 * State gathered while uncharging a run of pages that belong to the
 * same memcg, so the page counters and vmstats can be updated once
 * per batch instead of once per page.
 */
struct uncharge_gather {
	struct mem_cgroup *memcg;	/* memcg all gathered pages belong to */
	unsigned long pgpgout;		/* PGPGOUT events to account */
	unsigned long nr_anon;		/* anonymous pages in the batch */
	unsigned long nr_file;		/* page cache pages in the batch */
	unsigned long nr_kmem;		/* kmem (PageKmemcg) pages in the batch */
	unsigned long nr_huge;		/* transparent huge pages in the batch */
	unsigned long nr_shmem;		/* swap-backed file (shmem) pages */
	struct page *dummy_page;	/* any page of the batch, for event checks */
};
5553
/* Reset the gather state so a new batch (new memcg) can be started. */
static inline void uncharge_gather_clear(struct uncharge_gather *ug)
{
	memset(ug, 0, sizeof(*ug));
}
5558
/*
 * Flush one gathered batch: return the pages to the memcg's page
 * counters, subtract them from the per-cpu statistics, account the
 * pgpgout events, and drop the css references the charges held.
 *
 * All pages in @ug are guaranteed to belong to ug->memcg (see
 * uncharge_page(), which flushes whenever the memcg changes).
 */
static void uncharge_batch(const struct uncharge_gather *ug)
{
	unsigned long nr_pages = ug->nr_anon + ug->nr_file + ug->nr_kmem;
	unsigned long flags;

	/* The root memcg has no page counters to uncharge. */
	if (!mem_cgroup_is_root(ug->memcg)) {
		page_counter_uncharge(&ug->memcg->memory, nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&ug->memcg->memsw, nr_pages);
		/* Separate kmem counter exists only on the legacy (v1) hierarchy. */
		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
			page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
		memcg_oom_recover(ug->memcg);
	}

	/* Statistics updates and event checks must not race with interrupts. */
	local_irq_save(flags);
	__this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS], ug->nr_anon);
	__this_cpu_sub(ug->memcg->stat->count[MEMCG_CACHE], ug->nr_file);
	__this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS_HUGE], ug->nr_huge);
	__this_cpu_sub(ug->memcg->stat->count[NR_SHMEM], ug->nr_shmem);
	__this_cpu_add(ug->memcg->stat->events[PGPGOUT], ug->pgpgout);
	__this_cpu_add(ug->memcg->stat->nr_page_events, nr_pages);
	memcg_check_events(ug->memcg, ug->dummy_page);
	local_irq_restore(flags);

	/* Each charged page held a css reference; release them in one go. */
	if (!mem_cgroup_is_root(ug->memcg))
		css_put_many(&ug->memcg->css, nr_pages);
}
5586
/*
 * Add one page to the current uncharge batch, flushing the batch
 * first if the page belongs to a different memcg than the pages
 * gathered so far.  Clears page->mem_cgroup; the actual counter
 * updates happen later in uncharge_batch().
 */
static void uncharge_page(struct page *page, struct uncharge_gather *ug)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);

	/* Already uncharged (or never charged) - nothing to do. */
	if (!page->mem_cgroup)
		return;

	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point, we have fully
	 * exclusive access to the page.
	 */

	if (ug->memcg != page->mem_cgroup) {
		/* Batches are per-memcg: flush the old one before switching. */
		if (ug->memcg) {
			uncharge_batch(ug);
			uncharge_gather_clear(ug);
		}
		ug->memcg = page->mem_cgroup;
	}

	if (!PageKmemcg(page)) {
		unsigned int nr_pages = 1;

		if (PageTransHuge(page)) {
			/* Count all tail pages of a compound page as well. */
			nr_pages <<= compound_order(page);
			ug->nr_huge += nr_pages;
		}
		if (PageAnon(page))
			ug->nr_anon += nr_pages;
		else {
			ug->nr_file += nr_pages;
			if (PageSwapBacked(page))
				ug->nr_shmem += nr_pages;
		}
		/* One pgpgout event per (compound) page, not per base page. */
		ug->pgpgout++;
	} else {
		/* Kernel memory pages are accounted separately. */
		ug->nr_kmem += 1 << compound_order(page);
		__ClearPageKmemcg(page);
	}

	ug->dummy_page = page;
	page->mem_cgroup = NULL;
}
5573 5632
/*
 * Uncharge every page on @page_list, batching counter updates per
 * memcg via the uncharge_gather state.
 */
static void uncharge_list(struct list_head *page_list)
{
	struct uncharge_gather ug;
	struct list_head *next;

	uncharge_gather_clear(&ug);

	/*
	 * Note that the list can be a single page->lru; hence the
	 * do-while loop instead of a simple list_for_each_entry().
	 */
	next = page_list->next;
	do {
		struct page *page;

		page = list_entry(next, struct page, lru);
		/* Grab the successor before uncharge_page() touches the page. */
		next = page->lru.next;

		uncharge_page(page, &ug);
	} while (next != page_list);

	/* Flush the final batch, if any page was actually charged. */
	if (ug.memcg)
		uncharge_batch(&ug);
}
5644 5657
5645/** 5658/**
@@ -5651,6 +5664,8 @@ static void uncharge_list(struct list_head *page_list)
5651 */ 5664 */
/*
 * Uncharge a single page: gather it into a one-entry batch and flush
 * immediately, without touching page->lru (unlike uncharge_list()).
 */
void mem_cgroup_uncharge(struct page *page)
{
	struct uncharge_gather ug;

	if (mem_cgroup_disabled())
		return;

	/* NOTE(review): lines 5670-5672 fall between diff hunks and are not
	 * visible here - presumably only a comment/blank lines; verify
	 * against the full tree. */
	if (!page->mem_cgroup)
		return;

	uncharge_gather_clear(&ug);
	uncharge_page(page, &ug);
	uncharge_batch(&ug);
}
5664 5680
5665/** 5681/**