about summary refs log tree commit diff stats
path: root/mm/memcontrol.c
diff options
context:
space:
mode:
author    Chen Gang <762976180@qq.com>    2015-04-14 18:47:35 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-04-14 19:49:04 -0400
commitb1b0deabbffa922fed808d4a5d99d03372a4c701 (patch)
tree2e4a8fe4560046131a570d1076fcb54f659405d0 /mm/memcontrol.c
parent6b6378355b925050eb6fa966742d8c2d65ff0d83 (diff)
mm: memcontrol: let mem_cgroup_move_account() have effect only if MMU enabled
When !MMU, it will report warning. The related warning with allmodconfig
under c6x:

    CC      mm/memcontrol.o
  mm/memcontrol.c:2802:12: warning: 'mem_cgroup_move_account' defined but not used [-Wunused-function]
   static int mem_cgroup_move_account(struct page *page,
              ^

Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--    mm/memcontrol.c    172
1 file changed, 86 insertions(+), 86 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 68d4890fc4bd..f227786e73db 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2785,92 +2785,6 @@ void mem_cgroup_split_huge_fixup(struct page *head)
2785} 2785}
2786#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 2786#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2787 2787
2788/**
2789 * mem_cgroup_move_account - move account of the page
2790 * @page: the page
2791 * @nr_pages: number of regular pages (>1 for huge pages)
2792 * @from: mem_cgroup which the page is moved from.
2793 * @to: mem_cgroup which the page is moved to. @from != @to.
2794 *
2795 * The caller must confirm following.
2796 * - page is not on LRU (isolate_page() is useful.)
2797 * - compound_lock is held when nr_pages > 1
2798 *
2799 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
2800 * from old cgroup.
2801 */
2802static int mem_cgroup_move_account(struct page *page,
2803 unsigned int nr_pages,
2804 struct mem_cgroup *from,
2805 struct mem_cgroup *to)
2806{
2807 unsigned long flags;
2808 int ret;
2809
2810 VM_BUG_ON(from == to);
2811 VM_BUG_ON_PAGE(PageLRU(page), page);
2812 /*
2813 * The page is isolated from LRU. So, collapse function
2814 * will not handle this page. But page splitting can happen.
2815 * Do this check under compound_page_lock(). The caller should
2816 * hold it.
2817 */
2818 ret = -EBUSY;
2819 if (nr_pages > 1 && !PageTransHuge(page))
2820 goto out;
2821
2822 /*
2823 * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
2824 * of its source page while we change it: page migration takes
2825 * both pages off the LRU, but page cache replacement doesn't.
2826 */
2827 if (!trylock_page(page))
2828 goto out;
2829
2830 ret = -EINVAL;
2831 if (page->mem_cgroup != from)
2832 goto out_unlock;
2833
2834 spin_lock_irqsave(&from->move_lock, flags);
2835
2836 if (!PageAnon(page) && page_mapped(page)) {
2837 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
2838 nr_pages);
2839 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
2840 nr_pages);
2841 }
2842
2843 if (PageWriteback(page)) {
2844 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
2845 nr_pages);
2846 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
2847 nr_pages);
2848 }
2849
2850 /*
2851 * It is safe to change page->mem_cgroup here because the page
2852 * is referenced, charged, and isolated - we can't race with
2853 * uncharging, charging, migration, or LRU putback.
2854 */
2855
2856 /* caller should have done css_get */
2857 page->mem_cgroup = to;
2858 spin_unlock_irqrestore(&from->move_lock, flags);
2859
2860 ret = 0;
2861
2862 local_irq_disable();
2863 mem_cgroup_charge_statistics(to, page, nr_pages);
2864 memcg_check_events(to, page);
2865 mem_cgroup_charge_statistics(from, page, -nr_pages);
2866 memcg_check_events(from, page);
2867 local_irq_enable();
2868out_unlock:
2869 unlock_page(page);
2870out:
2871 return ret;
2872}
2873
2874#ifdef CONFIG_MEMCG_SWAP 2788#ifdef CONFIG_MEMCG_SWAP
2875static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, 2789static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2876 bool charge) 2790 bool charge)
@@ -4822,6 +4736,92 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4822 return page; 4736 return page;
4823} 4737}
4824 4738
4739/**
4740 * mem_cgroup_move_account - move account of the page
4741 * @page: the page
4742 * @nr_pages: number of regular pages (>1 for huge pages)
4743 * @from: mem_cgroup which the page is moved from.
4744 * @to: mem_cgroup which the page is moved to. @from != @to.
4745 *
4746 * The caller must confirm following.
4747 * - page is not on LRU (isolate_page() is useful.)
4748 * - compound_lock is held when nr_pages > 1
4749 *
4750 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
4751 * from old cgroup.
4752 */
4753static int mem_cgroup_move_account(struct page *page,
4754 unsigned int nr_pages,
4755 struct mem_cgroup *from,
4756 struct mem_cgroup *to)
4757{
4758 unsigned long flags;
4759 int ret;
4760
4761 VM_BUG_ON(from == to);
4762 VM_BUG_ON_PAGE(PageLRU(page), page);
4763 /*
4764 * The page is isolated from LRU. So, collapse function
4765 * will not handle this page. But page splitting can happen.
4766 * Do this check under compound_page_lock(). The caller should
4767 * hold it.
4768 */
4769 ret = -EBUSY;
4770 if (nr_pages > 1 && !PageTransHuge(page))
4771 goto out;
4772
4773 /*
4774 * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
4775 * of its source page while we change it: page migration takes
4776 * both pages off the LRU, but page cache replacement doesn't.
4777 */
4778 if (!trylock_page(page))
4779 goto out;
4780
4781 ret = -EINVAL;
4782 if (page->mem_cgroup != from)
4783 goto out_unlock;
4784
4785 spin_lock_irqsave(&from->move_lock, flags);
4786
4787 if (!PageAnon(page) && page_mapped(page)) {
4788 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4789 nr_pages);
4790 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4791 nr_pages);
4792 }
4793
4794 if (PageWriteback(page)) {
4795 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4796 nr_pages);
4797 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4798 nr_pages);
4799 }
4800
4801 /*
4802 * It is safe to change page->mem_cgroup here because the page
4803 * is referenced, charged, and isolated - we can't race with
4804 * uncharging, charging, migration, or LRU putback.
4805 */
4806
4807 /* caller should have done css_get */
4808 page->mem_cgroup = to;
4809 spin_unlock_irqrestore(&from->move_lock, flags);
4810
4811 ret = 0;
4812
4813 local_irq_disable();
4814 mem_cgroup_charge_statistics(to, page, nr_pages);
4815 memcg_check_events(to, page);
4816 mem_cgroup_charge_statistics(from, page, -nr_pages);
4817 memcg_check_events(from, page);
4818 local_irq_enable();
4819out_unlock:
4820 unlock_page(page);
4821out:
4822 return ret;
4823}
4824
4825static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, 4825static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4826 unsigned long addr, pte_t ptent, union mc_target *target) 4826 unsigned long addr, pte_t ptent, union mc_target *target)
4827{ 4827{